diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py index 70980a1a26..c36f147d50 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py @@ -13,15 +13,33 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .image_classification import ImageClassificationPredictionInstance -from .image_object_detection import ImageObjectDetectionPredictionInstance -from .image_segmentation import ImageSegmentationPredictionInstance -from .text_classification import TextClassificationPredictionInstance -from .text_extraction import TextExtractionPredictionInstance -from .text_sentiment import TextSentimentPredictionInstance -from .video_action_recognition import VideoActionRecognitionPredictionInstance -from .video_classification import VideoClassificationPredictionInstance -from .video_object_tracking import VideoObjectTrackingPredictionInstance +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) __all__ = ( "ImageClassificationPredictionInstance", diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py index eb4d25a1e3..135f3bff54 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py @@ -13,12 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
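The hunks above (and the similar ones that follow for the params, prediction, and training-job schema packages) only rewrap single-line imports into parenthesized form; nothing is added to or removed from the public import surface. As a quick sanity check, a hedged sketch — field values are placeholders, not taken from this change — of importing and constructing one of these schema types, which works identically before and after the rewrap:

```python
# The rewrap is purely cosmetic; this import resolves exactly as before.
from google.cloud.aiplatform.v1.schema.predict.instance_v1.types import (
    ImageClassificationPredictionInstance,
)

# Placeholder values for illustration: `content` carries the base64-encoded
# image bytes and `mime_type` its media type.
instance = ImageClassificationPredictionInstance(
    content="iVBORw0KGgo...",  # truncated base64 placeholder
    mime_type="image/png",
)
```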
# -from .image_classification import ImageClassificationPredictionParams -from .image_object_detection import ImageObjectDetectionPredictionParams -from .image_segmentation import ImageSegmentationPredictionParams -from .video_action_recognition import VideoActionRecognitionPredictionParams -from .video_classification import VideoClassificationPredictionParams -from .video_object_tracking import VideoObjectTrackingPredictionParams +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) __all__ = ( "ImageClassificationPredictionParams", diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py index 0ec24f828b..12fc5b9a2f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py @@ -13,16 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .classification import ClassificationPredictionResult -from .image_object_detection import ImageObjectDetectionPredictionResult -from .image_segmentation import ImageSegmentationPredictionResult -from .tabular_classification import TabularClassificationPredictionResult -from .tabular_regression import TabularRegressionPredictionResult -from .text_extraction import TextExtractionPredictionResult -from .text_sentiment import TextSentimentPredictionResult -from .video_action_recognition import VideoActionRecognitionPredictionResult -from .video_classification import VideoClassificationPredictionResult -from .video_object_tracking import VideoObjectTrackingPredictionResult +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) __all__ = ( "ClassificationPredictionResult", diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py index 765fe6635a..f85b4686a6 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py @@ -57,7 +57,9 @@ AutoMlVideoObjectTracking, AutoMlVideoObjectTrackingInputs, ) -from .export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig +from .export_evaluated_data_items_config import ( + 
ExportEvaluatedDataItemsConfig, +) __all__ = ( "AutoMlImageClassification", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py index 70980a1a26..c36f147d50 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -13,15 +13,33 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .image_classification import ImageClassificationPredictionInstance -from .image_object_detection import ImageObjectDetectionPredictionInstance -from .image_segmentation import ImageSegmentationPredictionInstance -from .text_classification import TextClassificationPredictionInstance -from .text_extraction import TextExtractionPredictionInstance -from .text_sentiment import TextSentimentPredictionInstance -from .video_action_recognition import VideoActionRecognitionPredictionInstance -from .video_classification import VideoClassificationPredictionInstance -from .video_object_tracking import VideoObjectTrackingPredictionInstance +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) __all__ = ( "ImageClassificationPredictionInstance", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py index eb4d25a1e3..135f3bff54 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -13,12 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -from .image_classification import ImageClassificationPredictionParams -from .image_object_detection import ImageObjectDetectionPredictionParams -from .image_segmentation import ImageSegmentationPredictionParams -from .video_action_recognition import VideoActionRecognitionPredictionParams -from .video_classification import VideoClassificationPredictionParams -from .video_object_tracking import VideoObjectTrackingPredictionParams +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) __all__ = ( "ImageClassificationPredictionParams", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py index 4df73af8b1..582c0bbe12 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -13,17 +13,39 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .classification import ClassificationPredictionResult -from .image_object_detection import ImageObjectDetectionPredictionResult -from .image_segmentation import ImageSegmentationPredictionResult -from .tabular_classification import TabularClassificationPredictionResult -from .tabular_regression import TabularRegressionPredictionResult -from .text_extraction import TextExtractionPredictionResult -from .text_sentiment import TextSentimentPredictionResult -from .time_series_forecasting import TimeSeriesForecastingPredictionResult -from .video_action_recognition import VideoActionRecognitionPredictionResult -from .video_classification import VideoClassificationPredictionResult -from .video_object_tracking import VideoObjectTrackingPredictionResult +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) __all__ = ( "ClassificationPredictionResult", diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py index 83ac9d7c85..7de288bc76 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py +++ 
b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -62,7 +62,9 @@ AutoMlVideoObjectTracking, AutoMlVideoObjectTrackingInputs, ) -from .export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig +from .export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) __all__ = ( "AutoMlImageClassification", diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index bb6e2a833a..055ceb5de2 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -258,6 +258,7 @@ from .types.machine_resources import DedicatedResources from .types.machine_resources import DiskSpec from .types.machine_resources import MachineSpec +from .types.machine_resources import NfsMount from .types.machine_resources import ResourcesConsumed from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters from .types.metadata_schema import MetadataSchema @@ -826,6 +827,7 @@ "MutateDeployedIndexRequest", "MutateDeployedIndexResponse", "NearestNeighborSearchOperationMetadata", + "NfsMount", "PauseModelDeploymentMonitoringJobRequest", "PipelineJob", "PipelineJobDetail", diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index 4149620f5c..5c2f33f953 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index f6463e865a..d3df58d4a4 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index 227b6b1e7b..8442c54b3c 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -85,6 +85,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -292,5 +293,9 @@ def list_annotations( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("DatasetServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index bca18ceba0..ca284a622c 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -520,5 +520,9 @@ def list_annotations( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("DatasetServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 9bfcc5303e..ac4a38c634 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -804,9 +804,7 @@ async def deploy_model( *, endpoint: str = None, deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[ - endpoint_service.DeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -814,7 +812,6 @@ async def deploy_model( r"""Deploys a Model into this Endpoint, creating a DeployedModel within it. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -866,7 +863,7 @@ def sample_deploy_model(): This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]`): + traffic_split (:class:`Mapping[str, int]`): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -963,9 +960,7 @@ async def undeploy_model( *, endpoint: str = None, deployed_model_id: str = None, - traffic_split: Sequence[ - endpoint_service.UndeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -974,7 +969,6 @@ async def undeploy_model( DeployedModel from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1018,7 +1012,7 @@ def sample_undeploy_model(): This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`): + traffic_split (:class:`Mapping[str, int]`): If this field is provided, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. 
If last DeployedModel is diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index 75adf0a620..63b5173eea 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -1081,9 +1081,7 @@ def deploy_model( *, endpoint: str = None, deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[ - endpoint_service.DeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1091,7 +1089,6 @@ def deploy_model( r"""Deploys a Model into this Endpoint, creating a DeployedModel within it. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1143,7 +1140,7 @@ def sample_deploy_model(): This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -1239,9 +1236,7 @@ def undeploy_model( *, endpoint: str = None, deployed_model_id: str = None, - traffic_split: Sequence[ - endpoint_service.UndeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1250,7 +1245,6 @@ def undeploy_model( DeployedModel from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1294,7 +1288,7 @@ def sample_undeploy_model(): This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): If this field is provided, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py index 97f94b150a..70a0211ecc 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -84,6 +84,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -241,5 +242,9 @@ def undeploy_model( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("EndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py index 8b5fc08275..ad16ecf650 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -436,5 +436,9 @@ def undeploy_model( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("EndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py index e4c15811da..e9e02cba66 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Mapping, Optional, AsyncIterable, Awaitable, @@ -244,7 +245,6 @@ async def read_feature_values( entities of an EntityType, please use StreamingReadFeatureValues. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -357,7 +357,6 @@ def streaming_read_feature_values( on their size, data for different entities may be broken up across multiple responses. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py index dde110331a..22db389c17 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -452,7 +452,6 @@ def read_feature_values( entities of an EntityType, please use StreamingReadFeatureValues. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -565,7 +564,6 @@ def streaming_read_feature_values( on their size, data for different entities may be broken up across multiple responses. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py index f8cb3a6337..61e2d46b52 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -80,6 +80,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -165,5 +166,9 @@ def streaming_read_feature_values( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("FeaturestoreOnlineServingServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py index 546f9918f0..92cbb77fa1 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py @@ -297,5 +297,9 @@ def streaming_read_feature_values( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("FeaturestoreOnlineServingServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py index d1cbca0f11..11c03dea48 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -241,7 +241,6 @@ async def create_featurestore( r"""Creates a new Featurestore in a given project and location. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -723,7 +722,6 @@ async def delete_featurestore( any EntityTypes or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1335,7 +1333,6 @@ async def delete_entity_type( Features or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2215,7 +2212,6 @@ async def import_feature_values( or retention policy. - Online serving cluster is under-provisioned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2345,7 +2341,6 @@ async def batch_read_feature_values( correctness is guaranteed for Feature values of each read instance as of each instance's read timestamp. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2474,7 +2469,6 @@ async def export_feature_values( r"""Exports Feature values from all the entities of a target EntityType. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2599,7 +2593,6 @@ async def search_features( r"""Searches Features matching a query in a given project. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py index fb75ae15c0..1246e05d74 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -506,7 +506,6 @@ def create_featurestore( r"""Creates a new Featurestore in a given project and location. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -988,7 +987,6 @@ def delete_featurestore( any EntityTypes or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1600,7 +1598,6 @@ def delete_entity_type( Features or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2480,7 +2477,6 @@ def import_feature_values( or retention policy. - Online serving cluster is under-provisioned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2610,7 +2606,6 @@ def batch_read_feature_values( correctness is guaranteed for Feature values of each read instance as of each instance's read timestamp. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2741,7 +2736,6 @@ def export_feature_values( r"""Exports Feature values from all the entities of a target EntityType. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2866,7 +2860,6 @@ def search_features( r"""Searches Features matching a query in a given project. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py index 9a7737e60a..6e7e2cef88 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py @@ -87,6 +87,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -435,5 +436,9 @@ def search_features( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("FeaturestoreServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py index e225ebb50b..a38415caab 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py @@ -843,5 +843,9 @@ def search_features( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("FeaturestoreServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index 6e5912018a..eb1963c306 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -791,7 +791,6 @@ async def deploy_index( DeployedIndex within it. Only non-empty Indexes can be deployed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -923,7 +922,6 @@ async def undeploy_index( DeployedIndex from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1050,7 +1048,6 @@ async def mutate_deployed_index( r"""Update an existing DeployedIndex under an IndexEndpoint. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py index 4bfc835496..985a9907aa 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -1032,7 +1032,6 @@ def deploy_index( DeployedIndex within it. Only non-empty Indexes can be deployed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1164,7 +1163,6 @@ def undeploy_index( DeployedIndex from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1291,7 +1289,6 @@ def mutate_deployed_index( r"""Update an existing DeployedIndex under an IndexEndpoint. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py index b2f79eff67..cdfcdf00c3 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -84,6 +84,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -258,5 +259,9 @@ def mutate_deployed_index( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("IndexEndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py index bfef0ef596..eee1cb6fa2 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py @@ -480,5 +480,9 @@ def mutate_deployed_index( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("IndexEndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py index b5d2cbc59e..8f237cce9a 100644 --- a/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -682,7 +682,6 @@ async def delete_index( [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had been undeployed. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py index 892b4e7930..910800fb14 100644 --- a/google/cloud/aiplatform_v1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -926,7 +926,6 @@ def delete_index( [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had been undeployed. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_service/transports/base.py index 64661ee467..95a5aa9a53 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/base.py @@ -83,6 +83,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. 
""" + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -211,5 +212,9 @@ def delete_index( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("IndexServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py index 0dd2d14a53..db0405a577 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py @@ -381,5 +381,9 @@ def delete_index( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("IndexServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 32d9212950..b655592a55 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -277,7 +277,6 @@ async def create_custom_job( r"""Creates a CustomJob. A created CustomJob right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -735,7 +734,6 @@ async def cancel_custom_job( [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1260,7 +1258,6 @@ async def cancel_data_labeling_job( r"""Cancels a DataLabelingJob. Success of cancellation is not guaranteed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1805,7 +1802,6 @@ async def cancel_hyperparameter_tuning_job( [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1892,7 +1888,6 @@ async def create_batch_prediction_job( r"""Creates a BatchPredictionJob. A BatchPredictionJob once created will right away be attempted to start. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2220,7 +2215,6 @@ async def delete_batch_prediction_job( r"""Deletes a BatchPredictionJob. Can only be called on jobs that already finished. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2353,7 +2347,6 @@ async def cancel_batch_prediction_job( is set to ``CANCELLED``. Any files already outputted by the job are not deleted. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2442,7 +2435,6 @@ async def create_model_deployment_monitoring_job( r"""Creates a ModelDeploymentMonitoringJob. It will run periodically on a configured interval. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2560,7 +2552,6 @@ async def search_model_deployment_monitoring_stats_anomalies( r"""Searches Model Monitoring Statistics generated within a given time window. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3187,7 +3178,6 @@ async def pause_model_deployment_monitoring_job( [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] to 'PAUSED'. - .. 
code-block:: python from google.cloud import aiplatform_v1 @@ -3276,7 +3266,6 @@ async def resume_model_deployment_monitoring_job( will start to run from next scheduled time. A deleted ModelDeploymentMonitoringJob can't be resumed. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 2c6e8edcf4..4a5ceff8d7 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -694,7 +694,6 @@ def create_custom_job( r"""Creates a CustomJob. A created CustomJob right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1152,7 +1151,6 @@ def cancel_custom_job( [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1677,7 +1675,6 @@ def cancel_data_labeling_job( r"""Cancels a DataLabelingJob. Success of cancellation is not guaranteed. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2230,7 +2227,6 @@ def cancel_hyperparameter_tuning_job( [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2319,7 +2315,6 @@ def create_batch_prediction_job( r"""Creates a BatchPredictionJob. A BatchPredictionJob once created will right away be attempted to start. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2651,7 +2646,6 @@ def delete_batch_prediction_job( r"""Deletes a BatchPredictionJob. Can only be called on jobs that already finished. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2786,7 +2780,6 @@ def cancel_batch_prediction_job( is set to ``CANCELLED``. Any files already outputted by the job are not deleted. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2877,7 +2870,6 @@ def create_model_deployment_monitoring_job( r"""Creates a ModelDeploymentMonitoringJob. It will run periodically on a configured interval. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3001,7 +2993,6 @@ def search_model_deployment_monitoring_stats_anomalies( r"""Searches Model Monitoring Statistics generated within a given time window. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3650,7 +3641,6 @@ def pause_model_deployment_monitoring_job( [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] to 'PAUSED'. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3743,7 +3733,6 @@ def resume_model_deployment_monitoring_job( will start to run from next scheduled time. A deleted ModelDeploymentMonitoringJob can't be resumed. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py index d3a9c500ec..1450896589 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -99,6 +99,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -591,5 +592,9 @@ def resume_model_deployment_monitoring_job( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("JobServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index a6c5f955ac..a71ddac0fd 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -1131,5 +1131,9 @@ def resume_model_deployment_monitoring_job( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("JobServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py index 2e8efc8907..c9d70b09e5 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -248,7 +248,6 @@ async def create_metadata_store( r"""Initializes a MetadataStore, including allocation of resources. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -592,7 +591,6 @@ async def delete_metadata_store( r"""Deletes a single MetadataStore and all its child resources (Artifacts, Executions, and Contexts). - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1072,7 +1070,7 @@ def sample_update_artifact(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -1738,7 +1736,7 @@ def sample_update_context(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -2053,7 +2051,6 @@ async def add_context_artifacts_and_executions( If any of the Artifacts or Executions have already been added to a Context, they are simply skipped. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2179,7 +2176,6 @@ async def add_context_children( cycle or cause any Context to have more than 10 parents, the request will fail with an INVALID_ARGUMENT error. - .. 
code-block:: python from google.cloud import aiplatform_v1 @@ -2290,7 +2286,6 @@ async def query_context_lineage_subgraph( specified Context, connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2752,7 +2747,7 @@ def sample_update_execution(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -3066,7 +3061,6 @@ async def add_execution_events( between the Execution and the Artifact, the Event is skipped. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3176,7 +3170,6 @@ async def query_execution_inputs_and_outputs( this Execution, in the form of LineageSubgraph that also contains the Execution and connecting Events. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3610,7 +3603,6 @@ async def query_artifact_lineage_subgraph( Artifacts and Executions connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py index d5a80ab566..ef81d28d11 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -556,7 +556,6 @@ def create_metadata_store( r"""Initializes a MetadataStore, including allocation of resources. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -900,7 +899,6 @@ def delete_metadata_store( r"""Deletes a single MetadataStore and all its child resources (Artifacts, Executions, and Contexts). - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1380,7 +1378,7 @@ def sample_update_artifact(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -2046,7 +2044,7 @@ def sample_update_context(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -2361,7 +2359,6 @@ def add_context_artifacts_and_executions( If any of the Artifacts or Executions have already been added to a Context, they are simply skipped. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2491,7 +2488,6 @@ def add_context_children( cycle or cause any Context to have more than 10 parents, the request will fail with an INVALID_ARGUMENT error. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2602,7 +2598,6 @@ def query_context_lineage_subgraph( specified Context, connected by Event edges and returned as a LineageSubgraph. - .. 
code-block:: python from google.cloud import aiplatform_v1 @@ -3066,7 +3061,7 @@ def sample_update_execution(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -3380,7 +3375,6 @@ def add_execution_events( between the Execution and the Artifact, the Event is skipped. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3490,7 +3484,6 @@ def query_execution_inputs_and_outputs( this Execution, in the form of LineageSubgraph that also contains the Execution and connecting Events. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3928,7 +3921,6 @@ def query_artifact_lineage_subgraph( Artifacts and Executions connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py index 94150c6887..d95e06341e 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py @@ -92,6 +92,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -620,5 +621,9 @@ def query_artifact_lineage_subgraph( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("MetadataServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py index b728ffbf97..5f3a7c6c3b 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py @@ -1134,5 +1134,9 @@ def query_artifact_lineage_subgraph( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("MetadataServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index c2a25b2b07..f2b774fc2c 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -235,7 +235,6 @@ async def search_migratable_resources( ml.googleapis.com that can be migrated to Vertex AI's given location. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -352,7 +351,6 @@ async def batch_migrate_resources( automl.googleapis.com, and datalabeling.googleapis.com to Vertex AI. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 5d03f9ce7b..4a34966ffa 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -214,40 +214,40 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -572,7 +572,6 @@ def search_migratable_resources( ml.googleapis.com that can be migrated to Vertex AI's given location. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -691,7 +690,6 @@ def batch_migrate_resources( automl.googleapis.com, and datalabeling.googleapis.com to Vertex AI. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py index 62f51f8a78..b6eca7b872 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -82,6 +82,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -169,5 +170,9 @@ def batch_migrate_resources( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("MigrationServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py index 761441c69b..7f9f450863 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -310,5 +310,9 @@ def batch_migrate_resources( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("MigrationServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index 4d9c3958dc..cf6d6b4e65 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -595,8 +595,29 @@ def sample_update_model(): The request object. Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. model (:class:`google.cloud.aiplatform_v1.types.Model`): - Required. The Model which replaces - the resource on the server. + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -683,7 +704,6 @@ async def delete_model( [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -809,7 +829,6 @@ async def export_model( least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index 1233e3465e..725a0d8b1c 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -899,8 +899,29 @@ def sample_update_model(): The request object. Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. model (google.cloud.aiplatform_v1.types.Model): - Required. The Model which replaces - the resource on the server. + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -987,7 +1008,6 @@ def delete_model( [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1113,7 +1133,6 @@ def export_model( least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index daad09e9dc..83a4137a16 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -87,6 +87,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
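The expanded ``UpdateModel`` documentation above describes the ``@`` version-addressing rules. A hedged sketch of a model-level ``display_name`` update that follows those rules; the resource names are placeholders, and the ``@1`` / ``@-`` forms noted in the comments target a specific version or the model respectively:

.. code-block:: python

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.ModelServiceClient()

    model = aiplatform_v1.Model(
        # "models/123"   -> model-level update (display_name, description)
        # "models/123@1" -> version-specific update (version_description)
        # "models/123@-" -> also treated as a model-level update
        name="projects/my-project/locations/us-central1/models/123",
        display_name="my-model",
    )
    update_mask = field_mask_pb2.FieldMask(paths=["display_name"])

    response = client.update_model(model=model, update_mask=update_mask)
    print(response.display_name)
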
if ":" not in host: host += ":443" @@ -314,5 +315,9 @@ def list_model_evaluation_slices( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("ModelServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index f49c150d84..c7e299f79b 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -560,5 +560,9 @@ def list_model_evaluation_slices( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("ModelServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index e77d902c8f..ba8850b5a6 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -253,7 +253,6 @@ async def create_training_pipeline( r"""Creates a TrainingPipeline. A created TrainingPipeline right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -711,7 +710,6 @@ async def cancel_training_pipeline( [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -799,7 +797,6 @@ async def create_pipeline_job( r"""Creates a PipelineJob. A PipelineJob will run immediately when created. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1253,7 +1250,6 @@ async def cancel_pipeline_job( [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 2330c0cec7..a3da39bb5d 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -636,7 +636,6 @@ def create_training_pipeline( r"""Creates a TrainingPipeline. A created TrainingPipeline right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1094,7 +1093,6 @@ def cancel_training_pipeline( [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1182,7 +1180,6 @@ def create_pipeline_job( r"""Creates a PipelineJob. A PipelineJob will run immediately when created. - .. 
code-block:: python from google.cloud import aiplatform_v1 @@ -1636,7 +1633,6 @@ def cancel_pipeline_job( [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index acaef85bed..1c4076e641 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -87,6 +87,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -295,5 +296,9 @@ def cancel_pipeline_job( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("PipelineServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 195b4697e3..15edc32c41 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -559,5 +559,9 @@ def cancel_pipeline_job( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index 8b726d18f2..e3a8af7438 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -362,7 +362,6 @@ async def raw_predict( [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this prediction. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -545,7 +544,6 @@ async def explain( populated. Only deployed AutoML tabular Models have explanation_spec. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index fad783c3f2..c239abdc87 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -606,7 +606,6 @@ def raw_predict( [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this prediction. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -789,7 +788,6 @@ def explain( populated. Only deployed AutoML tabular Models have explanation_spec. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py index 9e8eac3a62..b74e081240 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -81,6 +81,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -180,5 +181,9 @@ def explain( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("PredictionServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py index bf18fb1443..f39ef1f4a3 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -334,5 +334,9 @@ def explain( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("PredictionServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index f67db2b65a..c610573b18 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -584,7 +584,6 @@ async def delete_specialist_pool( r"""Deletes a SpecialistPool as well as all Specialists in the pool. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index 8d57a0ddde..bf7d41b7c7 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -801,7 +801,6 @@ def delete_specialist_pool( r"""Deletes a SpecialistPool as well as all Specialists in the pool. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py index 8fc09ae153..3f68083c5a 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -83,6 +83,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -214,5 +215,9 @@ def update_specialist_pool( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("SpecialistPoolServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py index b3a5e5fa5e..bbb4e8f10c 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py @@ -396,5 +396,9 @@ def update_specialist_pool( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("SpecialistPoolServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py index d1c3cdb21e..8e0d7b96fb 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Mapping, Optional, AsyncIterable, Awaitable, @@ -2122,7 +2123,6 @@ async def batch_create_tensorboard_time_series( r"""Batch create TensorboardTimeSeries that belong to a TensorboardExperiment. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2818,7 +2818,6 @@ async def batch_read_tensorboard_time_series_data( Otherwise, that limit number of data points will be randomly selected from this time series and returned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -2928,7 +2927,6 @@ async def read_tensorboard_time_series_data( from this time series and returned. This value can be changed by changing max_data_points, which can't be greater than 10k. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3030,7 +3028,6 @@ def read_tensorboard_blob_data( project's Cloud Storage bucket without users having to obtain Cloud Storage access permission. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3137,7 +3134,6 @@ async def write_tensorboard_experiment_data( TensorboardTimeSeries in multiple TensorboardRun's. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3254,7 +3250,6 @@ async def write_tensorboard_run_data( TensorboardTimeSeries under a TensorboardRun. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3376,7 +3371,6 @@ async def export_tensorboard_time_series_data( r"""Exports a TensorboardTimeSeries' data. Data is returned in paginated responses. - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py index 57249fcf2a..16d42bbfb8 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -2420,7 +2420,6 @@ def batch_create_tensorboard_time_series( r"""Batch create TensorboardTimeSeries that belong to a TensorboardExperiment. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3140,7 +3139,6 @@ def batch_read_tensorboard_time_series_data( Otherwise, that limit number of data points will be randomly selected from this time series and returned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3256,7 +3254,6 @@ def read_tensorboard_time_series_data( from this time series and returned. This value can be changed by changing max_data_points, which can't be greater than 10k. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3362,7 +3359,6 @@ def read_tensorboard_blob_data( project's Cloud Storage bucket without users having to obtain Cloud Storage access permission. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3471,7 +3467,6 @@ def write_tensorboard_experiment_data( TensorboardTimeSeries in multiple TensorboardRun's. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3592,7 +3587,6 @@ def write_tensorboard_run_data( TensorboardTimeSeries under a TensorboardRun. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -3716,7 +3710,6 @@ def export_tensorboard_time_series_data( r"""Exports a TensorboardTimeSeries' data. Data is returned in paginated responses. - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py index 273f73a5fe..af82ace255 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py @@ -96,6 +96,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -606,5 +607,9 @@ def export_tensorboard_time_series_data( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("TensorboardServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py index 0a72074307..41ba3885a0 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py @@ -1109,5 +1109,9 @@ def export_tensorboard_time_series_data( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("TensorboardServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py index 5d1f3f6061..a6da2fcd73 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -228,7 +228,6 @@ async def create_study( r"""Creates a Study. A resource name will be generated after creation of the Study. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -435,7 +434,6 @@ async def list_studies( r"""Lists all the studies in a region for an associated project. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -630,7 +628,6 @@ async def lookup_study( r"""Looks a study up using the user-defined display_name field instead of the fully qualified resource name. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -729,7 +726,6 @@ async def suggest_trials( long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1137,7 +1133,6 @@ async def add_trial_measurement( Trial. This measurement is assumed to have been taken before the Trial is complete. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1378,7 +1373,6 @@ async def check_trial_early_stopping_state( will contain a [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1550,7 +1544,6 @@ async def list_optimal_trials( pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency - .. 
code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py index fff1e56956..89f15d2fdc 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -492,7 +492,6 @@ def create_study( r"""Creates a Study. A resource name will be generated after creation of the Study. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -699,7 +698,6 @@ def list_studies( r"""Lists all the studies in a region for an associated project. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -894,7 +892,6 @@ def lookup_study( r"""Looks a study up using the user-defined display_name field instead of the fully qualified resource name. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -993,7 +990,6 @@ def suggest_trials( long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1402,7 +1398,6 @@ def add_trial_measurement( Trial. This measurement is assumed to have been taken before the Trial is complete. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1645,7 +1640,6 @@ def check_trial_early_stopping_state( will contain a [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - .. code-block:: python from google.cloud import aiplatform_v1 @@ -1821,7 +1815,6 @@ def list_optimal_trials( pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency - .. code-block:: python from google.cloud import aiplatform_v1 diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py index 6afa674411..16086558c0 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py @@ -85,6 +85,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -355,5 +356,9 @@ def list_optimal_trials( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("VizierServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py index e68d07de59..cead668df9 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py @@ -664,5 +664,9 @@ def list_optimal_trials( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("VizierServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 0a8934d961..5bd19073ed 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -13,12 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .annotation import Annotation -from .annotation_spec import AnnotationSpec -from .artifact import Artifact -from .batch_prediction_job import BatchPredictionJob -from .completion_stats import CompletionStats -from .context import Context +from .annotation import ( + Annotation, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .artifact import ( + Artifact, +) +from .batch_prediction_job import ( + BatchPredictionJob, +) +from .completion_stats import ( + CompletionStats, +) +from .context import ( + Context, +) from .custom_job import ( ContainerSpec, CustomJob, @@ -27,7 +39,9 @@ Scheduling, WorkerPoolSpec, ) -from .data_item import DataItem +from .data_item import ( + DataItem, +) from .data_labeling_job import ( ActiveLearningConfig, DataLabelingJob, @@ -59,9 +73,15 @@ ListDatasetsResponse, UpdateDatasetRequest, ) -from .deployed_index_ref import DeployedIndexRef -from .deployed_model_ref import DeployedModelRef -from .encryption_spec import EncryptionSpec +from .deployed_index_ref import ( + DeployedIndexRef, +) +from .deployed_model_ref import ( + DeployedModelRef, +) +from .encryption_spec import ( + EncryptionSpec, +) from .endpoint import ( DeployedModel, Endpoint, @@ -83,10 +103,18 @@ UndeployModelResponse, UpdateEndpointRequest, ) -from .entity_type import EntityType -from .env_var import EnvVar -from .event import Event -from .execution import Execution +from .entity_type import ( + EntityType, +) +from .env_var import ( + EnvVar, +) +from .event import ( + Event, +) +from .execution import ( + Execution, +) from .explanation import ( Attribution, BlurBaselineConfig, @@ -102,15 +130,25 @@ SmoothGradConfig, XraiAttribution, ) -from .explanation_metadata import ExplanationMetadata -from .feature import Feature -from .feature_monitoring_stats import FeatureStatsAnomaly +from .explanation_metadata import ( + ExplanationMetadata, +) +from .feature import ( + Feature, +) +from .feature_monitoring_stats import ( + FeatureStatsAnomaly, +) from .feature_selector import ( FeatureSelector, IdMatcher, ) -from .featurestore import Featurestore -from .featurestore_monitoring import FeaturestoreMonitoringConfig +from .featurestore import ( + Featurestore, +) +from .featurestore_monitoring import ( + FeaturestoreMonitoringConfig, +) from .featurestore_online_service import ( FeatureValue, FeatureValueList, @@ -158,8 +196,12 @@ UpdateFeaturestoreOperationMetadata, 
UpdateFeaturestoreRequest, ) -from .hyperparameter_tuning_job import HyperparameterTuningJob -from .index import Index +from .hyperparameter_tuning_job import ( + HyperparameterTuningJob, +) +from .index import ( + Index, +) from .index_endpoint import ( DeployedIndex, DeployedIndexAuthConfig, @@ -243,7 +285,9 @@ UpdateModelDeploymentMonitoringJobOperationMetadata, UpdateModelDeploymentMonitoringJobRequest, ) -from .lineage_subgraph import LineageSubgraph +from .lineage_subgraph import ( + LineageSubgraph, +) from .machine_resources import ( AutomaticResources, AutoscalingMetricSpec, @@ -251,10 +295,15 @@ DedicatedResources, DiskSpec, MachineSpec, + NfsMount, ResourcesConsumed, ) -from .manual_batch_tuning_parameters import ManualBatchTuningParameters -from .metadata_schema import MetadataSchema +from .manual_batch_tuning_parameters import ( + ManualBatchTuningParameters, +) +from .metadata_schema import ( + MetadataSchema, +) from .metadata_service import ( AddContextArtifactsAndExecutionsRequest, AddContextArtifactsAndExecutionsResponse, @@ -304,8 +353,12 @@ UpdateContextRequest, UpdateExecutionRequest, ) -from .metadata_store import MetadataStore -from .migratable_resource import MigratableResource +from .metadata_store import ( + MetadataStore, +) +from .migratable_resource import ( + MigratableResource, +) from .migration_service import ( BatchMigrateResourcesOperationMetadata, BatchMigrateResourcesRequest, @@ -329,8 +382,12 @@ ModelMonitoringStatsAnomalies, ModelDeploymentMonitoringObjectiveType, ) -from .model_evaluation import ModelEvaluation -from .model_evaluation_slice import ModelEvaluationSlice +from .model_evaluation import ( + ModelEvaluation, +) +from .model_evaluation_slice import ( + ModelEvaluationSlice, +) from .model_monitoring import ( ModelMonitoringAlertConfig, ModelMonitoringObjectiveConfig, @@ -388,7 +445,9 @@ PredictResponse, RawPredictRequest, ) -from .specialist_pool import SpecialistPool +from .specialist_pool import ( + SpecialistPool, +) from .specialist_pool_service import ( CreateSpecialistPoolOperationMetadata, CreateSpecialistPoolRequest, @@ -405,7 +464,9 @@ StudySpec, Trial, ) -from .tensorboard import Tensorboard +from .tensorboard import ( + Tensorboard, +) from .tensorboard_data import ( Scalar, TensorboardBlob, @@ -414,8 +475,12 @@ TimeSeriesData, TimeSeriesDataPoint, ) -from .tensorboard_experiment import TensorboardExperiment -from .tensorboard_run import TensorboardRun +from .tensorboard_experiment import ( + TensorboardExperiment, +) +from .tensorboard_run import ( + TensorboardRun, +) from .tensorboard_service import ( BatchCreateTensorboardRunsRequest, BatchCreateTensorboardRunsResponse, @@ -460,7 +525,9 @@ WriteTensorboardRunDataRequest, WriteTensorboardRunDataResponse, ) -from .tensorboard_time_series import TensorboardTimeSeries +from .tensorboard_time_series import ( + TensorboardTimeSeries, +) from .training_pipeline import ( FilterSplit, FractionSplit, @@ -476,9 +543,15 @@ Int64Array, StringArray, ) -from .unmanaged_container_model import UnmanagedContainerModel -from .user_action_reference import UserActionReference -from .value import Value +from .unmanaged_container_model import ( + UnmanagedContainerModel, +) +from .user_action_reference import ( + UserActionReference, +) +from .value import ( + Value, +) from .vizier_service import ( AddTrialMeasurementRequest, CheckTrialEarlyStoppingStateMetatdata, @@ -714,6 +787,7 @@ "DedicatedResources", "DiskSpec", "MachineSpec", + "NfsMount", "ResourcesConsumed", "ManualBatchTuningParameters", 
"MetadataSchema", diff --git a/google/cloud/aiplatform_v1/types/annotation.py b/google/cloud/aiplatform_v1/types/annotation.py index 66b42cb6f6..db2eb8c3cf 100644 --- a/google/cloud/aiplatform_v1/types/annotation.py +++ b/google/cloud/aiplatform_v1/types/annotation.py @@ -61,7 +61,7 @@ class Annotation(proto.Message): "overwrite" update happens. annotation_source (google.cloud.aiplatform_v1.types.UserActionReference): Output only. The source of the Annotation. - labels (Sequence[google.cloud.aiplatform_v1.types.Annotation.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your Annotations. diff --git a/google/cloud/aiplatform_v1/types/artifact.py b/google/cloud/aiplatform_v1/types/artifact.py index dee19b59b7..853ae4b0cb 100644 --- a/google/cloud/aiplatform_v1/types/artifact.py +++ b/google/cloud/aiplatform_v1/types/artifact.py @@ -45,7 +45,7 @@ class Artifact(proto.Message): An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Artifact.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index 6b8545676b..c5adbf4251 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -179,7 +179,7 @@ class BatchPredictionJob(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the BatchPredictionJob was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/context.py b/google/cloud/aiplatform_v1/types/context.py index 1c9bfbdca2..f65d3613a5 100644 --- a/google/cloud/aiplatform_v1/types/context.py +++ b/google/cloud/aiplatform_v1/types/context.py @@ -41,7 +41,7 @@ class Context(proto.Message): An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Context.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index 6e16964798..1b269d4fc2 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -72,7 +72,7 @@ class CustomJob(proto.Message): error (google.rpc.status_pb2.Status): Output only. Only populated when job's state is ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1.types.CustomJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 @@ -87,7 +87,7 @@ class CustomJob(proto.Message): CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. 
- web_access_uris (Sequence[google.cloud.aiplatform_v1.types.CustomJob.WebAccessUrisEntry]): + web_access_uris (Mapping[str, str]): Output only. URIs for accessing `interactive shells `__ (one URI for each training node). Only available if @@ -199,6 +199,15 @@ class CustomJobSpec(proto.Message): If this field is left unspecified, the job is not peered with any network. + reserved_ip_ranges (Sequence[str]): + Optional. A list of names for the reserved ip ranges under + the VPC network that can be used for this job. + + If set, we will deploy the job within the provided ip + ranges. Otherwise, the job will be deployed to any ip ranges + under the provided VPC network. + + Example: ['vertex-ai-ip-range']. base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination): The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For @@ -265,6 +274,10 @@ class CustomJobSpec(proto.Message): proto.STRING, number=5, ) + reserved_ip_ranges = proto.RepeatedField( + proto.STRING, + number=13, + ) base_output_directory = proto.Field( proto.MESSAGE, number=6, @@ -305,6 +318,8 @@ class WorkerPoolSpec(proto.Message): replica_count (int): Optional. The number of worker replicas to use for this worker pool. + nfs_mounts (Sequence[google.cloud.aiplatform_v1.types.NfsMount]): + Optional. List of NFS mount spec. disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): Disk spec. """ @@ -330,6 +345,11 @@ class WorkerPoolSpec(proto.Message): proto.INT64, number=2, ) + nfs_mounts = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=machine_resources.NfsMount, + ) disk_spec = proto.Field( proto.MESSAGE, number=5, diff --git a/google/cloud/aiplatform_v1/types/data_item.py b/google/cloud/aiplatform_v1/types/data_item.py index 42df53a1d5..d97d22360e 100644 --- a/google/cloud/aiplatform_v1/types/data_item.py +++ b/google/cloud/aiplatform_v1/types/data_item.py @@ -41,7 +41,7 @@ class DataItem(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DataItem was last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.DataItem.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/data_labeling_job.py b/google/cloud/aiplatform_v1/types/data_labeling_job.py index 3a4c6a096d..0799b0e85d 100644 --- a/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -52,7 +52,7 @@ class DataLabelingJob(proto.Message): Required. Dataset resource names. Right now we only support labeling from a single Dataset. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` - annotation_labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.AnnotationLabelsEntry]): + annotation_labels (Mapping[str, str]): Labels to assign to annotations generated by this DataLabelingJob. Label keys and values can be no longer than 64 @@ -101,7 +101,7 @@ class DataLabelingJob(proto.Message): Output only. DataLabelingJob errors. It is only populated when job's state is ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your DataLabelingJobs. 
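The new ``reserved_ip_ranges`` and ``nfs_mounts`` fields above can be combined when building a custom job spec. A sketch under the assumption that ``NfsMount`` is re-exported at the package top level, as the updated type manifest suggests; the server address, VPC network, and range name are placeholders:

.. code-block:: python

    from google.cloud import aiplatform_v1

    worker_pool = aiplatform_v1.WorkerPoolSpec(
        machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-4"),
        replica_count=1,
        container_spec=aiplatform_v1.ContainerSpec(
            image_uri="gcr.io/my-project/trainer:latest",
        ),
        nfs_mounts=[
            aiplatform_v1.NfsMount(
                server="10.0.0.2",         # NFS server IP address
                path="/exports/training",  # path exported by the server
                mount_point="data",        # destination under the job's NFS mount root
            )
        ],
    )

    job_spec = aiplatform_v1.CustomJobSpec(
        worker_pool_specs=[worker_pool],
        network="projects/12345/global/networks/my-vpc",
        # New field: pin the job to named reserved ranges under the VPC.
        reserved_ip_ranges=["vertex-ai-ip-range"],
    )
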
diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index f70c531988..3eeaa861d2 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -64,7 +64,7 @@ class Dataset(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Dataset.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Datasets. @@ -151,7 +151,7 @@ class ImportDataConfig(proto.Message): input content. This field is a member of `oneof`_ ``source``. - data_item_labels (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig.DataItemLabelsEntry]): + data_item_labels (Mapping[str, str]): Labels that will be applied to newly imported DataItems. If an identical DataItem as one being imported already exists in the Dataset, then these labels will be appended to these diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 36330b806f..1c21e85e73 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -54,7 +54,7 @@ class Endpoint(proto.Message): and [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel] respectively. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.Endpoint.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -68,7 +68,7 @@ class Endpoint(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Endpoint.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 @@ -108,7 +108,8 @@ class Endpoint(proto.Message): ``{project}`` is a project number, as in ``12345``, and ``{network}`` is network name. enable_private_service_connect (bool): - If true, expose the Endpoint via private service connect. + Deprecated: If true, expose the Endpoint via private service + connect. Only one of the fields, [network][google.cloud.aiplatform.v1.Endpoint.network] or @@ -390,7 +391,7 @@ class PredictRequestResponseLoggingConfig(proto.Message): Percentage of requests to be logged, expressed as a fraction in range(0,1]. bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): - BigQuery table for logging. If only given project, a new + BigQuery table for logging. If only given a project, a new dataset will be created with name ``logging__`` where will be made BigQuery-dataset-name compatible (e.g. most special diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index 715b8d05cc..04a0870e23 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -278,7 +278,7 @@ class DeployModelRequest(proto.Message): must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. 
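With ``traffic_split`` documented as ``Mapping[str, int]``, a plain dict works in the deploy call; the special key ``"0"`` refers to the DeployedModel being created by the request. A hedged sketch with placeholder resource names:

.. code-block:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.EndpointServiceClient()

    deployed_model = aiplatform_v1.DeployedModel(
        model="projects/my-project/locations/us-central1/models/123",
        display_name="my-deployed-model",
        automatic_resources=aiplatform_v1.AutomaticResources(
            min_replica_count=1,
            max_replica_count=1,
        ),
    )

    operation = client.deploy_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/456",
        deployed_model=deployed_model,
        # Route all traffic to the model being deployed ("0" = this request).
        traffic_split={"0": 100},
    )
    print(operation.result())
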
- traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -357,7 +357,7 @@ class UndeployModelRequest(proto.Message): deployed_model_id (str): Required. The ID of the DeployedModel to be undeployed from the Endpoint. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): If this field is provided, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being diff --git a/google/cloud/aiplatform_v1/types/entity_type.py b/google/cloud/aiplatform_v1/types/entity_type.py index 8b52432812..7b30f812b8 100644 --- a/google/cloud/aiplatform_v1/types/entity_type.py +++ b/google/cloud/aiplatform_v1/types/entity_type.py @@ -51,7 +51,7 @@ class EntityType(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this EntityType was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.EntityType.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/event.py b/google/cloud/aiplatform_v1/types/event.py index d99f8ba8c1..89e76ae748 100644 --- a/google/cloud/aiplatform_v1/types/event.py +++ b/google/cloud/aiplatform_v1/types/event.py @@ -41,7 +41,7 @@ class Event(proto.Message): Output only. Time the Event occurred. type_ (google.cloud.aiplatform_v1.types.Event.Type): Required. The type of the Event. - labels (Sequence[google.cloud.aiplatform_v1.types.Event.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to annotate Events. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/execution.py b/google/cloud/aiplatform_v1/types/execution.py index ab1450da59..9d157b8ba2 100644 --- a/google/cloud/aiplatform_v1/types/execution.py +++ b/google/cloud/aiplatform_v1/types/execution.py @@ -48,7 +48,7 @@ class Execution(proto.Message): An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Execution.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/explanation.py b/google/cloud/aiplatform_v1/types/explanation.py index cbe77fc7e2..f1c7992ae5 100644 --- a/google/cloud/aiplatform_v1/types/explanation.py +++ b/google/cloud/aiplatform_v1/types/explanation.py @@ -676,7 +676,7 @@ class ExplanationMetadataOverride(proto.Message): time. Attributes: - inputs (Sequence[google.cloud.aiplatform_v1.types.ExplanationMetadataOverride.InputsEntry]): + inputs (Mapping[str, google.cloud.aiplatform_v1.types.ExplanationMetadataOverride.InputMetadataOverride]): Required. Overrides the [input metadata][google.cloud.aiplatform.v1.ExplanationMetadata.inputs] of the features. 
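Since the ``labels`` fields above are plain string-to-string maps, a dict can be assigned directly when constructing metadata resources such as an Execution; the names and values here are purely illustrative:

.. code-block:: python

    from google.cloud import aiplatform_v1

    execution = aiplatform_v1.Execution(
        display_name="training-run",
        state=aiplatform_v1.Execution.State.RUNNING,
        labels={"team": "ml-platform", "env": "dev"},
    )
    # proto-plus exposes the map like a dict.
    print(dict(execution.labels))
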
The key is the name of the feature to be diff --git a/google/cloud/aiplatform_v1/types/explanation_metadata.py b/google/cloud/aiplatform_v1/types/explanation_metadata.py index b62e337689..c528cf94f4 100644 --- a/google/cloud/aiplatform_v1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1/types/explanation_metadata.py @@ -31,7 +31,7 @@ class ExplanationMetadata(proto.Message): explanation. Attributes: - inputs (Sequence[google.cloud.aiplatform_v1.types.ExplanationMetadata.InputsEntry]): + inputs (Mapping[str, google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata]): Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. @@ -48,7 +48,7 @@ class ExplanationMetadata(proto.Message): For custom images, the key must match with the key in [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]. - outputs (Sequence[google.cloud.aiplatform_v1.types.ExplanationMetadata.OutputsEntry]): + outputs (Mapping[str, google.cloud.aiplatform_v1.types.ExplanationMetadata.OutputMetadata]): Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys diff --git a/google/cloud/aiplatform_v1/types/feature.py b/google/cloud/aiplatform_v1/types/feature.py index fd0f5d733a..e86df5b6ed 100644 --- a/google/cloud/aiplatform_v1/types/feature.py +++ b/google/cloud/aiplatform_v1/types/feature.py @@ -52,7 +52,7 @@ class Feature(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this EntityType was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.Feature.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/featurestore.py b/google/cloud/aiplatform_v1/types/featurestore.py index d6a758649d..c6cb0022a0 100644 --- a/google/cloud/aiplatform_v1/types/featurestore.py +++ b/google/cloud/aiplatform_v1/types/featurestore.py @@ -46,7 +46,7 @@ class Featurestore(proto.Message): Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Featurestore.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 @@ -61,8 +61,10 @@ class Featurestore(proto.Message): System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. online_serving_config (google.cloud.aiplatform_v1.types.Featurestore.OnlineServingConfig): - Required. Config for online serving - resources. + Optional. Config for online storage + resources. If unset, the featurestore will not + have an online store and cannot be used for + online serving. state (google.cloud.aiplatform_v1.types.Featurestore.State): Output only. State of the featurestore. encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): @@ -73,7 +75,7 @@ class Featurestore(proto.Message): """ class State(proto.Enum): - r"""Possible states a Featurestore can have.""" + r"""Possible states a featurestore can have.""" STATE_UNSPECIFIED = 0 STABLE = 1 UPDATING = 2 @@ -84,11 +86,12 @@ class OnlineServingConfig(proto.Message): Attributes: fixed_node_count (int): - The number of nodes for each cluster. 
The number of nodes - will not scale automatically but can be scaled manually by - providing different values when updating. Only one of - ``fixed_node_count`` and ``scaling`` can be set. Setting one - will reset the other. + The number of nodes for the online store. The + number of nodes doesn't scale automatically, but + you can manually update the number of nodes. If + set to 0, the featurestore will not have an + online store and cannot be used for online + serving. """ fixed_node_count = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py index af8386c55c..55aa325755 100644 --- a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py @@ -84,7 +84,7 @@ class HyperparameterTuningJob(proto.Message): error (google.rpc.status_pb2.Status): Output only. Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize HyperparameterTuningJobs. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/index.py b/google/cloud/aiplatform_v1/types/index.py index 5068bf6075..c119df7780 100644 --- a/google/cloud/aiplatform_v1/types/index.py +++ b/google/cloud/aiplatform_v1/types/index.py @@ -66,7 +66,7 @@ class Index(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Index.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index bd9324c224..6cfbc20e44 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -52,7 +52,7 @@ class IndexEndpoint(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.IndexEndpoint.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your IndexEndpoints. Label keys and values can be no longer than 64 @@ -81,19 +81,18 @@ class IndexEndpoint(proto.Message): network. If left unspecified, the Endpoint is not peered with any network. - Only one of the fields, [network][google.cloud.aiplatform.v1.IndexEndpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1.IndexEndpoint.enable_private_service_connect], - can be set. + and + [private_service_connect_config][google.cloud.aiplatform.v1.IndexEndpoint.private_service_connect_config] + are mutually exclusive. `Format `__: projects/{project}/global/networks/{network}. Where {project} is a project number, as in '12345', and {network} is network name. enable_private_service_connect (bool): - Optional. If true, expose the IndexEndpoint via private - service connect. + Optional. Deprecated: If true, expose the IndexEndpoint via + private service connect. 
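The relaxed ``OnlineServingConfig`` wording above implies a featurestore can now be created without an online store by setting ``fixed_node_count`` to 0 (or omitting the config entirely). A sketch of that call with placeholder names:

.. code-block:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.FeaturestoreServiceClient()

    featurestore = aiplatform_v1.Featurestore(
        online_serving_config=aiplatform_v1.Featurestore.OnlineServingConfig(
            # 0 nodes: offline (batch) use only, no online serving.
            fixed_node_count=0,
        ),
    )

    operation = client.create_featurestore(
        parent="projects/my-project/locations/us-central1",
        featurestore=featurestore,
        featurestore_id="my_featurestore",
    )
    print(operation.result())
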
Only one of the fields, [network][google.cloud.aiplatform.v1.IndexEndpoint.network] diff --git a/google/cloud/aiplatform_v1/types/job_state.py b/google/cloud/aiplatform_v1/types/job_state.py index 3fc8a20f39..db3ee682c3 100644 --- a/google/cloud/aiplatform_v1/types/job_state.py +++ b/google/cloud/aiplatform_v1/types/job_state.py @@ -36,6 +36,7 @@ class JobState(proto.Enum): JOB_STATE_CANCELLED = 7 JOB_STATE_PAUSED = 8 JOB_STATE_EXPIRED = 9 + JOB_STATE_UPDATING = 10 __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index 7fb8f910f5..23a5bb2187 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -27,6 +27,7 @@ "BatchDedicatedResources", "ResourcesConsumed", "DiskSpec", + "NfsMount", "AutoscalingMetricSpec", }, ) @@ -107,6 +108,12 @@ class DedicatedResources(proto.Message): will use [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count] as the default value. + + The value of this field impacts the charge against Vertex + CPU and GPU quotas. Specifically, you will be charged for + (max_replica_count \* number of cores in the selected + machine type) and (max_replica_count \* number of GPUs per + replica in the selected machine type). autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]): Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's @@ -276,6 +283,36 @@ class DiskSpec(proto.Message): ) +class NfsMount(proto.Message): + r"""Represents a mount configuration for Network File System + (NFS) to mount. + + Attributes: + server (str): + Required. IP address of the NFS server. + path (str): + Required. Source path exported from NFS server. Has to start + with '/', and combined with the ip address, it indicates the + source mount path in the form of ``server:path`` + mount_point (str): + Required. Destination mount path. The NFS will be mounted + for the user under /mnt/nfs/ + """ + + server = proto.Field( + proto.STRING, + number=1, + ) + path = proto.Field( + proto.STRING, + number=2, + ) + mount_point = proto.Field( + proto.STRING, + number=3, + ) + + class AutoscalingMetricSpec(proto.Message): r"""The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so diff --git a/google/cloud/aiplatform_v1/types/metadata_service.py b/google/cloud/aiplatform_v1/types/metadata_service.py index 6c0c1ff42d..30a48e00f3 100644 --- a/google/cloud/aiplatform_v1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1/types/metadata_service.py @@ -423,7 +423,7 @@ class UpdateArtifactRequest(proto.Message): Format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. allow_missing (bool): @@ -718,7 +718,7 @@ class UpdateContextRequest(proto.Message): field is used to identify the Context to be updated. Format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields + Optional. A FieldMask indicating which fields should be updated. 
Functionality of this field is not yet supported. allow_missing (bool): @@ -1110,7 +1110,7 @@ class UpdateExecutionRequest(proto.Message): Format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. allow_missing (bool): diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index 1b069101a8..18b3c95542 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -236,7 +236,7 @@ class Model(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Model.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py index cca9c571b4..371e179b94 100644 --- a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -78,6 +78,9 @@ class ModelDeploymentMonitoringJob(proto.Message): schedule_state (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): Output only. Schedule state when the monitoring job is in Running state. + latest_monitoring_pipeline_metadata (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.LatestMonitoringPipelineMetadata): + Output only. Latest triggered monitoring + pipeline metadata. model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveConfig]): Required. The config for monitoring objectives. This is a per DeployedModel config. @@ -131,7 +134,7 @@ class ModelDeploymentMonitoringJob(proto.Message): the TTL and we take the ceil of TTL/86400(a day). e.g. { second: 3600} indicates ttl = 1 day. - labels (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your ModelDeploymentMonitoringJob. @@ -180,6 +183,29 @@ class MonitoringScheduleState(proto.Enum): OFFLINE = 2 RUNNING = 3 + class LatestMonitoringPipelineMetadata(proto.Message): + r"""All metadata of most recent monitoring pipelines. + + Attributes: + run_time (google.protobuf.timestamp_pb2.Timestamp): + The time that most recent monitoring + pipelines that is related to this run. + status (google.rpc.status_pb2.Status): + The status of the most recent monitoring + pipeline. 
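Because the ``labels`` fields are now annotated as ``Mapping[str, str]`` and ``latest_monitoring_pipeline_metadata`` is an output-only sub-message, both read naturally as plain Python objects once a job has been fetched. A hedged sketch (the resource name is a placeholder):

.. code-block:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.JobServiceClient()
    job = client.get_model_deployment_monitoring_job(
        name=(
            "projects/my-project/locations/us-central1/"
            "modelDeploymentMonitoringJobs/123"
        )
    )

    # Map fields behave like dicts.
    print(dict(job.labels))

    # Output-only metadata about the most recent monitoring pipeline run.
    meta = job.latest_monitoring_pipeline_metadata
    print(meta.run_time, meta.status.code, meta.status.message)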
+ """ + + run_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + name = proto.Field( proto.STRING, number=1, @@ -202,6 +228,11 @@ class MonitoringScheduleState(proto.Enum): number=5, enum=MonitoringScheduleState, ) + latest_monitoring_pipeline_metadata = proto.Field( + proto.MESSAGE, + number=25, + message=LatestMonitoringPipelineMetadata, + ) model_deployment_monitoring_objective_configs = proto.RepeatedField( proto.MESSAGE, number=6, diff --git a/google/cloud/aiplatform_v1/types/model_monitoring.py b/google/cloud/aiplatform_v1/types/model_monitoring.py index bf08c76c01..4987479e5b 100644 --- a/google/cloud/aiplatform_v1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1/types/model_monitoring.py @@ -30,7 +30,7 @@ class ModelMonitoringObjectiveConfig(proto.Message): - r"""Next ID: 7 + r"""Next ID: 8 Attributes: training_dataset (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingDataset): @@ -131,14 +131,14 @@ class TrainingPredictionSkewDetectionConfig(proto.Message): parameters. Attributes: - skew_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]): + skew_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): Key is the feature name and value is the threshold. If a feature needs to be monitored for skew, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between the training and prediction feature. - attribution_score_skew_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.AttributionScoreSkewThresholdsEntry]): + attribution_score_skew_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): Key is the feature name and value is the threshold. The threshold here is against attribution score distance between the training @@ -162,14 +162,14 @@ class PredictionDriftDetectionConfig(proto.Message): r"""The config for Prediction data drift detection. Attributes: - drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): + drift_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): Key is the feature name and value is the threshold. If a feature needs to be monitored for drift, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between different time windws. - attribution_score_drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.AttributionScoreDriftThresholdsEntry]): + attribution_score_drift_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): Key is the feature name and value is the threshold. The threshold here is against attribution score distance between different diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index e5839c0f98..2257360d5a 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -234,8 +234,28 @@ class UpdateModelRequest(proto.Message): Attributes: model (google.cloud.aiplatform_v1.types.Model): - Required. 
The Model which replaces the - resource on the server. + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, refers to + a version specific update. + 2. model.name without the @ value, e.g. models/123, refers + to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a model + update. + 4. Supported model fields: display_name, description; + supported version-specific fields: version_description. + Labels are supported in both scenarios. Both the model + labels and the version labels are merged when a model is + returned. When updating labels, if the request is for + model-specific update, model label gets updated. + Otherwise, version labels get updated. + 5. A model name or model version name fields update mismatch + will cause a precondition error. + 6. One request cannot update both the model and the version + fields. You must update them separately. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py index ac4cc0f415..cbfd7335e7 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -58,7 +58,7 @@ class PipelineJob(proto.Message): Output only. Timestamp when this PipelineJob was most recently updated. pipeline_spec (google.protobuf.struct_pb2.Struct): - Required. The spec of the pipeline. + The spec of the pipeline. state (google.cloud.aiplatform_v1.types.PipelineState): Output only. The detailed state of the job. job_detail (google.cloud.aiplatform_v1.types.PipelineJobDetail): @@ -68,7 +68,7 @@ class PipelineJob(proto.Message): Output only. The error that occurred during pipeline execution. Only populated when the pipeline's state is FAILED or CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize PipelineJob. Label keys and values can be no longer than 64 @@ -115,7 +115,7 @@ class RuntimeConfig(proto.Message): r"""The runtime config of a PipelineJob. Attributes: - parameters (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParametersEntry]): + parameters (Mapping[str, google.cloud.aiplatform_v1.types.Value]): Deprecated. Use [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1.PipelineJob.RuntimeConfig.parameter_values] instead. The runtime parameters of the PipelineJob. The @@ -135,7 +135,7 @@ class RuntimeConfig(proto.Message): specified output directory. The service account specified in this pipeline must have the ``storage.objects.get`` and ``storage.objects.create`` permissions for this bucket. - parameter_values (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParameterValuesEntry]): + parameter_values (Mapping[str, google.protobuf.struct_pb2.Value]): The runtime parameters of the PipelineJob. The parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] @@ -302,10 +302,10 @@ class PipelineTaskDetail(proto.Message): Output only. A list of task status. This field keeps a record of task status evolving over time. 
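The threshold maps in the ``ModelMonitoringObjectiveConfig`` hunks above are keyed by feature name, with a ``ThresholdConfig`` message as the value. A small sketch, assuming ``ThresholdConfig`` takes a single ``value`` threshold (the feature names and values are made up):

.. code-block:: python

    from google.cloud.aiplatform_v1.types import model_monitoring

    SkewConfig = (
        model_monitoring.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig
    )
    skew_config = SkewConfig(
        skew_thresholds={
            "age": model_monitoring.ThresholdConfig(value=0.001),
            "country": model_monitoring.ThresholdConfig(value=0.003),
        }
    )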
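The version-addressing rules in the ``UpdateModelRequest`` docstring above hinge on the resource name placed in ``model.name``. A hedged sketch of a version-specific update, assuming the v1 ``Model`` message exposes ``version_description`` as that docstring implies (project, location, model ID, and field values are placeholders):

.. code-block:: python

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.ModelServiceClient()

    # "models/123@1" targets version 1 specifically; "models/123" or
    # "models/123@-" would target the model-level fields instead.
    model = aiplatform_v1.Model(
        name="projects/my-project/locations/us-central1/models/123@1",
        version_description="Retrained with March data",
    )
    updated = client.update_model(
        model=model,
        update_mask=field_mask_pb2.FieldMask(paths=["version_description"]),
    )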
- inputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.InputsEntry]): + inputs (Mapping[str, google.cloud.aiplatform_v1.types.PipelineTaskDetail.ArtifactList]): Output only. The runtime input artifacts of the task. - outputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.OutputsEntry]): + outputs (Mapping[str, google.cloud.aiplatform_v1.types.PipelineTaskDetail.ArtifactList]): Output only. The runtime output artifacts of the task. """ diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index 46139910ed..1b02ad9ce3 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -284,6 +284,10 @@ class ListPipelineJobsRequest(proto.Message): ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. - ``labels``: Supports key-value equality and key presence. + - ``template_uri``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``template_metadata.version_name``: Supports ``=``, + ``!=`` comparisons, and ``:`` wildcard. Filter expressions can be combined together using logical operators (``AND`` & ``OR``). For example: diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py index 503760a955..329b499d5d 100644 --- a/google/cloud/aiplatform_v1/types/study.py +++ b/google/cloud/aiplatform_v1/types/study.py @@ -139,7 +139,7 @@ class Trial(proto.Message): Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. - web_access_uris (Sequence[google.cloud.aiplatform_v1.types.Trial.WebAccessUrisEntry]): + web_access_uris (Mapping[str, str]): Output only. URIs for accessing `interactive shells `__ (one URI for each training node). Only available if this @@ -270,6 +270,11 @@ class StudySpec(proto.Message): The automated early stopping spec using median rule. + This field is a member of `oneof`_ ``automated_stopping_spec``. + convex_automated_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.ConvexAutomatedStoppingSpec): + The automated early stopping spec using + convex stopping rule. + This field is a member of `oneof`_ ``automated_stopping_spec``. metrics (Sequence[google.cloud.aiplatform_v1.types.StudySpec.MetricSpec]): Required. Metric specs for the Study. @@ -717,6 +722,77 @@ class MedianAutomatedStoppingSpec(proto.Message): number=1, ) + class ConvexAutomatedStoppingSpec(proto.Message): + r"""Configuration for ConvexAutomatedStoppingSpec. When there are enough + completed trials (configured by min_measurement_count), for pending + trials with enough measurements and steps, the policy first computes + an overestimate of the objective value at max_num_steps according to + the slope of the incomplete objective value curve. No prediction can + be made if the curve is completely flat. If the overestimation is + worse than the best objective value of the completed trials, this + pending trial will be early-stopped, but a last measurement will be + added to the pending trial with max_num_steps and predicted + objective value from the autoregression model. + + Attributes: + max_step_count (int): + Steps used in predicting the final objective for early + stopped trials. In general, it's set to be the same as the + defined steps in training / tuning. If not defined, it will + learn it from the completed trials. When use_steps is false, + this field is set to the maximum elapsed seconds. 
+ min_step_count (int): + Minimum number of steps for a trial to complete. Trials + which do not have a measurement with step_count > + min_step_count won't be considered for early stopping. It's + ok to set it to 0, and a trial can be early stopped at any + stage. By default, min_step_count is set to be one-tenth of + the max_step_count. When use_elapsed_duration is true, this + field is set to the minimum elapsed seconds. + min_measurement_count (int): + The minimal number of measurements in a Trial. + Early-stopping checks will not trigger if less than + min_measurement_count+1 completed trials or pending trials + with less than min_measurement_count measurements. If not + defined, the default value is 5. + learning_rate_parameter_name (str): + The hyper-parameter name used in the tuning job that stands + for learning rate. Leave it blank if learning rate is not in + a parameter in tuning. The learning_rate is used to estimate + the objective value of the ongoing trial. + use_elapsed_duration (bool): + This bool determines whether or not the rule is applied + based on elapsed_secs or steps. If + use_elapsed_duration==false, the early stopping decision is + made according to the predicted objective values according + to the target steps. If use_elapsed_duration==true, + elapsed_secs is used instead of steps. Also, in this case, + the parameters max_num_steps and min_num_steps are + overloaded to contain max_elapsed_seconds and + min_elapsed_seconds. + """ + + max_step_count = proto.Field( + proto.INT64, + number=1, + ) + min_step_count = proto.Field( + proto.INT64, + number=2, + ) + min_measurement_count = proto.Field( + proto.INT64, + number=3, + ) + learning_rate_parameter_name = proto.Field( + proto.STRING, + number=4, + ) + use_elapsed_duration = proto.Field( + proto.BOOL, + number=5, + ) + decay_curve_stopping_spec = proto.Field( proto.MESSAGE, number=4, @@ -729,6 +805,12 @@ class MedianAutomatedStoppingSpec(proto.Message): oneof="automated_stopping_spec", message=MedianAutomatedStoppingSpec, ) + convex_automated_stopping_spec = proto.Field( + proto.MESSAGE, + number=9, + oneof="automated_stopping_spec", + message=ConvexAutomatedStoppingSpec, + ) metrics = proto.RepeatedField( proto.MESSAGE, number=1, diff --git a/google/cloud/aiplatform_v1/types/tensorboard.py b/google/cloud/aiplatform_v1/types/tensorboard.py index 3029527291..c50d38acf5 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1/types/tensorboard.py @@ -61,7 +61,7 @@ class Tensorboard(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Tensorboard was last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.Tensorboard.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Tensorboards. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1/types/tensorboard_experiment.py b/google/cloud/aiplatform_v1/types/tensorboard_experiment.py index 0e6746c715..a4d7d49db2 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_experiment.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_experiment.py @@ -46,7 +46,7 @@ class TensorboardExperiment(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this TensorboardExperiment was last updated. 
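A short sketch of plugging the new ``ConvexAutomatedStoppingSpec`` into a ``StudySpec`` (the step counts and parameter name are illustrative; metrics and parameters, which a real StudySpec also needs, are omitted):

.. code-block:: python

    from google.cloud.aiplatform_v1.types import study

    study_spec = study.StudySpec(
        convex_automated_stopping_spec=study.StudySpec.ConvexAutomatedStoppingSpec(
            max_step_count=1000,
            min_step_count=100,  # roughly max_step_count / 10, matching the default described above
            min_measurement_count=5,
            learning_rate_parameter_name="learning_rate",
        ),
    )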
- labels (Sequence[google.cloud.aiplatform_v1.types.TensorboardExperiment.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Datasets. diff --git a/google/cloud/aiplatform_v1/types/tensorboard_run.py b/google/cloud/aiplatform_v1/types/tensorboard_run.py index 0469519fcb..c8ca4bc3be 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_run.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_run.py @@ -48,7 +48,7 @@ class TensorboardRun(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this TensorboardRun was last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.TensorboardRun.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your TensorboardRuns. diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py index fea119c5a0..f059979edd 100644 --- a/google/cloud/aiplatform_v1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -128,7 +128,7 @@ class TrainingPipeline(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the TrainingPipeline was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 905c1b778b..6bf3428e83 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -93,6 +93,7 @@ from .types.encryption_spec import EncryptionSpec from .types.endpoint import DeployedModel from .types.endpoint import Endpoint +from .types.endpoint import PredictRequestResponseLoggingConfig from .types.endpoint import PrivateEndpoints from .types.endpoint_service import CreateEndpointOperationMetadata from .types.endpoint_service import CreateEndpointRequest @@ -113,6 +114,7 @@ from .types.execution import Execution from .types.explanation import Attribution from .types.explanation import BlurBaselineConfig +from .types.explanation import Examples from .types.explanation import Explanation from .types.explanation import ExplanationMetadataOverride from .types.explanation import ExplanationParameters @@ -122,7 +124,6 @@ from .types.explanation import IntegratedGradientsAttribution from .types.explanation import ModelExplanation from .types.explanation import SampledShapleyAttribution -from .types.explanation import Similarity from .types.explanation import SmoothGradConfig from .types.explanation import XraiAttribution from .types.explanation_metadata import ExplanationMetadata @@ -258,6 +259,7 @@ from .types.machine_resources import DedicatedResources from .types.machine_resources import DiskSpec from .types.machine_resources import MachineSpec +from .types.machine_resources import NfsMount from .types.machine_resources import ResourcesConsumed from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters from .types.metadata_schema import MetadataSchema @@ -342,6 +344,7 @@ from .types.model_monitoring import SamplingStrategy from .types.model_monitoring import ThresholdConfig from .types.model_service import DeleteModelRequest +from .types.model_service import DeleteModelVersionRequest from .types.model_service import 
ExportModelOperationMetadata from .types.model_service import ExportModelRequest from .types.model_service import ExportModelResponse @@ -355,6 +358,9 @@ from .types.model_service import ListModelEvaluationsResponse from .types.model_service import ListModelsRequest from .types.model_service import ListModelsResponse +from .types.model_service import ListModelVersionsRequest +from .types.model_service import ListModelVersionsResponse +from .types.model_service import MergeVersionAliasesRequest from .types.model_service import UpdateModelRequest from .types.model_service import UploadModelOperationMetadata from .types.model_service import UploadModelRequest @@ -615,6 +621,7 @@ "DeleteMetadataStoreRequest", "DeleteModelDeploymentMonitoringJobRequest", "DeleteModelRequest", + "DeleteModelVersionRequest", "DeleteOperationMetadata", "DeletePipelineJobRequest", "DeleteSpecialistPoolRequest", @@ -645,6 +652,7 @@ "EntityType", "EnvVar", "Event", + "Examples", "Execution", "ExplainRequest", "ExplainResponse", @@ -775,6 +783,8 @@ "ListModelEvaluationSlicesResponse", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", + "ListModelVersionsRequest", + "ListModelVersionsResponse", "ListModelsRequest", "ListModelsResponse", "ListOptimalTrialsRequest", @@ -801,6 +811,7 @@ "MachineSpec", "ManualBatchTuningParameters", "Measurement", + "MergeVersionAliasesRequest", "MetadataSchema", "MetadataServiceClient", "MetadataStore", @@ -826,6 +837,7 @@ "MutateDeployedIndexRequest", "MutateDeployedIndexResponse", "NearestNeighborSearchOperationMetadata", + "NfsMount", "PauseModelDeploymentMonitoringJobRequest", "PipelineJob", "PipelineJobDetail", @@ -836,6 +848,7 @@ "Port", "PredefinedSplit", "PredictRequest", + "PredictRequestResponseLoggingConfig", "PredictResponse", "PredictSchemata", "PredictionServiceClient", @@ -873,7 +886,6 @@ "SearchMigratableResourcesResponse", "SearchModelDeploymentMonitoringStatsAnomaliesRequest", "SearchModelDeploymentMonitoringStatsAnomaliesResponse", - "Similarity", "SmoothGradConfig", "SpecialistPool", "SpecialistPoolServiceClient", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index 1be3df6faa..8786a28c47 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -1271,6 +1271,11 @@ "delete_model" ] }, + "DeleteModelVersion": { + "methods": [ + "delete_model_version" + ] + }, "ExportModel": { "methods": [ "export_model" @@ -1306,11 +1311,21 @@ "list_model_evaluations" ] }, + "ListModelVersions": { + "methods": [ + "list_model_versions" + ] + }, "ListModels": { "methods": [ "list_models" ] }, + "MergeVersionAliases": { + "methods": [ + "merge_version_aliases" + ] + }, "UpdateModel": { "methods": [ "update_model" @@ -1331,6 +1346,11 @@ "delete_model" ] }, + "DeleteModelVersion": { + "methods": [ + "delete_model_version" + ] + }, "ExportModel": { "methods": [ "export_model" @@ -1366,11 +1386,21 @@ "list_model_evaluations" ] }, + "ListModelVersions": { + "methods": [ + "list_model_versions" + ] + }, "ListModels": { "methods": [ "list_models" ] }, + "MergeVersionAliases": { + "methods": [ + "merge_version_aliases" + ] + }, "UpdateModel": { "methods": [ "update_model" diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index d119deb5fb..4091a72cf2 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ 
b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index 80ad8d8db9..f0d94d3816 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index a21e6d8fea..74a13b9739 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -85,6 +85,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -292,5 +293,9 @@ def list_annotations( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("DatasetServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index 87f5a68822..4613fd6025 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -520,5 +520,9 @@ def list_annotations( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("DatasetServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 178a96aa7e..3b939e5c71 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -804,9 +804,7 @@ async def deploy_model( *, endpoint: str = None, deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[ - endpoint_service.DeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -814,7 +812,6 @@ async def deploy_model( r"""Deploys a Model into this Endpoint, creating a DeployedModel within it. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 @@ -866,7 +863,7 @@ def sample_deploy_model(): This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]`): + traffic_split (:class:`Mapping[str, int]`): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -963,9 +960,7 @@ async def undeploy_model( *, endpoint: str = None, deployed_model_id: str = None, - traffic_split: Sequence[ - endpoint_service.UndeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -974,7 +969,6 @@ async def undeploy_model( DeployedModel from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1018,7 +1012,7 @@ def sample_undeploy_model(): This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]`): + traffic_split (:class:`Mapping[str, int]`): If this field is provided, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index 0f70564671..c43e9965b0 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -1081,9 +1081,7 @@ def deploy_model( *, endpoint: str = None, deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[ - endpoint_service.DeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1091,7 +1089,6 @@ def deploy_model( r"""Deploys a Model into this Endpoint, creating a DeployedModel within it. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1143,7 +1140,7 @@ def sample_deploy_model(): This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. 
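With ``traffic_split`` now annotated as ``Mapping[str, int]``, the argument is a plain dict keyed by DeployedModel ID. A hedged sketch of deploying a model and routing all traffic to it (resource names are placeholders; the special ``"0"`` key stands for the model being deployed, per the Endpoint traffic_split documentation):

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.EndpointServiceClient()

    deployed_model = aiplatform_v1beta1.DeployedModel(
        model="projects/my-project/locations/us-central1/models/123",
        dedicated_resources=aiplatform_v1beta1.DedicatedResources(
            machine_spec=aiplatform_v1beta1.MachineSpec(machine_type="n1-standard-4"),
            min_replica_count=1,
        ),
    )
    operation = client.deploy_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/456",
        deployed_model=deployed_model,
        traffic_split={"0": 100},  # "0" = the DeployedModel created by this call
    )
    response = operation.result()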
@@ -1239,9 +1236,7 @@ def undeploy_model( *, endpoint: str = None, deployed_model_id: str = None, - traffic_split: Sequence[ - endpoint_service.UndeployModelRequest.TrafficSplitEntry - ] = None, + traffic_split: Mapping[str, int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1250,7 +1245,6 @@ def undeploy_model( DeployedModel from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1294,7 +1288,7 @@ def sample_undeploy_model(): This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): If this field is provided, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index 0fae9183ee..6d9015fa08 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -84,6 +84,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -241,5 +242,9 @@ def undeploy_model( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("EndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py index 8ff5abc059..a90f4ccefb 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -436,5 +436,9 @@ def undeploy_model( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("EndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py index 9eba5f97cc..6133afe135 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Mapping, Optional, AsyncIterable, Awaitable, @@ -244,7 +245,6 @@ async def read_feature_values( entities of an EntityType, please use StreamingReadFeatureValues. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -357,7 +357,6 @@ def streaming_read_feature_values( on their size, data for different entities may be broken up across multiple responses. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py index 8df25dc5f0..5d48e80724 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -452,7 +452,6 @@ def read_feature_values( entities of an EntityType, please use StreamingReadFeatureValues. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -565,7 +564,6 @@ def streaming_read_feature_values( on their size, data for different entities may be broken up across multiple responses. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py index 1ae632f3fa..f5c798f5e5 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py @@ -80,6 +80,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -165,5 +166,9 @@ def streaming_read_feature_values( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("FeaturestoreOnlineServingServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py index 752634b857..9e10f4a6d8 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py @@ -297,5 +297,9 @@ def streaming_read_feature_values( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("FeaturestoreOnlineServingServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index c4ddf221af..eb1537cec1 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -242,7 +242,6 @@ async def create_featurestore( r"""Creates a new Featurestore in a given project and location. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 @@ -724,7 +723,6 @@ async def delete_featurestore( any EntityTypes or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1336,7 +1334,6 @@ async def delete_entity_type( Features or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2216,7 +2213,6 @@ async def import_feature_values( or retention policy. - Online serving cluster is under-provisioned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2346,7 +2342,6 @@ async def batch_read_feature_values( correctness is guaranteed for Feature values of each read instance as of each instance's read timestamp. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2475,7 +2470,6 @@ async def export_feature_values( r"""Exports Feature values from all the entities of a target EntityType. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2600,7 +2594,6 @@ async def search_features( r"""Searches Features matching a query in a given project. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index 7889852a39..71251ba078 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -507,7 +507,6 @@ def create_featurestore( r"""Creates a new Featurestore in a given project and location. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -989,7 +988,6 @@ def delete_featurestore( any EntityTypes or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1601,7 +1599,6 @@ def delete_entity_type( Features or ``force`` must be set to true for the request to succeed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2481,7 +2478,6 @@ def import_feature_values( or retention policy. - Online serving cluster is under-provisioned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2611,7 +2607,6 @@ def batch_read_feature_values( correctness is guaranteed for Feature values of each read instance as of each instance's read timestamp. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2742,7 +2737,6 @@ def export_feature_values( r"""Exports Feature values from all the entities of a target EntityType. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2867,7 +2861,6 @@ def search_features( r"""Searches Features matching a query in a given project. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py index 90ccc07c94..0deddb7c30 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py @@ -87,6 +87,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -435,5 +436,9 @@ def search_features( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("FeaturestoreServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py index 85b3e62051..ba4bf6adfd 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py @@ -843,5 +843,9 @@ def search_features( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("FeaturestoreServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index 22af155b7f..14e2977345 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -791,7 +791,6 @@ async def deploy_index( DeployedIndex within it. Only non-empty Indexes can be deployed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -923,7 +922,6 @@ async def undeploy_index( DeployedIndex from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1050,7 +1048,6 @@ async def mutate_deployed_index( r"""Update an existing DeployedIndex under an IndexEndpoint. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index 800b89f834..ab97aaaf9e 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -1032,7 +1032,6 @@ def deploy_index( DeployedIndex within it. Only non-empty Indexes can be deployed. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1164,7 +1163,6 @@ def undeploy_index( DeployedIndex from it, and freeing all resources it's using. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1291,7 +1289,6 @@ def mutate_deployed_index( r"""Update an existing DeployedIndex under an IndexEndpoint. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py index ce4566feee..2456c7d98c 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -84,6 +84,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -258,5 +259,9 @@ def mutate_deployed_index( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("IndexEndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index a98f9516d8..0a65758376 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -480,5 +480,9 @@ def mutate_deployed_index( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("IndexEndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py index a483b95b07..77c1d8695e 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -682,7 +682,6 @@ async def delete_index( [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_service/client.py index d90408f3e3..1380a6dd30 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -926,7 +926,6 @@ def delete_index( [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py index 198a9e8718..7cbe58e9df 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py @@ -83,6 +83,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -211,5 +212,9 @@ def delete_index( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("IndexServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py index 2914c9f432..38ec09d8d2 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -381,5 +381,9 @@ def delete_index( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("IndexServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index d5e2268ea9..ecc727ee93 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -279,7 +279,6 @@ async def create_custom_job( r"""Creates a CustomJob. A created CustomJob right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -737,7 +736,6 @@ async def cancel_custom_job( [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1262,7 +1260,6 @@ async def cancel_data_labeling_job( r"""Cancels a DataLabelingJob. Success of cancellation is not guaranteed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1807,7 +1804,6 @@ async def cancel_hyperparameter_tuning_job( [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1894,7 +1890,6 @@ async def create_batch_prediction_job( r"""Creates a BatchPredictionJob. A BatchPredictionJob once created will right away be attempted to start. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2222,7 +2217,6 @@ async def delete_batch_prediction_job( r"""Deletes a BatchPredictionJob. Can only be called on jobs that already finished. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2355,7 +2349,6 @@ async def cancel_batch_prediction_job( is set to ``CANCELLED``. Any files already outputted by the job are not deleted. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2444,7 +2437,6 @@ async def create_model_deployment_monitoring_job( r"""Creates a ModelDeploymentMonitoringJob. It will run periodically on a configured interval. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2562,7 +2554,6 @@ async def search_model_deployment_monitoring_stats_anomalies( r"""Searches Model Monitoring Statistics generated within a given time window. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3189,7 +3180,6 @@ async def pause_model_deployment_monitoring_job( [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3278,7 +3268,6 @@ async def resume_model_deployment_monitoring_job( will start to run from next scheduled time. A deleted ModelDeploymentMonitoringJob can't be resumed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index c86bf22779..0f4d8bf9e6 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -696,7 +696,6 @@ def create_custom_job( r"""Creates a CustomJob. A created CustomJob right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1154,7 +1153,6 @@ def cancel_custom_job( [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1679,7 +1677,6 @@ def cancel_data_labeling_job( r"""Cancels a DataLabelingJob. Success of cancellation is not guaranteed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2232,7 +2229,6 @@ def cancel_hyperparameter_tuning_job( [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2321,7 +2317,6 @@ def create_batch_prediction_job( r"""Creates a BatchPredictionJob. A BatchPredictionJob once created will right away be attempted to start. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2653,7 +2648,6 @@ def delete_batch_prediction_job( r"""Deletes a BatchPredictionJob. Can only be called on jobs that already finished. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2788,7 +2782,6 @@ def cancel_batch_prediction_job( is set to ``CANCELLED``. Any files already outputted by the job are not deleted. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2879,7 +2872,6 @@ def create_model_deployment_monitoring_job( r"""Creates a ModelDeploymentMonitoringJob. It will run periodically on a configured interval. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3003,7 +2995,6 @@ def search_model_deployment_monitoring_stats_anomalies( r"""Searches Model Monitoring Statistics generated within a given time window. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3652,7 +3643,6 @@ def pause_model_deployment_monitoring_job( [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3745,7 +3735,6 @@ def resume_model_deployment_monitoring_job( will start to run from next scheduled time. A deleted ModelDeploymentMonitoringJob can't be resumed. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index cf545a0557..abeea8738b 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -101,6 +101,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -593,5 +594,9 @@ def resume_model_deployment_monitoring_job( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("JobServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index c53d1e39b1..a81f6eacf5 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -1133,5 +1133,9 @@ def resume_model_deployment_monitoring_job( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("JobServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py index 9dbf97ce66..a17981e5c4 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -248,7 +248,6 @@ async def create_metadata_store( r"""Initializes a MetadataStore, including allocation of resources. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -592,7 +591,6 @@ async def delete_metadata_store( r"""Deletes a single MetadataStore and all its child resources (Artifacts, Executions, and Contexts). - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1072,7 +1070,7 @@ def sample_update_artifact(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -1738,7 +1736,7 @@ def sample_update_context(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. 
A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -2053,7 +2051,6 @@ async def add_context_artifacts_and_executions( If any of the Artifacts or Executions have already been added to a Context, they are simply skipped. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2179,7 +2176,6 @@ async def add_context_children( cycle or cause any Context to have more than 10 parents, the request will fail with an INVALID_ARGUMENT error. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2290,7 +2286,6 @@ async def query_context_lineage_subgraph( specified Context, connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2752,7 +2747,7 @@ def sample_update_execution(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -3066,7 +3061,6 @@ async def add_execution_events( between the Execution and the Artifact, the Event is skipped. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3176,7 +3170,6 @@ async def query_execution_inputs_and_outputs( this Execution, in the form of LineageSubgraph that also contains the Execution and connecting Events. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3610,7 +3603,6 @@ async def query_artifact_lineage_subgraph( Artifacts and Executions connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py index d43e613e0e..0e852ec454 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -556,7 +556,6 @@ def create_metadata_store( r"""Initializes a MetadataStore, including allocation of resources. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -900,7 +899,6 @@ def delete_metadata_store( r"""Deletes a single MetadataStore and all its child resources (Artifacts, Executions, and Contexts). - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1380,7 +1378,7 @@ def sample_update_artifact(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -2046,7 +2044,7 @@ def sample_update_context(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. 
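The hunks above relax the documented ``update_mask`` annotation on the metadata update methods (``update_artifact``, ``update_context``, ``update_execution``) from Required to Optional, while still noting that the field's functionality is not yet supported. A minimal sketch of what the relaxed annotation permits on the flattened call, assuming application-default credentials and placeholder project, location, and resource IDs:

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    def sample_update_context_without_mask():
        # Create a client (uses application-default credentials).
        client = aiplatform_v1beta1.MetadataServiceClient()

        # Name an existing Context and set only the fields to change;
        # every resource ID below is a placeholder.
        context = aiplatform_v1beta1.Context(
            name=(
                "projects/my-project/locations/us-central1/"
                "metadataStores/default/contexts/my-context"
            ),
            display_name="updated-display-name",
        )

        # With update_mask documented as Optional, the flattened call
        # can omit it and pass only the resource to update.
        response = client.update_context(context=context)

        # Handle the response
        print(response)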
@@ -2361,7 +2359,6 @@ def add_context_artifacts_and_executions( If any of the Artifacts or Executions have already been added to a Context, they are simply skipped. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2491,7 +2488,6 @@ def add_context_children( cycle or cause any Context to have more than 10 parents, the request will fail with an INVALID_ARGUMENT error. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2602,7 +2598,6 @@ def query_context_lineage_subgraph( specified Context, connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3066,7 +3061,7 @@ def sample_update_execution(): on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. @@ -3380,7 +3375,6 @@ def add_execution_events( between the Execution and the Artifact, the Event is skipped. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3490,7 +3484,6 @@ def query_execution_inputs_and_outputs( this Execution, in the form of LineageSubgraph that also contains the Execution and connecting Events. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3928,7 +3921,6 @@ def query_artifact_lineage_subgraph( Artifacts and Executions connected by Event edges and returned as a LineageSubgraph. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py index 5ffbc04350..87d0695bee 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py @@ -92,6 +92,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -620,5 +621,9 @@ def query_artifact_lineage_subgraph( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("MetadataServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py index ab5aaf10aa..9c6e929fd7 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -1134,5 +1134,9 @@ def query_artifact_lineage_subgraph( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("MetadataServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py index 2042285909..8728c3f1ed 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -235,7 +235,6 @@ async def search_migratable_resources( ml.googleapis.com that can be migrated to Vertex AI's given location. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -352,7 +351,6 @@ async def batch_migrate_resources( automl.googleapis.com, and datalabeling.googleapis.com to Vertex AI. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index c728f8f156..c8a4120cef 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -192,40 +192,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -572,7 +572,6 @@ def search_migratable_resources( ml.googleapis.com that can be migrated to Vertex AI's given location. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -691,7 +690,6 @@ def batch_migrate_resources( automl.googleapis.com, and datalabeling.googleapis.com to Vertex AI. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py index ca2a177dd6..f1ea087e4e 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -82,6 +82,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host: host += ":443" @@ -169,5 +170,9 @@ def batch_migrate_resources( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("MigrationServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py index 3b8163cbba..bc87a02497 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -310,5 +310,9 @@ def batch_migrate_resources( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("MigrationServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index 6330543ac3..897819a3ec 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -558,6 +558,114 @@ def sample_list_models(): # Done; return the response. return response + async def list_model_versions( + self, + request: Union[model_service.ListModelVersionsRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelVersionsAsyncPager: + r"""Lists versions of the specified model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]. + name (:class:`str`): + Required. The name of the model to + list versions for. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsAsyncPager: + Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.ListModelVersionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_versions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelVersionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + async def update_model( self, request: Union[model_service.UpdateModelRequest, dict] = None, @@ -597,8 +705,29 @@ def sample_update_model(): The request object. Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): - Required. The Model which replaces - the resource on the server. + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -686,7 +815,6 @@ async def delete_model( [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -797,6 +925,255 @@ def sample_delete_model(): # Done; return the response. return response + async def delete_model_version( + self, + request: Union[model_service.DeleteModelVersionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. 
Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest, dict]): + The request object. Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion]. + name (:class:`str`): + Required. The name of the model version to be deleted, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.DeleteModelVersionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_version, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def merge_version_aliases( + self, + request: Union[model_service.MergeVersionAliasesRequest, dict] = None, + *, + name: str = None, + version_aliases: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Merges a set of aliases for a Model version. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest, dict]): + The request object. Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases]. + name (:class:`str`): + Required. The name of the model version to merge + aliases, with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + version_aliases (:class:`Sequence[str]`): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix + to an alias means removing that alias from the version. + ``-`` is NOT counted in the 128 characters. Example: + ``-golden`` means removing the ``golden`` alias from the + version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have + the exactly same order from this MergeVersionAliases + API. 2) Adding and deleting the same alias in the + request is not recommended, and the 2 operations will + be cancelled out. + + This corresponds to the ``version_aliases`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, version_aliases]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.MergeVersionAliasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if version_aliases: + request.version_aliases.extend(version_aliases) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.merge_version_aliases, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def export_model( self, request: Union[model_service.ExportModelRequest, dict] = None, @@ -812,7 +1189,6 @@ async def export_model( least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 6368b818a1..02acd097e1 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -862,6 +862,114 @@ def sample_list_models(): # Done; return the response. return response + def list_model_versions( + self, + request: Union[model_service.ListModelVersionsRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelVersionsPager: + r"""Lists versions of the specified model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]. + name (str): + Required. The name of the model to + list versions for. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsPager: + Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelVersionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelVersionsRequest): + request = model_service.ListModelVersionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_versions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelVersionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + def update_model( self, request: Union[model_service.UpdateModelRequest, dict] = None, @@ -901,8 +1009,29 @@ def sample_update_model(): The request object. Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model which replaces - the resource on the server. + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -990,7 +1119,6 @@ def delete_model( [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1101,6 +1229,255 @@ def sample_delete_model(): # Done; return the response. 
return response + def delete_model_version( + self, + request: Union[model_service.DeleteModelVersionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest, dict]): + The request object. Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion]. + name (str): + Required. The name of the model version to be deleted, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteModelVersionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteModelVersionRequest): + request = model_service.DeleteModelVersionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_model_version] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def merge_version_aliases( + self, + request: Union[model_service.MergeVersionAliasesRequest, dict] = None, + *, + name: str = None, + version_aliases: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Merges a set of aliases for a Model version. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest, dict]): + The request object. Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases]. + name (str): + Required. The name of the model version to merge + aliases, with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + version_aliases (Sequence[str]): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix + to an alias means removing that alias from the version. + ``-`` is NOT counted in the 128 characters. Example: + ``-golden`` means removing the ``golden`` alias from the + version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have + the exactly same order from this MergeVersionAliases + API. 2) Adding and deleting the same alias in the + request is not recommended, and the 2 operations will + be cancelled out. + + This corresponds to the ``version_aliases`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, version_aliases]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.MergeVersionAliasesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.MergeVersionAliasesRequest): + request = model_service.MergeVersionAliasesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if version_aliases is not None: + request.version_aliases = version_aliases + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.merge_version_aliases] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def export_model( self, request: Union[model_service.ExportModelRequest, dict] = None, @@ -1116,7 +1493,6 @@ def export_model( least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py index 71e681b306..5c6dd885ff 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py @@ -158,6 +158,134 @@ def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) +class ListModelVersionsPager: + """A pager for iterating through ``list_model_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelVersions`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_service.ListModelVersionsResponse], + request: model_service.ListModelVersionsRequest, + response: model_service.ListModelVersionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelVersionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelVersionsAsyncPager: + """A pager for iterating through ``list_model_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelVersions`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[model_service.ListModelVersionsResponse]], + request: model_service.ListModelVersionsRequest, + response: model_service.ListModelVersionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelVersionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + class ListModelEvaluationsPager: """A pager for iterating through ``list_model_evaluations`` requests. 
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index 4c5811183b..232a8945bd 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -89,6 +89,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -144,6 +145,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=5.0, client_info=client_info, ), + self.list_model_versions: gapic_v1.method.wrap_method( + self.list_model_versions, + default_timeout=None, + client_info=client_info, + ), self.update_model: gapic_v1.method.wrap_method( self.update_model, default_timeout=5.0, @@ -154,6 +160,16 @@ def _prep_wrapped_messages(self, client_info): default_timeout=5.0, client_info=client_info, ), + self.delete_model_version: gapic_v1.method.wrap_method( + self.delete_model_version, + default_timeout=None, + client_info=client_info, + ), + self.merge_version_aliases: gapic_v1.method.wrap_method( + self.merge_version_aliases, + default_timeout=None, + client_info=client_info, + ), self.export_model: gapic_v1.method.wrap_method( self.export_model, default_timeout=5.0, @@ -229,6 +245,18 @@ def list_models( ]: raise NotImplementedError() + @property + def list_model_versions( + self, + ) -> Callable[ + [model_service.ListModelVersionsRequest], + Union[ + model_service.ListModelVersionsResponse, + Awaitable[model_service.ListModelVersionsResponse], + ], + ]: + raise NotImplementedError() + @property def update_model( self, @@ -247,6 +275,24 @@ def delete_model( ]: raise NotImplementedError() + @property + def delete_model_version( + self, + ) -> Callable[ + [model_service.DeleteModelVersionRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def merge_version_aliases( + self, + ) -> Callable[ + [model_service.MergeVersionAliasesRequest], + Union[model.Model, Awaitable[model.Model]], + ]: + raise NotImplementedError() + @property def export_model( self, @@ -316,5 +362,9 @@ def list_model_evaluation_slices( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("ModelServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 2e3a393ce1..9e7c5971a4 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -327,6 +327,35 @@ def list_models( ) return self._stubs["list_models"] + @property + def list_model_versions( + self, + ) -> Callable[ + [model_service.ListModelVersionsRequest], + model_service.ListModelVersionsResponse, + ]: + r"""Return a callable for the list model versions method over gRPC. + + Lists versions of the specified model. + + Returns: + Callable[[~.ListModelVersionsRequest], + ~.ListModelVersionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_versions" not in self._stubs: + self._stubs["list_model_versions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/ListModelVersions", + request_serializer=model_service.ListModelVersionsRequest.serialize, + response_deserializer=model_service.ListModelVersionsResponse.deserialize, + ) + return self._stubs["list_model_versions"] + @property def update_model( self, @@ -387,6 +416,64 @@ def delete_model( ) return self._stubs["delete_model"] + @property + def delete_model_version( + self, + ) -> Callable[[model_service.DeleteModelVersionRequest], operations_pb2.Operation]: + r"""Return a callable for the delete model version method over gRPC. + + Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + Returns: + Callable[[~.DeleteModelVersionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model_version" not in self._stubs: + self._stubs["delete_model_version"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/DeleteModelVersion", + request_serializer=model_service.DeleteModelVersionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_model_version"] + + @property + def merge_version_aliases( + self, + ) -> Callable[[model_service.MergeVersionAliasesRequest], model.Model]: + r"""Return a callable for the merge version aliases method over gRPC. + + Merges a set of aliases for a Model version. + + Returns: + Callable[[~.MergeVersionAliasesRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "merge_version_aliases" not in self._stubs: + self._stubs["merge_version_aliases"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/MergeVersionAliases", + request_serializer=model_service.MergeVersionAliasesRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["merge_version_aliases"] + @property def export_model( self, @@ -563,5 +650,9 @@ def list_model_evaluation_slices( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("ModelServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index ae6b4fcb93..e88c9094aa 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -338,6 +338,35 @@ def list_models( ) return self._stubs["list_models"] + @property + def list_model_versions( + self, + ) -> Callable[ + [model_service.ListModelVersionsRequest], + Awaitable[model_service.ListModelVersionsResponse], + ]: + r"""Return a callable for the list model versions method over gRPC. + + Lists versions of the specified model. + + Returns: + Callable[[~.ListModelVersionsRequest], + Awaitable[~.ListModelVersionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_versions" not in self._stubs: + self._stubs["list_model_versions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/ListModelVersions", + request_serializer=model_service.ListModelVersionsRequest.serialize, + response_deserializer=model_service.ListModelVersionsResponse.deserialize, + ) + return self._stubs["list_model_versions"] + @property def update_model( self, @@ -400,6 +429,66 @@ def delete_model( ) return self._stubs["delete_model"] + @property + def delete_model_version( + self, + ) -> Callable[ + [model_service.DeleteModelVersionRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete model version method over gRPC. + + Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + Returns: + Callable[[~.DeleteModelVersionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_model_version" not in self._stubs: + self._stubs["delete_model_version"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/DeleteModelVersion", + request_serializer=model_service.DeleteModelVersionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_model_version"] + + @property + def merge_version_aliases( + self, + ) -> Callable[[model_service.MergeVersionAliasesRequest], Awaitable[model.Model]]: + r"""Return a callable for the merge version aliases method over gRPC. + + Merges a set of aliases for a Model version. + + Returns: + Callable[[~.MergeVersionAliasesRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "merge_version_aliases" not in self._stubs: + self._stubs["merge_version_aliases"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/MergeVersionAliases", + request_serializer=model_service.MergeVersionAliasesRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["merge_version_aliases"] + @property def export_model( self, diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index 3c3fbbd517..283878ea02 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -255,7 +255,6 @@ async def create_training_pipeline( r"""Creates a TrainingPipeline. A created TrainingPipeline right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -713,7 +712,6 @@ async def cancel_training_pipeline( [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -801,7 +799,6 @@ async def create_pipeline_job( r"""Creates a PipelineJob. A PipelineJob will run immediately when created. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1255,7 +1252,6 @@ async def cancel_pipeline_job( [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] is set to ``CANCELLED``. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 1c9efbfee6..35c577acee 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -638,7 +638,6 @@ def create_training_pipeline( r"""Creates a TrainingPipeline. A created TrainingPipeline right away will be attempted to be run. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1096,7 +1095,6 @@ def cancel_training_pipeline( [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1184,7 +1182,6 @@ def create_pipeline_job( r"""Creates a PipelineJob. A PipelineJob will run immediately when created. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1638,7 +1635,6 @@ def cancel_pipeline_job( [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] is set to ``CANCELLED``. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index b91866aa45..730c005fb6 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -89,6 +89,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -297,5 +298,9 @@ def cancel_pipeline_job( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("PipelineServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 0308bfbbb9..b1c13101fb 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -561,5 +561,9 @@ def cancel_pipeline_job( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index a5ecb31fda..6ef78767f1 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -362,7 +362,6 @@ async def raw_predict( [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served this prediction. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -545,7 +544,6 @@ async def explain( populated. Only deployed AutoML tabular Models have explanation_spec. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 42c1da6d66..f1c9bde6bd 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -606,7 +606,6 @@ def raw_predict( [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served this prediction. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -789,7 +788,6 @@ def explain( populated. Only deployed AutoML tabular Models have explanation_spec. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index bde625137d..bfc1fd7433 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -81,6 +81,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -180,5 +181,9 @@ def explain( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("PredictionServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 9294f76d3c..88a76f75c5 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -334,5 +334,9 @@ def explain( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("PredictionServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index f9f360b64e..6fafc8eaf5 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -584,7 +584,6 @@ async def delete_specialist_pool( r"""Deletes a SpecialistPool as well as all Specialists in the pool. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index ab87308083..78d9b7348e 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -801,7 +801,6 @@ def delete_specialist_pool( r"""Deletes a SpecialistPool as well as all Specialists in the pool. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index 9810015efd..2eac1de5f8 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -83,6 +83,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -214,5 +215,9 @@ def update_specialist_pool( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("SpecialistPoolServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py index 24ddb7076b..aced58a56d 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -396,5 +396,9 @@ def update_specialist_pool( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("SpecialistPoolServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index 80cb9dd7bc..94de444d8f 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Mapping, Optional, AsyncIterable, Awaitable, @@ -2122,7 +2123,6 @@ async def batch_create_tensorboard_time_series( r"""Batch create TensorboardTimeSeries that belong to a TensorboardExperiment. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2818,7 +2818,6 @@ async def batch_read_tensorboard_time_series_data( Otherwise, that limit number of data points will be randomly selected from this time series and returned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -2928,7 +2927,6 @@ async def read_tensorboard_time_series_data( from this time series and returned. This value can be changed by changing max_data_points, which can't be greater than 10k. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3030,7 +3028,6 @@ def read_tensorboard_blob_data( project's Cloud Storage bucket without users having to obtain Cloud Storage access permission. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3137,7 +3134,6 @@ async def write_tensorboard_experiment_data( TensorboardTimeSeries in multiple TensorboardRun's. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3254,7 +3250,6 @@ async def write_tensorboard_run_data( TensorboardTimeSeries under a TensorboardRun. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3376,7 +3371,6 @@ async def export_tensorboard_time_series_data( r"""Exports a TensorboardTimeSeries' data. Data is returned in paginated responses. - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index f78c692fd2..9eb39795e7 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -2420,7 +2420,6 @@ def batch_create_tensorboard_time_series( r"""Batch create TensorboardTimeSeries that belong to a TensorboardExperiment. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3140,7 +3139,6 @@ def batch_read_tensorboard_time_series_data( Otherwise, that limit number of data points will be randomly selected from this time series and returned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3256,7 +3254,6 @@ def read_tensorboard_time_series_data( from this time series and returned. This value can be changed by changing max_data_points, which can't be greater than 10k. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3362,7 +3359,6 @@ def read_tensorboard_blob_data( project's Cloud Storage bucket without users having to obtain Cloud Storage access permission. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3471,7 +3467,6 @@ def write_tensorboard_experiment_data( TensorboardTimeSeries in multiple TensorboardRun's. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3592,7 +3587,6 @@ def write_tensorboard_run_data( TensorboardTimeSeries under a TensorboardRun. If any data fail to be ingested, an error will be returned. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -3716,7 +3710,6 @@ def export_tensorboard_time_series_data( r"""Exports a TensorboardTimeSeries' data. Data is returned in paginated responses. - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py index 081a781301..0623f08c9b 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -93,6 +93,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -603,5 +604,9 @@ def export_tensorboard_time_series_data( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("TensorboardServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py index 24d5d6001b..36ab5fcc18 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -1109,5 +1109,9 @@ def export_tensorboard_time_series_data( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("TensorboardServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index 5f9ea9c73c..517bfc30d9 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -228,7 +228,6 @@ async def create_study( r"""Creates a Study. A resource name will be generated after creation of the Study. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -435,7 +434,6 @@ async def list_studies( r"""Lists all the studies in a region for an associated project. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -630,7 +628,6 @@ async def lookup_study( r"""Looks a study up using the user-defined display_name field instead of the fully qualified resource name. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -729,7 +726,6 @@ async def suggest_trials( long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1137,7 +1133,6 @@ async def add_trial_measurement( Trial. This measurement is assumed to have been taken before the Trial is complete. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1378,7 +1373,6 @@ async def check_trial_early_stopping_state( will contain a [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1550,7 +1544,6 @@ async def list_optimal_trials( pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency - .. 
code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index cc936e692b..f6f5ae69d3 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -492,7 +492,6 @@ def create_study( r"""Creates a Study. A resource name will be generated after creation of the Study. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -699,7 +698,6 @@ def list_studies( r"""Lists all the studies in a region for an associated project. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -894,7 +892,6 @@ def lookup_study( r"""Looks a study up using the user-defined display_name field instead of the fully qualified resource name. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -993,7 +990,6 @@ def suggest_trials( long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1402,7 +1398,6 @@ def add_trial_measurement( Trial. This measurement is assumed to have been taken before the Trial is complete. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1645,7 +1640,6 @@ def check_trial_early_stopping_state( will contain a [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - .. code-block:: python from google.cloud import aiplatform_v1beta1 @@ -1821,7 +1815,6 @@ def list_optimal_trials( pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency - .. code-block:: python from google.cloud import aiplatform_v1beta1 diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py index 8d1e50daa9..9611a8fe84 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py @@ -85,6 +85,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -355,5 +356,9 @@ def list_optimal_trials( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("VizierServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index ed6f69b166..c80ebfcf5c 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -664,5 +664,9 @@ def list_optimal_trials( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("VizierServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index cdd7349d5b..7951d5ca8c 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -13,12 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .annotation import Annotation -from .annotation_spec import AnnotationSpec -from .artifact import Artifact -from .batch_prediction_job import BatchPredictionJob -from .completion_stats import CompletionStats -from .context import Context +from .annotation import ( + Annotation, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .artifact import ( + Artifact, +) +from .batch_prediction_job import ( + BatchPredictionJob, +) +from .completion_stats import ( + CompletionStats, +) +from .context import ( + Context, +) from .custom_job import ( ContainerSpec, CustomJob, @@ -27,7 +39,9 @@ Scheduling, WorkerPoolSpec, ) -from .data_item import DataItem +from .data_item import ( + DataItem, +) from .data_labeling_job import ( ActiveLearningConfig, DataLabelingJob, @@ -59,12 +73,19 @@ ListDatasetsResponse, UpdateDatasetRequest, ) -from .deployed_index_ref import DeployedIndexRef -from .deployed_model_ref import DeployedModelRef -from .encryption_spec import EncryptionSpec +from .deployed_index_ref import ( + DeployedIndexRef, +) +from .deployed_model_ref import ( + DeployedModelRef, +) +from .encryption_spec import ( + EncryptionSpec, +) from .endpoint import ( DeployedModel, Endpoint, + PredictRequestResponseLoggingConfig, PrivateEndpoints, ) from .endpoint_service import ( @@ -82,13 +103,22 @@ UndeployModelResponse, UpdateEndpointRequest, ) -from .entity_type import EntityType -from .env_var import EnvVar -from .event import Event -from .execution import Execution +from .entity_type import ( + EntityType, +) +from .env_var import ( + EnvVar, +) +from .event import ( + Event, +) +from .execution import ( + Execution, +) from .explanation import ( Attribution, BlurBaselineConfig, + Examples, Explanation, ExplanationMetadataOverride, ExplanationParameters, @@ -98,19 +128,28 @@ IntegratedGradientsAttribution, ModelExplanation, SampledShapleyAttribution, - Similarity, SmoothGradConfig, XraiAttribution, ) -from .explanation_metadata import ExplanationMetadata -from .feature import Feature -from .feature_monitoring_stats import FeatureStatsAnomaly +from .explanation_metadata import ( + ExplanationMetadata, +) +from .feature import ( + Feature, +) +from .feature_monitoring_stats import ( + FeatureStatsAnomaly, +) from .feature_selector import ( FeatureSelector, IdMatcher, ) -from .featurestore import Featurestore -from .featurestore_monitoring import 
FeaturestoreMonitoringConfig +from .featurestore import ( + Featurestore, +) +from .featurestore_monitoring import ( + FeaturestoreMonitoringConfig, +) from .featurestore_online_service import ( FeatureValue, FeatureValueList, @@ -158,8 +197,12 @@ UpdateFeaturestoreOperationMetadata, UpdateFeaturestoreRequest, ) -from .hyperparameter_tuning_job import HyperparameterTuningJob -from .index import Index +from .hyperparameter_tuning_job import ( + HyperparameterTuningJob, +) +from .index import ( + Index, +) from .index_endpoint import ( DeployedIndex, DeployedIndexAuthConfig, @@ -243,7 +286,9 @@ UpdateModelDeploymentMonitoringJobOperationMetadata, UpdateModelDeploymentMonitoringJobRequest, ) -from .lineage_subgraph import LineageSubgraph +from .lineage_subgraph import ( + LineageSubgraph, +) from .machine_resources import ( AutomaticResources, AutoscalingMetricSpec, @@ -251,10 +296,15 @@ DedicatedResources, DiskSpec, MachineSpec, + NfsMount, ResourcesConsumed, ) -from .manual_batch_tuning_parameters import ManualBatchTuningParameters -from .metadata_schema import MetadataSchema +from .manual_batch_tuning_parameters import ( + ManualBatchTuningParameters, +) +from .metadata_schema import ( + MetadataSchema, +) from .metadata_service import ( AddContextArtifactsAndExecutionsRequest, AddContextArtifactsAndExecutionsResponse, @@ -304,8 +354,12 @@ UpdateContextRequest, UpdateExecutionRequest, ) -from .metadata_store import MetadataStore -from .migratable_resource import MigratableResource +from .metadata_store import ( + MetadataStore, +) +from .migratable_resource import ( + MigratableResource, +) from .migration_service import ( BatchMigrateResourcesOperationMetadata, BatchMigrateResourcesRequest, @@ -329,8 +383,12 @@ ModelMonitoringStatsAnomalies, ModelDeploymentMonitoringObjectiveType, ) -from .model_evaluation import ModelEvaluation -from .model_evaluation_slice import ModelEvaluationSlice +from .model_evaluation import ( + ModelEvaluation, +) +from .model_evaluation_slice import ( + ModelEvaluationSlice, +) from .model_monitoring import ( ModelMonitoringAlertConfig, ModelMonitoringObjectiveConfig, @@ -339,6 +397,7 @@ ) from .model_service import ( DeleteModelRequest, + DeleteModelVersionRequest, ExportModelOperationMetadata, ExportModelRequest, ExportModelResponse, @@ -352,6 +411,9 @@ ListModelEvaluationsResponse, ListModelsRequest, ListModelsResponse, + ListModelVersionsRequest, + ListModelVersionsResponse, + MergeVersionAliasesRequest, UpdateModelRequest, UploadModelOperationMetadata, UploadModelRequest, @@ -388,7 +450,9 @@ PredictResponse, RawPredictRequest, ) -from .specialist_pool import SpecialistPool +from .specialist_pool import ( + SpecialistPool, +) from .specialist_pool_service import ( CreateSpecialistPoolOperationMetadata, CreateSpecialistPoolRequest, @@ -405,7 +469,9 @@ StudySpec, Trial, ) -from .tensorboard import Tensorboard +from .tensorboard import ( + Tensorboard, +) from .tensorboard_data import ( Scalar, TensorboardBlob, @@ -414,8 +480,12 @@ TimeSeriesData, TimeSeriesDataPoint, ) -from .tensorboard_experiment import TensorboardExperiment -from .tensorboard_run import TensorboardRun +from .tensorboard_experiment import ( + TensorboardExperiment, +) +from .tensorboard_run import ( + TensorboardRun, +) from .tensorboard_service import ( BatchCreateTensorboardRunsRequest, BatchCreateTensorboardRunsResponse, @@ -460,7 +530,9 @@ WriteTensorboardRunDataRequest, WriteTensorboardRunDataResponse, ) -from .tensorboard_time_series import TensorboardTimeSeries +from 
.tensorboard_time_series import ( + TensorboardTimeSeries, +) from .training_pipeline import ( FilterSplit, FractionSplit, @@ -476,9 +548,15 @@ Int64Array, StringArray, ) -from .unmanaged_container_model import UnmanagedContainerModel -from .user_action_reference import UserActionReference -from .value import Value +from .unmanaged_container_model import ( + UnmanagedContainerModel, +) +from .user_action_reference import ( + UserActionReference, +) +from .value import ( + Value, +) from .vizier_service import ( AddTrialMeasurementRequest, CheckTrialEarlyStoppingStateMetatdata, @@ -549,6 +627,7 @@ "EncryptionSpec", "DeployedModel", "Endpoint", + "PredictRequestResponseLoggingConfig", "PrivateEndpoints", "CreateEndpointOperationMetadata", "CreateEndpointRequest", @@ -569,6 +648,7 @@ "Execution", "Attribution", "BlurBaselineConfig", + "Examples", "Explanation", "ExplanationMetadataOverride", "ExplanationParameters", @@ -578,7 +658,6 @@ "IntegratedGradientsAttribution", "ModelExplanation", "SampledShapleyAttribution", - "Similarity", "SmoothGradConfig", "XraiAttribution", "ExplanationMetadata", @@ -714,6 +793,7 @@ "DedicatedResources", "DiskSpec", "MachineSpec", + "NfsMount", "ResourcesConsumed", "ManualBatchTuningParameters", "MetadataSchema", @@ -790,6 +870,7 @@ "SamplingStrategy", "ThresholdConfig", "DeleteModelRequest", + "DeleteModelVersionRequest", "ExportModelOperationMetadata", "ExportModelRequest", "ExportModelResponse", @@ -803,6 +884,9 @@ "ListModelEvaluationsResponse", "ListModelsRequest", "ListModelsResponse", + "ListModelVersionsRequest", + "ListModelVersionsResponse", + "MergeVersionAliasesRequest", "UpdateModelRequest", "UploadModelOperationMetadata", "UploadModelRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/annotation.py b/google/cloud/aiplatform_v1beta1/types/annotation.py index 879ffe6627..0af30d2e8d 100644 --- a/google/cloud/aiplatform_v1beta1/types/annotation.py +++ b/google/cloud/aiplatform_v1beta1/types/annotation.py @@ -61,7 +61,7 @@ class Annotation(proto.Message): "overwrite" update happens. annotation_source (google.cloud.aiplatform_v1beta1.types.UserActionReference): Output only. The source of the Annotation. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your Annotations. diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py index 6f08714b73..df2e59b99e 100644 --- a/google/cloud/aiplatform_v1beta1/types/artifact.py +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -45,7 +45,7 @@ class Artifact(proto.Message): An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index b056318b61..2b54b55692 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -64,6 +64,9 @@ class BatchPredictionJob(proto.Message): Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. 
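The Sequence[...LabelsEntry] to Mapping[str, str] annotation changes in these docstrings are documentation-only, but they make explicit that label maps are supplied as plain dicts; a small sketch with placeholder display name and label values.

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    artifact = aiplatform_v1beta1.Artifact(
        display_name="training-dataset-snapshot",
        labels={"team": "ml", "env": "test"},
    )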
+ model_version_id (str): + Output only. The version ID of the Model that + produces the predictions via this job. unmanaged_container_model (google.cloud.aiplatform_v1beta1.types.UnmanagedContainerModel): Contains model information necessary to perform batch prediction without requiring uploading to model registry. @@ -192,7 +195,7 @@ class BatchPredictionJob(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the BatchPredictionJob was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 @@ -414,6 +417,10 @@ class OutputInfo(proto.Message): proto.STRING, number=3, ) + model_version_id = proto.Field( + proto.STRING, + number=30, + ) unmanaged_container_model = proto.Field( proto.MESSAGE, number=28, diff --git a/google/cloud/aiplatform_v1beta1/types/context.py b/google/cloud/aiplatform_v1beta1/types/context.py index 1679501626..40d271bba2 100644 --- a/google/cloud/aiplatform_v1beta1/types/context.py +++ b/google/cloud/aiplatform_v1beta1/types/context.py @@ -41,7 +41,7 @@ class Context(proto.Message): An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Context.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 353cbe7ca7..367859fcc1 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -71,7 +71,7 @@ class CustomJob(proto.Message): error (google.rpc.status_pb2.Status): Output only. Only populated when job's state is ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 @@ -86,7 +86,7 @@ class CustomJob(proto.Message): CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. - web_access_uris (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.WebAccessUrisEntry]): + web_access_uris (Mapping[str, str]): Output only. URIs for accessing `interactive shells `__ (one URI for each training node). Only available if @@ -198,6 +198,15 @@ class CustomJobSpec(proto.Message): If this field is left unspecified, the job is not peered with any network. + reserved_ip_ranges (Sequence[str]): + Optional. A list of names for the reserved ip ranges under + the VPC network that can be used for this job. + + If set, we will deploy the job within the provided ip + ranges. Otherwise, the job will be deployed to any ip ranges + under the provided VPC network. + + Example: ['vertex-ai-ip-range']. base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For @@ -264,6 +273,10 @@ class CustomJobSpec(proto.Message): proto.STRING, number=5, ) + reserved_ip_ranges = proto.RepeatedField( + proto.STRING, + number=13, + ) base_output_directory = proto.Field( proto.MESSAGE, number=6, @@ -304,6 +317,8 @@ class WorkerPoolSpec(proto.Message): replica_count (int): Optional. The number of worker replicas to use for this worker pool. + nfs_mounts (Sequence[google.cloud.aiplatform_v1beta1.types.NfsMount]): + Optional. List of NFS mount spec. disk_spec (google.cloud.aiplatform_v1beta1.types.DiskSpec): Disk spec. """ @@ -329,6 +344,11 @@ class WorkerPoolSpec(proto.Message): proto.INT64, number=2, ) + nfs_mounts = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=machine_resources.NfsMount, + ) disk_spec = proto.Field( proto.MESSAGE, number=5, diff --git a/google/cloud/aiplatform_v1beta1/types/data_item.py b/google/cloud/aiplatform_v1beta1/types/data_item.py index 87e4b011bf..b8e464b9dd 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_item.py +++ b/google/cloud/aiplatform_v1beta1/types/data_item.py @@ -41,7 +41,7 @@ class DataItem(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DataItem was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py index 40bf66de30..fcf727b3fb 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py @@ -52,7 +52,7 @@ class DataLabelingJob(proto.Message): Required. Dataset resource names. Right now we only support labeling from a single Dataset. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` - annotation_labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.AnnotationLabelsEntry]): + annotation_labels (Mapping[str, str]): Labels to assign to annotations generated by this DataLabelingJob. Label keys and values can be no longer than 64 @@ -101,7 +101,7 @@ class DataLabelingJob(proto.Message): Output only. DataLabelingJob errors. It is only populated when job's state is ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your DataLabelingJobs. diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index f2f2033e00..10fc9c1320 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -64,7 +64,7 @@ class Dataset(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Datasets. @@ -151,7 +151,7 @@ class ImportDataConfig(proto.Message): input content. This field is a member of `oneof`_ ``source``. - data_item_labels (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig.DataItemLabelsEntry]): + data_item_labels (Mapping[str, str]): Labels that will be applied to newly imported DataItems. 
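A hedged sketch of how the new reserved_ip_ranges and nfs_mounts fields might be populated on a custom job; the image URI, IP-range name, and NFS server values are placeholders, and the field shapes follow the proto definitions above.

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    job = aiplatform_v1beta1.CustomJob(
        display_name="nfs-backed-training",
        job_spec=aiplatform_v1beta1.CustomJobSpec(
            reserved_ip_ranges=["vertex-ai-ip-range"],
            worker_pool_specs=[
                aiplatform_v1beta1.WorkerPoolSpec(
                    machine_spec=aiplatform_v1beta1.MachineSpec(
                        machine_type="n1-standard-4",
                    ),
                    replica_count=1,
                    container_spec=aiplatform_v1beta1.ContainerSpec(
                        image_uri="gcr.io/my-project/trainer:latest",
                    ),
                    nfs_mounts=[
                        aiplatform_v1beta1.NfsMount(
                            server="10.0.0.2",
                            path="/exports/data",
                            # Mounted for the job under /mnt/nfs/<mount_point>.
                            mount_point="data",
                        )
                    ],
                )
            ],
        ),
    )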
If an identical DataItem as one being imported already exists in the Dataset, then these labels will be appended to these diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 424d5b0939..85ff783e64 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -17,6 +17,7 @@ from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import machine_resources from google.protobuf import timestamp_pb2 # type: ignore @@ -27,6 +28,7 @@ "Endpoint", "DeployedModel", "PrivateEndpoints", + "PredictRequestResponseLoggingConfig", }, ) @@ -52,7 +54,7 @@ class Endpoint(proto.Message): and [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] respectively. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -66,7 +68,7 @@ class Endpoint(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 @@ -107,7 +109,8 @@ class Endpoint(proto.Message): ``{project}`` is a project number, as in ``12345``, and ``{network}`` is network name. enable_private_service_connect (bool): - If true, expose the Endpoint via private service connect. + Deprecated: If true, expose the Endpoint via private service + connect. Only one of the fields, [network][google.cloud.aiplatform.v1beta1.Endpoint.network] @@ -119,6 +122,9 @@ class Endpoint(proto.Message): associated with this Endpoint if monitoring is enabled by [CreateModelDeploymentMonitoringJob][]. Format: ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + predict_request_response_logging_config (google.cloud.aiplatform_v1beta1.types.PredictRequestResponseLoggingConfig): + Configures the request-response logging for + online prediction. """ name = proto.Field( @@ -179,6 +185,11 @@ class Endpoint(proto.Message): proto.STRING, number=14, ) + predict_request_response_logging_config = proto.Field( + proto.MESSAGE, + number=18, + message="PredictRequestResponseLoggingConfig", + ) class DeployedModel(proto.Message): @@ -216,6 +227,9 @@ class DeployedModel(proto.Message): the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. + model_version_id (str): + Output only. The version ID of the model that + is deployed. display_name (str): The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. @@ -295,6 +309,10 @@ class DeployedModel(proto.Message): proto.STRING, number=2, ) + model_version_id = proto.Field( + proto.STRING, + number=18, + ) display_name = proto.Field( proto.STRING, number=3, @@ -368,4 +386,39 @@ class PrivateEndpoints(proto.Message): ) +class PredictRequestResponseLoggingConfig(proto.Message): + r"""Configuration for logging request-response to a BigQuery + table. 
+ + Attributes: + enabled (bool): + If logging is enabled or not. + sampling_rate (float): + Percentage of requests to be logged, expressed as a fraction + in range(0,1]. + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + BigQuery table for logging. If only given a project, a new + dataset will be created with name + ``logging__`` where will + be made BigQuery-dataset-name compatible (e.g. most special + characters will become underscores). If no table name is + given, a new table will be created with name + ``request_response_logging`` + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + sampling_rate = proto.Field( + proto.DOUBLE, + number=2, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=3, + message=io.BigQueryDestination, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index d47fd4755d..2eeebec5ff 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -264,7 +264,7 @@ class DeployModelRequest(proto.Message): must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. @@ -343,7 +343,7 @@ class UndeployModelRequest(proto.Message): deployed_model_id (str): Required. The ID of the DeployedModel to be undeployed from the Endpoint. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): + traffic_split (Mapping[str, int]): If this field is provided, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being diff --git a/google/cloud/aiplatform_v1beta1/types/entity_type.py b/google/cloud/aiplatform_v1beta1/types/entity_type.py index a6d1819e95..2e29d24000 100644 --- a/google/cloud/aiplatform_v1beta1/types/entity_type.py +++ b/google/cloud/aiplatform_v1beta1/types/entity_type.py @@ -51,7 +51,7 @@ class EntityType(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this EntityType was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/event.py b/google/cloud/aiplatform_v1beta1/types/event.py index de653ba0c3..747f55ccd2 100644 --- a/google/cloud/aiplatform_v1beta1/types/event.py +++ b/google/cloud/aiplatform_v1beta1/types/event.py @@ -41,7 +41,7 @@ class Event(proto.Message): Output only. Time the Event occurred. type_ (google.cloud.aiplatform_v1beta1.types.Event.Type): Required. The type of the Event. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Event.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to annotate Events. 
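As an illustration of wiring the new request-response logging config onto an Endpoint; the display name and BigQuery URI are placeholders, and output_uri is assumed from the existing BigQueryDestination message in types/io.py.

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    endpoint = aiplatform_v1beta1.Endpoint(
        display_name="my-endpoint",
        predict_request_response_logging_config=aiplatform_v1beta1.PredictRequestResponseLoggingConfig(
            enabled=True,
            # Log roughly 10% of online prediction requests.
            sampling_rate=0.1,
            bigquery_destination=aiplatform_v1beta1.BigQueryDestination(
                output_uri="bq://my-project",
            ),
        ),
    )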
Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py index 2c1f386811..937c7a4041 100644 --- a/google/cloud/aiplatform_v1beta1/types/execution.py +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -48,7 +48,7 @@ class Execution(proto.Message): An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Execution.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index 083541fde1..2a81fe19ca 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -34,7 +34,7 @@ "SmoothGradConfig", "FeatureNoiseSigma", "BlurBaselineConfig", - "Similarity", + "Examples", "ExplanationSpecOverride", "ExplanationMetadataOverride", }, @@ -337,8 +337,8 @@ class ExplanationParameters(proto.Message): Gradients instead. This field is a member of `oneof`_ ``method``. - similarity (google.cloud.aiplatform_v1beta1.types.Similarity): - Similarity explainability that returns the + examples (google.cloud.aiplatform_v1beta1.types.Examples): + Example-based explanations that returns the nearest neighbors from the provided dataset. This field is a member of `oneof`_ ``method``. @@ -381,11 +381,11 @@ class ExplanationParameters(proto.Message): oneof="method", message="XraiAttribution", ) - similarity = proto.Field( + examples = proto.Field( proto.MESSAGE, number=7, oneof="method", - message="Similarity", + message="Examples", ) top_k = proto.Field( proto.INT32, @@ -652,9 +652,9 @@ class BlurBaselineConfig(proto.Message): ) -class Similarity(proto.Message): - r"""Similarity explainability that returns the nearest neighbors - from the provided dataset. +class Examples(proto.Message): + r"""Example-based explainability that returns the nearest + neighbors from the provided dataset. Attributes: gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): @@ -665,6 +665,8 @@ class Similarity(proto.Message): the same as [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] and should match NearestNeighborSearchConfig. + neighbor_count (int): + The number of neighbors to return. """ gcs_source = proto.Field( @@ -677,6 +679,10 @@ class Similarity(proto.Message): number=2, message=struct_pb2.Value, ) + neighbor_count = proto.Field( + proto.INT32, + number=3, + ) class ExplanationSpecOverride(proto.Message): @@ -717,7 +723,7 @@ class ExplanationMetadataOverride(proto.Message): time. Attributes: - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputsEntry]): + inputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputMetadataOverride]): Required. Overrides the [input metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] of the features. 
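A sketch of selecting the renamed Examples method in ExplanationParameters; the Cloud Storage URI is a placeholder, and GcsSource.uris is assumed from the existing message in types/io.py.

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    parameters = aiplatform_v1beta1.ExplanationParameters(
        examples=aiplatform_v1beta1.Examples(
            gcs_source=aiplatform_v1beta1.GcsSource(
                uris=["gs://my-bucket/examples/"],
            ),
            # Return the ten nearest neighbors per instance.
            neighbor_count=10,
        )
    )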
The key is the name of the feature to be diff --git a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py index 4d05a2d902..230b9b0af1 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -31,7 +31,7 @@ class ExplanationMetadata(proto.Message): explanation. Attributes: - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputsEntry]): + inputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata]): Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. @@ -48,7 +48,7 @@ class ExplanationMetadata(proto.Message): For custom images, the key must match with the key in [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. - outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]): + outputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputMetadata]): Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys diff --git a/google/cloud/aiplatform_v1beta1/types/feature.py b/google/cloud/aiplatform_v1beta1/types/feature.py index 6ea9065c67..6b6a6f5dfc 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature.py +++ b/google/cloud/aiplatform_v1beta1/types/feature.py @@ -53,7 +53,7 @@ class Feature(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this EntityType was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 @@ -72,10 +72,10 @@ class Feature(proto.Message): read-modify-write updates. If not set, a blind "overwrite" update happens. monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): - Optional. The custom monitoring configuration for this - Feature, if not set, use the monitoring_config defined for - the EntityType this Feature belongs to. Only Features with - type + Optional. Deprecated: The custom monitoring configuration + for this Feature, if not set, use the monitoring_config + defined for the EntityType this Feature belongs to. Only + Features with type ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) BOOL, STRING, DOUBLE or INT64 can enable monitoring. diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index 6f27198cdd..e10503041f 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -46,7 +46,7 @@ class Featurestore(proto.Message): Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore.LabelsEntry]): + labels (Mapping[str, str]): Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 @@ -61,8 +61,10 @@ class Featurestore(proto.Message): System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. 
online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig): - Required. Config for online serving - resources. + Optional. Config for online storage + resources. If unset, the featurestore will not + have an online store and cannot be used for + online serving. state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): Output only. State of the featurestore. encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): @@ -73,7 +75,7 @@ class Featurestore(proto.Message): """ class State(proto.Enum): - r"""Possible states a Featurestore can have.""" + r"""Possible states a featurestore can have.""" STATE_UNSPECIFIED = 0 STABLE = 1 UPDATING = 2 @@ -84,11 +86,12 @@ class OnlineServingConfig(proto.Message): Attributes: fixed_node_count (int): - The number of nodes for each cluster. The number of nodes - will not scale automatically but can be scaled manually by - providing different values when updating. Only one of - ``fixed_node_count`` and ``scaling`` can be set. Setting one - will reset the other. + The number of nodes for the online store. The + number of nodes doesn't scale automatically, but + you can manually update the number of nodes. If + set to 0, the featurestore will not have an + online store and cannot be used for online + serving. scaling (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig.Scaling): Online serving scaling configuration. Only one of ``fixed_node_count`` and ``scaling`` can be set. Setting one @@ -107,7 +110,8 @@ class Scaling(proto.Message): 1. max_node_count (int): The maximum number of nodes to scale up to. Must be greater - or equal to min_node_count. + than min_node_count, and less than or equal to 10 times of + 'min_node_count'. """ min_node_count = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py index 1ebfbda314..11c88fdfa4 100644 --- a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py @@ -84,7 +84,7 @@ class HyperparameterTuningJob(proto.Message): error (google.rpc.status_pb2.Status): Output only. Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize HyperparameterTuningJobs. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/index.py b/google/cloud/aiplatform_v1beta1/types/index.py index ba45711854..96efd0f02d 100644 --- a/google/cloud/aiplatform_v1beta1/types/index.py +++ b/google/cloud/aiplatform_v1beta1/types/index.py @@ -66,7 +66,7 @@ class Index(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Indexes. 
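To make the reworded online_serving_config semantics concrete, a hedged sketch with placeholder node counts: a featurestore with fixed_node_count=0 (or no online_serving_config at all) has no online store, while a non-zero count or an autoscaling range enables online serving.

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    # Offline-only featurestore: no online store is provisioned.
    offline_only = aiplatform_v1beta1.Featurestore(
        online_serving_config=aiplatform_v1beta1.Featurestore.OnlineServingConfig(
            fixed_node_count=0,
        ),
    )

    # Autoscaled online store; per the docstring above, max_node_count must
    # exceed min_node_count and stay within 10x of it.
    autoscaled = aiplatform_v1beta1.Featurestore(
        online_serving_config=aiplatform_v1beta1.Featurestore.OnlineServingConfig(
            scaling=aiplatform_v1beta1.Featurestore.OnlineServingConfig.Scaling(
                min_node_count=1,
                max_node_count=5,
            ),
        ),
    )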
Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index 3e7cc3c31e..593aa01378 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -52,7 +52,7 @@ class IndexEndpoint(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your IndexEndpoints. Label keys and values can be no longer than 64 @@ -81,19 +81,18 @@ class IndexEndpoint(proto.Message): network. If left unspecified, the Endpoint is not peered with any network. - Only one of the fields, [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], - can be set. + and + [private_service_connect_config][google.cloud.aiplatform.v1beta1.IndexEndpoint.private_service_connect_config] + are mutually exclusive. `Format `__: projects/{project}/global/networks/{network}. Where {project} is a project number, as in '12345', and {network} is network name. enable_private_service_connect (bool): - Optional. If true, expose the IndexEndpoint via private - service connect. + Optional. Deprecated: If true, expose the IndexEndpoint via + private service connect. Only one of the fields, [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] diff --git a/google/cloud/aiplatform_v1beta1/types/job_state.py b/google/cloud/aiplatform_v1beta1/types/job_state.py index 72297e5e94..937f6b9baa 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_state.py +++ b/google/cloud/aiplatform_v1beta1/types/job_state.py @@ -36,6 +36,7 @@ class JobState(proto.Enum): JOB_STATE_CANCELLED = 7 JOB_STATE_PAUSED = 8 JOB_STATE_EXPIRED = 9 + JOB_STATE_UPDATING = 10 __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index 63060fb7df..4cd48fdad3 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -29,6 +29,7 @@ "BatchDedicatedResources", "ResourcesConsumed", "DiskSpec", + "NfsMount", "AutoscalingMetricSpec", }, ) @@ -109,6 +110,12 @@ class DedicatedResources(proto.Message): will use [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] as the default value. + + The value of this field impacts the charge against Vertex + CPU and GPU quotas. Specifically, you will be charged for + (max_replica_count \* number of cores in the selected + machine type) and (max_replica_count \* number of GPUs per + replica in the selected machine type). autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]): Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's @@ -278,6 +285,36 @@ class DiskSpec(proto.Message): ) +class NfsMount(proto.Message): + r"""Represents a mount configuration for Network File System + (NFS) to mount. + + Attributes: + server (str): + Required. IP address of the NFS server. + path (str): + Required. Source path exported from NFS server. 
Has to start + with '/', and combined with the ip address, it indicates the + source mount path in the form of ``server:path`` + mount_point (str): + Required. Destination mount path. The NFS will be mounted + for the user under /mnt/nfs/ + """ + + server = proto.Field( + proto.STRING, + number=1, + ) + path = proto.Field( + proto.STRING, + number=2, + ) + mount_point = proto.Field( + proto.STRING, + number=3, + ) + + class AutoscalingMetricSpec(proto.Message): r"""The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/google/cloud/aiplatform_v1beta1/types/metadata_service.py index 96a1a046a3..db995197b6 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -423,7 +423,7 @@ class UpdateArtifactRequest(proto.Message): Format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. allow_missing (bool): @@ -719,7 +719,7 @@ class UpdateContextRequest(proto.Message): field is used to identify the Context to be updated. Format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. allow_missing (bool): @@ -1112,7 +1112,7 @@ class UpdateExecutionRequest(proto.Message): Format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields + Optional. A FieldMask indicating which fields should be updated. Functionality of this field is not yet supported. allow_missing (bool): diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index b5202939dc..d315d2952e 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -40,12 +40,36 @@ class Model(proto.Message): Attributes: name (str): The resource name of the Model. + version_id (str): + Output only. Immutable. The version ID of the + model. A new version is committed when a new + model version is uploaded or trained under an + existing model id. It is an auto-incrementing + decimal number in string representation. + version_aliases (Sequence[str]): + User provided version aliases so that a model version can be + referenced via alias (i.e. + projects/{project}/locations/{location}/models/{model_id}@{version_alias} + instead of auto-generated version id (i.e. + projects/{project}/locations/{location}/models/{model_id}@{version_id}). + The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] to + distinguish from version_id. A default version alias will be + created for the first version of the model, and there must + be exactly one default version alias for a model. + version_create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this version was + created. + version_update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Timestamp when this version was + most recently updated. display_name (str): Required. The display name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8 characters. description (str): The description of the Model. + version_description (str): + The description of this version. predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): The schemata that describe formats of the Model's predictions and explanations as given and returned via @@ -236,7 +260,7 @@ class Model(proto.Message): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Model.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 @@ -309,6 +333,24 @@ class ExportableContent(proto.Enum): proto.STRING, number=1, ) + version_id = proto.Field( + proto.STRING, + number=28, + ) + version_aliases = proto.RepeatedField( + proto.STRING, + number=29, + ) + version_create_time = proto.Field( + proto.MESSAGE, + number=31, + message=timestamp_pb2.Timestamp, + ) + version_update_time = proto.Field( + proto.MESSAGE, + number=32, + message=timestamp_pb2.Timestamp, + ) display_name = proto.Field( proto.STRING, number=2, @@ -317,6 +359,10 @@ class ExportableContent(proto.Enum): proto.STRING, number=3, ) + version_description = proto.Field( + proto.STRING, + number=30, + ) predict_schemata = proto.Field( proto.MESSAGE, number=4, diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 85da918c31..23da241f85 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -78,6 +78,9 @@ class ModelDeploymentMonitoringJob(proto.Message): schedule_state (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): Output only. Schedule state when the monitoring job is in Running state. + latest_monitoring_pipeline_metadata (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LatestMonitoringPipelineMetadata): + Output only. Latest triggered monitoring + pipeline metadata. model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveConfig]): Required. The config for monitoring objectives. This is a per DeployedModel config. @@ -131,7 +134,7 @@ class ModelDeploymentMonitoringJob(proto.Message): the TTL and we take the ceil of TTL/86400(a day). e.g. { second: 3600} indicates ttl = 1 day. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your ModelDeploymentMonitoringJob. @@ -180,6 +183,29 @@ class MonitoringScheduleState(proto.Enum): OFFLINE = 2 RUNNING = 3 + class LatestMonitoringPipelineMetadata(proto.Message): + r"""All metadata of most recent monitoring pipelines. + + Attributes: + run_time (google.protobuf.timestamp_pb2.Timestamp): + The time that most recent monitoring + pipelines that is related to this run. + status (google.rpc.status_pb2.Status): + The status of the most recent monitoring + pipeline. 
+ """ + + run_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + name = proto.Field( proto.STRING, number=1, @@ -202,6 +228,11 @@ class MonitoringScheduleState(proto.Enum): number=5, enum=MonitoringScheduleState, ) + latest_monitoring_pipeline_metadata = proto.Field( + proto.MESSAGE, + number=25, + message=LatestMonitoringPipelineMetadata, + ) model_deployment_monitoring_objective_configs = proto.RepeatedField( proto.MESSAGE, number=6, diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py index e64a163b92..e347fe7785 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py @@ -30,7 +30,7 @@ class ModelMonitoringObjectiveConfig(proto.Message): - r"""Next ID: 6 + r"""Next ID: 8 Attributes: training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset): @@ -131,14 +131,14 @@ class TrainingPredictionSkewDetectionConfig(proto.Message): parameters. Attributes: - skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]): + skew_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): Key is the feature name and value is the threshold. If a feature needs to be monitored for skew, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between the training and prediction feature. - attribution_score_skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.AttributionScoreSkewThresholdsEntry]): + attribution_score_skew_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): Key is the feature name and value is the threshold. The threshold here is against attribution score distance between the training @@ -162,14 +162,14 @@ class PredictionDriftDetectionConfig(proto.Message): r"""The config for Prediction data drift detection. Attributes: - drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): + drift_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): Key is the feature name and value is the threshold. If a feature needs to be monitored for drift, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between different time windws. - attribution_score_drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.AttributionScoreDriftThresholdsEntry]): + attribution_score_drift_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): Key is the feature name and value is the threshold. 
The threshold here is against attribution score distance between different diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index 363cebfdc4..5aaadd0376 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -34,8 +34,12 @@ "GetModelRequest", "ListModelsRequest", "ListModelsResponse", + "ListModelVersionsRequest", + "ListModelVersionsResponse", "UpdateModelRequest", "DeleteModelRequest", + "DeleteModelVersionRequest", + "MergeVersionAliasesRequest", "ExportModelRequest", "ExportModelOperationMetadata", "ExportModelResponse", @@ -59,6 +63,17 @@ class UploadModelRequest(proto.Message): Required. The resource name of the Location into which to upload the Model. Format: ``projects/{project}/locations/{location}`` + parent_model (str): + Optional. The resource name of the model into + which to upload the version. Only specify this + field when uploading a new version. + model_id (str): + Optional. The ID to use for the uploaded Model, which will + become the final component of the model resource name. + + This value may be up to 63 characters, and valid characters + are ``[a-z0-9_-]``. The first character cannot be a number + or hyphen. model (google.cloud.aiplatform_v1beta1.types.Model): Required. The Model to create. """ @@ -67,6 +82,14 @@ class UploadModelRequest(proto.Message): proto.STRING, number=1, ) + parent_model = proto.Field( + proto.STRING, + number=4, + ) + model_id = proto.Field( + proto.STRING, + number=5, + ) model = proto.Field( proto.MESSAGE, number=2, @@ -100,12 +123,19 @@ class UploadModelResponse(proto.Message): model (str): The name of the uploaded Model resource. Format: ``projects/{project}/locations/{location}/models/{model}`` + model_version_id (str): + Output only. The version ID of the model that + is uploaded. """ model = proto.Field( proto.STRING, number=1, ) + model_version_id = proto.Field( + proto.STRING, + number=2, + ) class GetModelRequest(proto.Message): @@ -216,14 +246,119 @@ def raw_page(self): ) +class ListModelVersionsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]. + + Attributes: + name (str): + Required. The name of the model to list + versions for. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelVersionsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsResponse.next_page_token] + of the previous [ModelService.ListModelversions][] call. + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``labels.myKey="myValue"`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
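The new parent_model and model_id fields on UploadModelRequest are what connect an upload to the versioning fields added on Model. A hedged sketch of uploading a new version of an existing model is shown below; the resource names are placeholders and the Model is left minimal rather than deployable.

from google.cloud import aiplatform_v1beta1


def sample_upload_model_version():
    # Create a client
    client = aiplatform_v1beta1.ModelServiceClient()

    # Initialize request argument(s); parent and parent_model are placeholders
    request = aiplatform_v1beta1.UploadModelRequest(
        parent="projects/my-project/locations/us-central1",
        parent_model="projects/my-project/locations/us-central1/models/123",
        model=aiplatform_v1beta1.Model(
            display_name="display_name_value",
        ),
    )

    # Make the request and wait for the long-running operation
    operation = client.upload_model(request=request)
    response = operation.result()

    # The response carries the version ID assigned to this upload
    print(response.model_version_id)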
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelVersionsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions] + + Attributes: + models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]): + List of Model versions in the requested page. + In the returned Model name field, version ID + instead of regvision tag will be included. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListModelVersionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + models = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + class UpdateModelRequest(proto.Message): r"""Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. Attributes: model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model which replaces the - resource on the server. + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, refers to + a version specific update. + 2. model.name without the @ value, e.g. models/123, refers + to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a model + update. + 4. Supported model fields: display_name, description; + supported version-specific fields: version_description. + Labels are supported in both scenarios. Both the model + labels and the version labels are merged when a model is + returned. When updating labels, if the request is for + model-specific update, model label gets updated. + Otherwise, version labels get updated. + 5. A model name or model version name fields update mismatch + will cause a precondition error. + 6. One request cannot update both the model and the version + fields. You must update them separately. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see @@ -259,6 +394,62 @@ class DeleteModelRequest(proto.Message): ) +class DeleteModelVersionRequest(proto.Message): + r"""Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion]. + + Attributes: + name (str): + Required. The name of the model version to be deleted, with + a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class MergeVersionAliasesRequest(proto.Message): + r"""Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases]. + + Attributes: + name (str): + Required. The name of the model version to merge aliases, + with a version ID explicitly included. 
+ + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + version_aliases (Sequence[str]): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix to + an alias means removing that alias from the version. ``-`` + is NOT counted in the 128 characters. Example: ``-golden`` + means removing the ``golden`` alias from the version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have the + exactly same order from this MergeVersionAliases API. 2) + Adding and deleting the same alias in the request is not + recommended, and the 2 operations will be cancelled out. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version_aliases = proto.RepeatedField( + proto.STRING, + number=2, + ) + + class ExportModelRequest(proto.Message): r"""Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index d6edba23d2..3a79d4ec07 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -58,7 +58,7 @@ class PipelineJob(proto.Message): Output only. Timestamp when this PipelineJob was most recently updated. pipeline_spec (google.protobuf.struct_pb2.Struct): - Required. The spec of the pipeline. + The spec of the pipeline. state (google.cloud.aiplatform_v1beta1.types.PipelineState): Output only. The detailed state of the job. job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail): @@ -68,7 +68,7 @@ class PipelineJob(proto.Message): Output only. The error that occurred during pipeline execution. Only populated when the pipeline's state is FAILED or CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize PipelineJob. Label keys and values can be no longer than 64 @@ -115,7 +115,7 @@ class RuntimeConfig(proto.Message): r"""The runtime config of a PipelineJob. Attributes: - parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]): + parameters (Mapping[str, google.cloud.aiplatform_v1beta1.types.Value]): Deprecated. Use [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] instead. The runtime parameters of the PipelineJob. The @@ -135,7 +135,7 @@ class RuntimeConfig(proto.Message): specified output directory. The service account specified in this pipeline must have the ``storage.objects.get`` and ``storage.objects.create`` permissions for this bucket. - parameter_values (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParameterValuesEntry]): + parameter_values (Mapping[str, google.protobuf.struct_pb2.Value]): The runtime parameters of the PipelineJob. The parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] @@ -302,10 +302,10 @@ class PipelineTaskDetail(proto.Message): Output only. A list of task status. This field keeps a record of task status evolving over time. - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.InputsEntry]): + inputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.ArtifactList]): Output only. 
The runtime input artifacts of the task. - outputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.OutputsEntry]): + outputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.ArtifactList]): Output only. The runtime output artifacts of the task. """ diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index 297bd5bfe8..f236fa402f 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -286,6 +286,10 @@ class ListPipelineJobsRequest(proto.Message): ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. - ``labels``: Supports key-value equality and key presence. + - ``template_uri``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``template_metadata.version_name``: Supports ``=``, + ``!=`` comparisons, and ``:`` wildcard. Filter expressions can be combined together using logical operators (``AND`` & ``OR``). For example: diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index dfcfa1638e..3d9a520ad9 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -97,6 +97,10 @@ class PredictResponse(proto.Message): Output only. The resource name of the Model which is deployed as the DeployedModel that this prediction hits. + model_version_id (str): + Output only. The version ID of the Model + which is deployed as the DeployedModel that this + prediction hits. model_display_name (str): Output only. The [display name][google.cloud.aiplatform.v1beta1.Model.display_name] of @@ -117,6 +121,10 @@ class PredictResponse(proto.Message): proto.STRING, number=3, ) + model_version_id = proto.Field( + proto.STRING, + number=5, + ) model_display_name = proto.Field( proto.STRING, number=4, diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index 6cd0611323..8e122d12a4 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -139,7 +139,7 @@ class Trial(proto.Message): Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. - web_access_uris (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.WebAccessUrisEntry]): + web_access_uris (Mapping[str, str]): Output only. URIs for accessing `interactive shells `__ (one URI for each training node). Only available if this @@ -276,6 +276,11 @@ class StudySpec(proto.Message): The automated early stopping using convex stopping rule. + This field is a member of `oneof`_ ``automated_stopping_spec``. + convex_automated_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexAutomatedStoppingSpec): + The automated early stopping spec using + convex stopping rule. + This field is a member of `oneof`_ ``automated_stopping_spec``. metrics (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec]): Required. Metric specs for the Study. @@ -723,6 +728,77 @@ class MedianAutomatedStoppingSpec(proto.Message): number=1, ) + class ConvexAutomatedStoppingSpec(proto.Message): + r"""Configuration for ConvexAutomatedStoppingSpec. 
When there are enough + completed trials (configured by min_measurement_count), for pending + trials with enough measurements and steps, the policy first computes + an overestimate of the objective value at max_num_steps according to + the slope of the incomplete objective value curve. No prediction can + be made if the curve is completely flat. If the overestimation is + worse than the best objective value of the completed trials, this + pending trial will be early-stopped, but a last measurement will be + added to the pending trial with max_num_steps and predicted + objective value from the autoregression model. + + Attributes: + max_step_count (int): + Steps used in predicting the final objective for early + stopped trials. In general, it's set to be the same as the + defined steps in training / tuning. If not defined, it will + learn it from the completed trials. When use_steps is false, + this field is set to the maximum elapsed seconds. + min_step_count (int): + Minimum number of steps for a trial to complete. Trials + which do not have a measurement with step_count > + min_step_count won't be considered for early stopping. It's + ok to set it to 0, and a trial can be early stopped at any + stage. By default, min_step_count is set to be one-tenth of + the max_step_count. When use_elapsed_duration is true, this + field is set to the minimum elapsed seconds. + min_measurement_count (int): + The minimal number of measurements in a Trial. + Early-stopping checks will not trigger if less than + min_measurement_count+1 completed trials or pending trials + with less than min_measurement_count measurements. If not + defined, the default value is 5. + learning_rate_parameter_name (str): + The hyper-parameter name used in the tuning job that stands + for learning rate. Leave it blank if learning rate is not in + a parameter in tuning. The learning_rate is used to estimate + the objective value of the ongoing trial. + use_elapsed_duration (bool): + This bool determines whether or not the rule is applied + based on elapsed_secs or steps. If + use_elapsed_duration==false, the early stopping decision is + made according to the predicted objective values according + to the target steps. If use_elapsed_duration==true, + elapsed_secs is used instead of steps. Also, in this case, + the parameters max_num_steps and min_num_steps are + overloaded to contain max_elapsed_seconds and + min_elapsed_seconds. + """ + + max_step_count = proto.Field( + proto.INT64, + number=1, + ) + min_step_count = proto.Field( + proto.INT64, + number=2, + ) + min_measurement_count = proto.Field( + proto.INT64, + number=3, + ) + learning_rate_parameter_name = proto.Field( + proto.STRING, + number=4, + ) + use_elapsed_duration = proto.Field( + proto.BOOL, + number=5, + ) + class ConvexStopConfig(proto.Message): r"""Configuration for ConvexStopPolicy. 
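A minimal sketch of selecting the new convex_automated_stopping_spec variant of the automated_stopping_spec oneof on StudySpec is shown below. The metric and step counts are illustrative, and the parameter specs a real study also requires are omitted for brevity.

from google.cloud import aiplatform_v1beta1

study_spec = aiplatform_v1beta1.StudySpec(
    metrics=[
        aiplatform_v1beta1.StudySpec.MetricSpec(
            metric_id="accuracy",
            goal=aiplatform_v1beta1.StudySpec.MetricSpec.GoalType.MAXIMIZE,
        )
    ],
    convex_automated_stopping_spec=aiplatform_v1beta1.StudySpec.ConvexAutomatedStoppingSpec(
        max_step_count=1000,                    # expected training steps (illustrative)
        min_step_count=100,                     # do not early-stop before this many steps
        min_measurement_count=5,
        learning_rate_parameter_name="learning_rate",
        use_elapsed_duration=False,             # decide on steps, not elapsed seconds
    ),
)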
@@ -800,6 +876,12 @@ class ConvexStopConfig(proto.Message): oneof="automated_stopping_spec", message=ConvexStopConfig, ) + convex_automated_stopping_spec = proto.Field( + proto.MESSAGE, + number=9, + oneof="automated_stopping_spec", + message=ConvexAutomatedStoppingSpec, + ) metrics = proto.RepeatedField( proto.MESSAGE, number=1, diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/google/cloud/aiplatform_v1beta1/types/tensorboard.py index 87bab3fb32..d84252fe4a 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard.py @@ -61,7 +61,7 @@ class Tensorboard(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Tensorboard was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Tensorboards. Label keys and values can be no longer than 64 diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py index 9416df3e41..d09e35c6d8 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py @@ -46,7 +46,7 @@ class TensorboardExperiment(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this TensorboardExperiment was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your Datasets. diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py index bd39c151f6..17f38512b0 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py @@ -48,7 +48,7 @@ class TensorboardRun(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this TensorboardRun was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize your TensorboardRuns. diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index af2006195d..2190b1bae0 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -110,6 +110,17 @@ class TrainingPipeline(proto.Message): [name][google.cloud.aiplatform.v1beta1.Model.name] is populated. The Model is always uploaded into the Project and Location in which this pipeline is. + model_id (str): + Optional. The ID to use for the uploaded Model, which will + become the final component of the model resource name. + + This value may be up to 63 characters, and valid characters + are ``[a-z0-9_-]``. The first character cannot be a number + or hyphen. + parent_model (str): + Optional. When specify this field, the ``model_to_upload`` + will not be uploaded as a new model, instead, it will become + a new version of this ``parent_model``. state (google.cloud.aiplatform_v1beta1.types.PipelineState): Output only. The detailed state of the pipeline. 
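The model_id and parent_model fields added to TrainingPipeline above mirror the UploadModelRequest behaviour: setting parent_model makes the trained model a new version of an existing model rather than a brand-new resource. A hedged sketch follows; the resource names are placeholders and the training task definition is left as an opaque placeholder rather than a real schema URI.

from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2


def sample_create_training_pipeline_as_new_version():
    # Create a client
    client = aiplatform_v1beta1.PipelineServiceClient()

    # Initialize request argument(s); names and task definition are placeholders
    training_pipeline = aiplatform_v1beta1.TrainingPipeline(
        display_name="display_name_value",
        training_task_definition="training_task_definition_value",
        training_task_inputs=struct_pb2.Value(),
        model_to_upload=aiplatform_v1beta1.Model(
            display_name="display_name_value",
        ),
        # New in this change: version the trained model under an existing model
        parent_model="projects/my-project/locations/us-central1/models/123",
    )
    request = aiplatform_v1beta1.CreateTrainingPipelineRequest(
        parent="projects/my-project/locations/us-central1",
        training_pipeline=training_pipeline,
    )

    # Make the request and handle the response
    response = client.create_training_pipeline(request=request)
    print(response.name)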
@@ -129,7 +140,7 @@ class TrainingPipeline(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the TrainingPipeline was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline.LabelsEntry]): + labels (Mapping[str, str]): The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 @@ -181,6 +192,14 @@ class TrainingPipeline(proto.Message): number=7, message=model.Model, ) + model_id = proto.Field( + proto.STRING, + number=22, + ) + parent_model = proto.Field( + proto.STRING, + number=21, + ) state = proto.Field( proto.ENUM, number=9, diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py new file mode 100644 index 0000000000..540cfe1fb1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelVersion +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py new file mode 100644 index 0000000000..d8ee80ef9d --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelVersion +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py new file mode 100644 index 0000000000..a5b5a951a3 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelVersions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelVersions_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py new file mode 100644 index 0000000000..d717dd002d --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py new file mode 100644 index 0000000000..517425df71 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MergeVersionAliases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = await client.merge_version_aliases(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py new file mode 100644 index 0000000000..fe8cb60202 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MergeVersionAliases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync] diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1.json index 249a78cb01..89cfc722d2 100644 --- a/samples/generated_samples/snippet_metadata_aiplatform_v1.json +++ b/samples/generated_samples/snippet_metadata_aiplatform_v1.json @@ -1,16 +1,65 @@ { + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.aiplatform.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-cloud-aiplatform" + }, "snippets": [ { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.create_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.CreateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "CreateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_dataset" }, + "description": "Sample for CreateDataset", "file": "aiplatform_v1_generated_dataset_service_create_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_CreateDataset_async", "segments": [ { @@ -43,18 +92,58 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_create_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.create_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.CreateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "CreateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + 
}, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_dataset" }, + "description": "Sample for CreateDataset", "file": "aiplatform_v1_generated_dataset_service_create_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_CreateDataset_sync", "segments": [ { @@ -87,19 +176,55 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_create_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.delete_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.DeleteDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "DeleteDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_dataset" }, + "description": "Sample for DeleteDataset", "file": "aiplatform_v1_generated_dataset_service_delete_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_DeleteDataset_async", "segments": [ { @@ -132,18 +257,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_delete_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.delete_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.DeleteDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "DeleteDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_dataset" }, + "description": "Sample for DeleteDataset", "file": "aiplatform_v1_generated_dataset_service_delete_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_DeleteDataset_sync", "segments": [ { @@ -176,19 +337,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_delete_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.export_data", "method": { + 
"fullName": "google.cloud.aiplatform.v1.DatasetService.ExportData", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ExportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_data" }, + "description": "Sample for ExportData", "file": "aiplatform_v1_generated_dataset_service_export_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ExportData_async", "segments": [ { @@ -221,18 +422,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_export_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.export_data", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ExportData", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ExportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_data" }, + "description": "Sample for ExportData", "file": "aiplatform_v1_generated_dataset_service_export_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ExportData_sync", "segments": [ { @@ -265,19 +506,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_export_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.get_annotation_spec", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "GetAnnotationSpec" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AnnotationSpec", + "shortName": "get_annotation_spec" }, + "description": "Sample for 
GetAnnotationSpec", "file": "aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_GetAnnotationSpec_async", "segments": [ { @@ -310,18 +587,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.get_annotation_spec", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "GetAnnotationSpec" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AnnotationSpec", + "shortName": "get_annotation_spec" }, + "description": "Sample for GetAnnotationSpec", "file": "aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_GetAnnotationSpec_sync", "segments": [ { @@ -354,19 +667,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.get_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "GetDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "get_dataset" }, + "description": "Sample for GetDataset", "file": "aiplatform_v1_generated_dataset_service_get_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_GetDataset_async", "segments": [ { @@ -399,18 +748,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_get_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.get_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": 
"GetDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "get_dataset" }, + "description": "Sample for GetDataset", "file": "aiplatform_v1_generated_dataset_service_get_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_GetDataset_sync", "segments": [ { @@ -443,19 +828,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_get_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.import_data", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ImportData", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ImportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_data" }, + "description": "Sample for ImportData", "file": "aiplatform_v1_generated_dataset_service_import_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ImportData_async", "segments": [ { @@ -488,18 +913,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_import_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.import_data", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ImportData", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ImportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_data" }, + "description": "Sample for ImportData", "file": "aiplatform_v1_generated_dataset_service_import_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ImportData_sync", 
"segments": [ { @@ -532,19 +997,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_import_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.list_annotations", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListAnnotations", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListAnnotations" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager", + "shortName": "list_annotations" }, + "description": "Sample for ListAnnotations", "file": "aiplatform_v1_generated_dataset_service_list_annotations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ListAnnotations_async", "segments": [ { @@ -577,18 +1078,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_list_annotations_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.list_annotations", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListAnnotations", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListAnnotations" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager", + "shortName": "list_annotations" }, + "description": "Sample for ListAnnotations", "file": "aiplatform_v1_generated_dataset_service_list_annotations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ListAnnotations_sync", "segments": [ { @@ -621,19 +1158,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_list_annotations_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.list_data_items", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDataItems", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDataItems" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager", + "shortName": "list_data_items" }, + "description": "Sample for ListDataItems", "file": "aiplatform_v1_generated_dataset_service_list_data_items_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ListDataItems_async", "segments": [ { @@ -666,18 +1239,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_list_data_items_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.list_data_items", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDataItems", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDataItems" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager", + "shortName": "list_data_items" }, + "description": "Sample for ListDataItems", "file": "aiplatform_v1_generated_dataset_service_list_data_items_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ListDataItems_sync", "segments": [ { @@ -710,19 +1319,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_list_data_items_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.list_datasets", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDatasets", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDatasets" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager", + "shortName": "list_datasets" }, + "description": "Sample for ListDatasets", "file": "aiplatform_v1_generated_dataset_service_list_datasets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ListDatasets_async", "segments": [ { @@ -755,18 +1400,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_dataset_service_list_datasets_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.list_datasets", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDatasets", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDatasets" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager", + "shortName": "list_datasets" }, + "description": "Sample for ListDatasets", "file": "aiplatform_v1_generated_dataset_service_list_datasets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_ListDatasets_sync", "segments": [ { @@ -799,19 +1480,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_list_datasets_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.update_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.UpdateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "UpdateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "update_dataset" }, + "description": "Sample for UpdateDataset", "file": "aiplatform_v1_generated_dataset_service_update_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_UpdateDataset_async", "segments": [ { @@ -844,18 +1565,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_update_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.update_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.UpdateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", "shortName": "DatasetService" }, "shortName": "UpdateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": 
"google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "update_dataset" }, + "description": "Sample for UpdateDataset", "file": "aiplatform_v1_generated_dataset_service_update_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_DatasetService_UpdateDataset_sync", "segments": [ { @@ -888,19 +1649,63 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_dataset_service_update_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.create_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "CreateEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_endpoint" }, + "description": "Sample for CreateEndpoint", "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_async", "segments": [ { @@ -933,18 +1738,62 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.create_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "CreateEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_endpoint" }, + "description": "Sample for CreateEndpoint", "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_sync", "segments": [ { @@ -977,19 +1826,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.delete_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeleteEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_endpoint" }, + "description": "Sample for DeleteEndpoint", "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_async", "segments": [ { @@ -1022,18 +1907,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.delete_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeleteEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_endpoint" }, + "description": "Sample for DeleteEndpoint", "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_sync", "segments": [ { @@ -1066,19 +1987,63 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.deploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeployModel" - } + }, + "parameters": [ + { + "name": 
"request", + "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" }, + "description": "Sample for DeployModel", "file": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_async", "segments": [ { @@ -1111,18 +2076,62 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.deploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" }, + "description": "Sample for DeployModel", "file": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_sync", "segments": [ { @@ -1155,19 +2164,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.get_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "GetEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "get_endpoint" }, + "description": "Sample for GetEndpoint", "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_async", "segments": [ { @@ -1200,18 +2245,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.get_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "GetEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "get_endpoint" }, + "description": "Sample for GetEndpoint", "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_sync", "segments": [ { @@ -1244,19 +2325,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.list_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.ListEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "ListEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager", + "shortName": "list_endpoints" }, + "description": "Sample for ListEndpoints", "file": "aiplatform_v1_generated_endpoint_service_list_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_ListEndpoints_async", "segments": [ { @@ -1289,18 +2406,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_list_endpoints_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.list_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.ListEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "ListEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager", + "shortName": "list_endpoints" }, + "description": "Sample for ListEndpoints", "file": "aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_ListEndpoints_sync", "segments": [ { @@ -1333,19 +2486,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.undeploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UndeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "UndeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_model" }, + "description": "Sample for UndeployModel", "file": "aiplatform_v1_generated_endpoint_service_undeploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_UndeployModel_async", "segments": [ { @@ -1378,18 +2575,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_undeploy_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.undeploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UndeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "UndeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_model" }, + "description": "Sample for UndeployModel", "file": "aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_UndeployModel_sync", "segments": [ { @@ -1422,19 +2663,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.update_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "UpdateEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "update_endpoint" }, + "description": "Sample for UpdateEndpoint", "file": "aiplatform_v1_generated_endpoint_service_update_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_UpdateEndpoint_async", "segments": [ { @@ -1467,18 +2748,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_update_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.update_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", "shortName": "EndpointService" }, "shortName": "UpdateEndpoint" - } - }, - "file": "aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py", + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "update_endpoint" + }, + "description": "Sample for UpdateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_EndpointService_UpdateEndpoint_sync", "segments": [ { @@ -1511,19 +2832,55 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": 
"FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "ReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" }, + "description": "Sample for ReadFeatureValues", "file": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async", "segments": [ { @@ -1556,18 +2913,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "ReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" }, + "description": "Sample for ReadFeatureValues", "file": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync", "segments": [ { @@ -1600,19 +2993,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", "shortName": 
"FeaturestoreOnlineServingService" }, "shortName": "StreamingReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" }, + "description": "Sample for StreamingReadFeatureValues", "file": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async", "segments": [ { @@ -1645,18 +3074,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "StreamingReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" }, + "description": "Sample for StreamingReadFeatureValues", "file": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync", "segments": [ { @@ -1689,19 +3154,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.batch_create_features", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchCreateFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": 
"Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_create_features" }, + "description": "Sample for BatchCreateFeatures", "file": "aiplatform_v1_generated_featurestore_service_batch_create_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_async", "segments": [ { @@ -1734,18 +3239,58 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_create_features_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.batch_create_features", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchCreateFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_create_features" }, + "description": "Sample for BatchCreateFeatures", "file": "aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_sync", "segments": [ { @@ -1778,19 +3323,55 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.batch_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_read_feature_values" }, + "description": "Sample for BatchReadFeatureValues", "file": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_async", "segments": [ { @@ -1823,18 +3404,54 @@ "start": 59, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.batch_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_read_feature_values" }, + "description": "Sample for BatchReadFeatureValues", "file": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_sync", "segments": [ { @@ -1867,19 +3484,63 @@ "start": 59, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.create_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_entity_type" }, + "description": "Sample for CreateEntityType", "file": "aiplatform_v1_generated_featurestore_service_create_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateEntityType_async", "segments": [ { @@ -1912,18 +3573,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_create_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + 
}, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.create_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_entity_type" }, + "description": "Sample for CreateEntityType", "file": "aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateEntityType_sync", "segments": [ { @@ -1956,19 +3661,63 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.create_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature" }, + "description": "Sample for CreateFeature", "file": "aiplatform_v1_generated_featurestore_service_create_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeature_async", "segments": [ { @@ -2001,18 +3750,62 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_create_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.create_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeatureRequest" + }, + { + "name": 
"parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature" }, + "description": "Sample for CreateFeature", "file": "aiplatform_v1_generated_featurestore_service_create_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeature_sync", "segments": [ { @@ -2045,19 +3838,63 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_create_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.create_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_featurestore" }, + "description": "Sample for CreateFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_create_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_async", "segments": [ { @@ -2090,18 +3927,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_create_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.create_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + 
"shortName": "create_featurestore" }, + "description": "Sample for CreateFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_sync", "segments": [ { @@ -2134,19 +4015,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.delete_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_entity_type" }, + "description": "Sample for DeleteEntityType", "file": "aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_async", "segments": [ { @@ -2179,18 +4100,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.delete_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_entity_type" }, + "description": "Sample for DeleteEntityType", "file": "aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_sync", "segments": [ { @@ -2223,19 +4184,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.delete_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature" }, + "description": "Sample for DeleteFeature", "file": "aiplatform_v1_generated_featurestore_service_delete_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeature_async", "segments": [ { @@ -2268,18 +4265,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.delete_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature" }, + "description": "Sample for DeleteFeature", "file": "aiplatform_v1_generated_featurestore_service_delete_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeature_sync", "segments": [ { @@ -2312,19 +4345,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.delete_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_featurestore" }, + "description": "Sample for DeleteFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_async", "segments": [ { @@ -2357,18 +4430,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.delete_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_featurestore" }, + "description": "Sample for DeleteFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_sync", "segments": [ { @@ -2401,19 +4514,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.export_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ExportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_feature_values" }, + "description": "Sample for ExportFeatureValues", "file": "aiplatform_v1_generated_featurestore_service_export_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_async", "segments": [ { @@ -2446,18 +4595,54 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_featurestore_service_export_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.export_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ExportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_feature_values" }, + "description": "Sample for ExportFeatureValues", "file": "aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_sync", "segments": [ { @@ -2490,19 +4675,55 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.get_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "get_entity_type" }, + "description": "Sample for GetEntityType", "file": "aiplatform_v1_generated_featurestore_service_get_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetEntityType_async", "segments": [ { @@ -2535,18 +4756,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_get_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.get_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEntityTypeRequest" + }, + { + 
"name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "get_entity_type" }, + "description": "Sample for GetEntityType", "file": "aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetEntityType_sync", "segments": [ { @@ -2579,19 +4836,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.get_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "get_feature" }, + "description": "Sample for GetFeature", "file": "aiplatform_v1_generated_featurestore_service_get_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeature_async", "segments": [ { @@ -2624,18 +4917,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_get_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.get_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "get_feature" }, + "description": "Sample for GetFeature", "file": "aiplatform_v1_generated_featurestore_service_get_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeature_sync", "segments": [ { @@ -2668,19 +4997,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_get_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": 
{ + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.get_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Featurestore", + "shortName": "get_featurestore" }, + "description": "Sample for GetFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_get_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_async", "segments": [ { @@ -2713,18 +5078,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_get_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.get_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Featurestore", + "shortName": "get_featurestore" }, + "description": "Sample for GetFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_sync", "segments": [ { @@ -2757,19 +5158,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.import_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ImportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_feature_values" }, + "description": "Sample for ImportFeatureValues", "file": "aiplatform_v1_generated_featurestore_service_import_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_async", "segments": [ { @@ -2802,18 +5239,54 @@ "start": 55, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_import_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.import_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ImportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_feature_values" }, + "description": "Sample for ImportFeatureValues", "file": "aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_sync", "segments": [ { @@ -2846,19 +5319,55 @@ "start": 55, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.list_entity_types", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListEntityTypes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesAsyncPager", + "shortName": "list_entity_types" }, + "description": "Sample for ListEntityTypes", "file": "aiplatform_v1_generated_featurestore_service_list_entity_types_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_async", "segments": [ { @@ -2891,18 +5400,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_featurestore_service_list_entity_types_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.list_entity_types", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListEntityTypes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesPager", + "shortName": "list_entity_types" }, + "description": "Sample for ListEntityTypes", "file": "aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_sync", "segments": [ { @@ -2935,21 +5480,57 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.list_features", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeatures" - } - }, - "file": "aiplatform_v1_generated_featurestore_service_list_features_async.py", - "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeatures_async", - "segments": [ + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesAsyncPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1_generated_featurestore_service_list_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeatures_async", + "segments": [ { "end": 45, "start": 27, @@ -2980,18 +5561,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_list_features_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.list_features", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures", "service": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesPager", + "shortName": "list_features" }, + "description": "Sample for ListFeatures", "file": "aiplatform_v1_generated_featurestore_service_list_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeatures_sync", "segments": [ { @@ -3024,19 +5641,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_list_features_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.list_featurestores", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeaturestores" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager", + "shortName": "list_featurestores" }, + "description": "Sample for ListFeaturestores", "file": "aiplatform_v1_generated_featurestore_service_list_featurestores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_async", "segments": [ { @@ -3069,18 +5722,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_list_featurestores_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.list_featurestores", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeaturestores" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresPager", + "shortName": "list_featurestores" }, + 
"description": "Sample for ListFeaturestores", "file": "aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_sync", "segments": [ { @@ -3113,19 +5802,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.search_features", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "SearchFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesAsyncPager", + "shortName": "search_features" }, + "description": "Sample for SearchFeatures", "file": "aiplatform_v1_generated_featurestore_service_search_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_SearchFeatures_async", "segments": [ { @@ -3158,18 +5887,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_search_features_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.search_features", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "SearchFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesPager", + "shortName": "search_features" }, + "description": "Sample for SearchFeatures", "file": "aiplatform_v1_generated_featurestore_service_search_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_SearchFeatures_sync", "segments": [ { @@ -3202,19 +5971,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_search_features_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.update_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "update_entity_type" }, + "description": "Sample for UpdateEntityType", "file": "aiplatform_v1_generated_featurestore_service_update_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_async", "segments": [ { @@ -3247,18 +6056,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_update_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.update_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "update_entity_type" }, + "description": "Sample for UpdateEntityType", "file": "aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_sync", "segments": [ { @@ -3291,19 +6140,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.update_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeature" 
- } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "update_feature" }, + "description": "Sample for UpdateFeature", "file": "aiplatform_v1_generated_featurestore_service_update_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeature_async", "segments": [ { @@ -3336,18 +6225,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_update_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.update_feature", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "update_feature" }, + "description": "Sample for UpdateFeature", "file": "aiplatform_v1_generated_featurestore_service_update_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeature_sync", "segments": [ { @@ -3380,19 +6309,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_update_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.update_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_featurestore" }, + "description": "Sample for UpdateFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_update_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_async", "segments": [ { @@ -3425,18 +6394,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_update_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.update_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_featurestore" }, + "description": "Sample for UpdateFeaturestore", "file": "aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_sync", "segments": [ { @@ -3469,19 +6478,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.create_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "CreateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index_endpoint" }, + "description": "Sample for CreateIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_async", "segments": [ { @@ -3514,18 +6563,58 @@ "start": 
50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.create_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "CreateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index_endpoint" }, + "description": "Sample for CreateIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_sync", "segments": [ { @@ -3558,19 +6647,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.delete_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeleteIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index_endpoint" }, + "description": "Sample for DeleteIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_async", "segments": [ { @@ -3603,18 +6728,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.delete_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", "service": { + "fullName": 
"google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeleteIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index_endpoint" }, + "description": "Sample for DeleteIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", "segments": [ { @@ -3647,19 +6808,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.deploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_index" }, + "description": "Sample for DeployIndex", "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_async", "segments": [ { @@ -3692,18 +6893,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.deploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation.Operation", + "shortName": "deploy_index" }, + "description": "Sample for DeployIndex", "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_sync", "segments": [ { @@ -3736,19 +6977,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.get_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "GetIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "get_index_endpoint" }, + "description": "Sample for GetIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_async", "segments": [ { @@ -3781,18 +7058,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.get_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "GetIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "get_index_endpoint" }, + "description": "Sample for GetIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_sync", "segments": [ { @@ -3825,19 +7138,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": 
"IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.list_index_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "ListIndexEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", + "shortName": "list_index_endpoints" }, + "description": "Sample for ListIndexEndpoints", "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_async", "segments": [ { @@ -3870,18 +7219,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.list_index_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "ListIndexEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", + "shortName": "list_index_endpoints" }, + "description": "Sample for ListIndexEndpoints", "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_sync", "segments": [ { @@ -3914,19 +7299,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.mutate_deployed_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "MutateDeployedIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", 
+ "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "mutate_deployed_index" }, + "description": "Sample for MutateDeployedIndex", "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_async", "segments": [ { @@ -3959,18 +7384,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.mutate_deployed_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "MutateDeployedIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "mutate_deployed_index" }, + "description": "Sample for MutateDeployedIndex", "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_sync", "segments": [ { @@ -4003,19 +7468,59 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.undeploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UndeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_index" }, + "description": "Sample for UndeployIndex", "file": 
"aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_async", "segments": [ { @@ -4048,18 +7553,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.undeploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UndeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_index" }, + "description": "Sample for UndeployIndex", "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_sync", "segments": [ { @@ -4092,19 +7637,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.update_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UpdateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, + "description": "Sample for UpdateIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_async", "segments": [ { @@ -4137,18 +7722,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.update_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UpdateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, + "description": "Sample for UpdateIndexEndpoint", "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", "segments": [ { @@ -4181,19 +7806,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.create_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "CreateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index" }, + "description": "Sample for CreateIndex", "file": "aiplatform_v1_generated_index_service_create_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_async", "segments": [ { @@ -4226,18 +7891,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_create_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.create_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "CreateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + 
"name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index" }, + "description": "Sample for CreateIndex", "file": "aiplatform_v1_generated_index_service_create_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_sync", "segments": [ { @@ -4270,19 +7975,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_create_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.delete_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "DeleteIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index" }, + "description": "Sample for DeleteIndex", "file": "aiplatform_v1_generated_index_service_delete_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_async", "segments": [ { @@ -4315,18 +8056,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_delete_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.delete_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "DeleteIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index" }, + "description": "Sample for DeleteIndex", "file": "aiplatform_v1_generated_index_service_delete_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_sync", "segments": [ { @@ -4359,19 +8136,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_delete_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + 
}, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.get_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "GetIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Index", + "shortName": "get_index" }, + "description": "Sample for GetIndex", "file": "aiplatform_v1_generated_index_service_get_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_async", "segments": [ { @@ -4404,18 +8217,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_get_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.get_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "GetIndex" - } - }, + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Index", + "shortName": "get_index" + }, + "description": "Sample for GetIndex", "file": "aiplatform_v1_generated_index_service_get_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_sync", "segments": [ { @@ -4448,19 +8297,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_get_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.list_indexes", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "ListIndexes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesAsyncPager", + "shortName": "list_indexes" }, + "description": "Sample for ListIndexes", "file": "aiplatform_v1_generated_index_service_list_indexes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1_generated_IndexService_ListIndexes_async", "segments": [ { @@ -4493,18 +8378,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_list_indexes_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.list_indexes", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "ListIndexes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesPager", + "shortName": "list_indexes" }, + "description": "Sample for ListIndexes", "file": "aiplatform_v1_generated_index_service_list_indexes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_sync", "segments": [ { @@ -4537,19 +8458,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_list_indexes_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.update_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "UpdateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_index" }, + "description": "Sample for UpdateIndex", "file": "aiplatform_v1_generated_index_service_update_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_async", "segments": [ { @@ -4582,18 +8543,58 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_update_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.update_index", "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, "shortName": "UpdateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_index" }, + "description": "Sample for UpdateIndex", "file": "aiplatform_v1_generated_index_service_update_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_sync", "segments": [ { @@ -4626,19 +8627,54 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_index_service_update_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" }, + "description": "Sample for CancelBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_async", "segments": [ { @@ -4669,18 +8705,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" }, + "description": "Sample for CancelBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_sync", "segments": [ { @@ -4711,19 +8782,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" }, + "description": "Sample for CancelCustomJob", "file": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_async", "segments": [ { @@ -4754,18 +8860,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" }, + "description": "Sample for CancelCustomJob", "file": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_sync", "segments": [ { @@ -4796,19 +8937,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" }, + "description": "Sample for 
CancelDataLabelingJob", "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_async", "segments": [ { @@ -4839,18 +9015,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" }, + "description": "Sample for CancelDataLabelingJob", "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_sync", "segments": [ { @@ -4881,19 +9092,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CancelHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" }, + "description": "Sample for CancelHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_async", "segments": [ { @@ -4924,18 +9170,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", 
"shortName": "JobService" }, "shortName": "CancelHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" }, + "description": "Sample for CancelHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_sync", "segments": [ { @@ -4966,19 +9247,59 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, + "description": "Sample for CreateBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_async", "segments": [ { @@ -5011,18 +9332,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": 
"create_batch_prediction_job" }, + "description": "Sample for CreateBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_sync", "segments": [ { @@ -5055,19 +9416,59 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "create_custom_job" }, + "description": "Sample for CreateCustomJob", "file": "aiplatform_v1_generated_job_service_create_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_async", "segments": [ { @@ -5100,18 +9501,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "create_custom_job" }, + "description": "Sample for CreateCustomJob", "file": "aiplatform_v1_generated_job_service_create_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_sync", "segments": [ { @@ -5144,19 +9585,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient.create_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, + "description": "Sample for CreateDataLabelingJob", "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_async", "segments": [ { @@ -5189,18 +9670,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, + "description": "Sample for CreateDataLabelingJob", "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_sync", "segments": [ { @@ -5233,19 +9754,59 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + 
"type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, + "description": "Sample for CreateHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_async", "segments": [ { @@ -5278,18 +9839,58 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, + "description": "Sample for CreateHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync", "segments": [ { @@ -5322,19 +9923,59 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, + "description": "Sample for CreateModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_async", "segments": [ { @@ -5367,18 +10008,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "CreateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, + "description": "Sample for CreateModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", "segments": [ { @@ -5411,19 +10092,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_batch_prediction_job" }, + "description": "Sample for DeleteBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_async", "segments": [ { @@ 
-5456,18 +10173,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_batch_prediction_job" }, + "description": "Sample for DeleteBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_sync", "segments": [ { @@ -5500,19 +10253,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_custom_job" }, + "description": "Sample for DeleteCustomJob", "file": "aiplatform_v1_generated_job_service_delete_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_async", "segments": [ { @@ -5545,18 +10334,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + 
"type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_custom_job" }, + "description": "Sample for DeleteCustomJob", "file": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_sync", "segments": [ { @@ -5589,19 +10414,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_data_labeling_job" }, + "description": "Sample for DeleteDataLabelingJob", "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async", "segments": [ { @@ -5634,18 +10495,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_data_labeling_job" }, + "description": "Sample for DeleteDataLabelingJob", "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync", "segments": [ { @@ -5678,19 +10575,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_hyperparameter_tuning_job" }, + "description": "Sample for DeleteHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async", "segments": [ { @@ -5723,18 +10656,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_hyperparameter_tuning_job" }, + "description": "Sample for DeleteHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync", "segments": [ { @@ -5767,19 +10736,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": 
"str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_deployment_monitoring_job" }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", "segments": [ { @@ -5812,18 +10817,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "DeleteModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_deployment_monitoring_job" }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", "segments": [ { @@ -5856,19 +10897,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, + "description": "Sample for GetBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_async", 
"segments": [ { @@ -5901,18 +10978,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, + "description": "Sample for GetBatchPredictionJob", "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync", "segments": [ { @@ -5945,19 +11058,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" }, + "description": "Sample for GetCustomJob", "file": "aiplatform_v1_generated_job_service_get_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_async", "segments": [ { @@ -5990,18 +11139,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + 
}, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" }, + "description": "Sample for GetCustomJob", "file": "aiplatform_v1_generated_job_service_get_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_sync", "segments": [ { @@ -6034,19 +11219,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, + "description": "Sample for GetDataLabelingJob", "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_async", "segments": [ { @@ -6079,18 +11300,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, + "description": "Sample for GetDataLabelingJob", "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_sync", "segments": [ { @@ -6123,19 +11380,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + 
"fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, + "description": "Sample for GetHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async", "segments": [ { @@ -6168,18 +11461,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, + "description": "Sample for GetHyperparameterTuningJob", "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync", "segments": [ { @@ -6212,19 +11541,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + 
"name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, + "description": "Sample for GetModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async", "segments": [ { @@ -6257,18 +11622,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "GetModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, + "description": "Sample for GetModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync", "segments": [ { @@ -6301,19 +11702,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_batch_prediction_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListBatchPredictionJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" }, + "description": "Sample for ListBatchPredictionJobs", "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async", "segments": [ { @@ -6346,18 +11783,54 @@ "start": 42, "type": 
"RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_batch_prediction_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListBatchPredictionJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": "list_batch_prediction_jobs" }, + "description": "Sample for ListBatchPredictionJobs", "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync", "segments": [ { @@ -6390,19 +11863,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_custom_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListCustomJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" }, + "description": "Sample for ListCustomJobs", "file": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_async", "segments": [ { @@ -6435,18 +11944,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_custom_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListCustomJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" }, + "description": "Sample for ListCustomJobs", "file": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_sync", "segments": [ { @@ -6479,19 +12024,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_data_labeling_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListDataLabelingJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" }, + "description": "Sample for ListDataLabelingJobs", "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_async", "segments": [ { @@ -6524,18 +12105,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_data_labeling_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListDataLabelingJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": "list_data_labeling_jobs" }, + "description": "Sample for ListDataLabelingJobs", "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync", "segments": [ { @@ -6568,19 +12185,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListHyperparameterTuningJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" }, + "description": "Sample for ListHyperparameterTuningJobs", "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async", "segments": [ { @@ -6613,18 +12266,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_hyperparameter_tuning_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListHyperparameterTuningJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" }, + "description": "Sample for ListHyperparameterTuningJobs", "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync", "segments": [ { @@ -6657,19 +12346,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": 
"JobService" }, "shortName": "ListModelDeploymentMonitoringJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" }, + "description": "Sample for ListModelDeploymentMonitoringJobs", "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async", "segments": [ { @@ -6702,18 +12427,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_model_deployment_monitoring_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ListModelDeploymentMonitoringJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": "list_model_deployment_monitoring_jobs" }, + "description": "Sample for ListModelDeploymentMonitoringJobs", "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", "segments": [ { @@ -6746,19 +12507,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "PauseModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } 
+ ], + "shortName": "pause_model_deployment_monitoring_job" }, + "description": "Sample for PauseModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async", "segments": [ { @@ -6789,18 +12585,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.pause_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "PauseModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" }, + "description": "Sample for PauseModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", "segments": [ { @@ -6831,19 +12662,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ResumeModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", "segments": [ { @@ -6874,18 +12740,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.resume_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "ResumeModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", "segments": [ { @@ -6916,19 +12817,59 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", "segments": [ { @@ -6961,18 +12902,58 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { + "fullName": 
"google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", "segments": [ { @@ -7005,19 +12986,59 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.update_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "UpdateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_deployment_monitoring_job" }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", "segments": [ { @@ -7050,18 +13071,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.update_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, "shortName": "UpdateModelDeploymentMonitoringJob" - } + 
}, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_deployment_monitoring_job" }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", "segments": [ { @@ -7094,19 +13155,63 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextArtifactsAndExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, + "description": "Sample for AddContextArtifactsAndExecutions", "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_async", "segments": [ { @@ -7139,18 +13244,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_artifacts_and_executions", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextArtifactsAndExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", 
+ "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, + "description": "Sample for AddContextArtifactsAndExecutions", "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", "segments": [ { @@ -7183,19 +13332,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_children", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextChildren" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, + "description": "Sample for AddContextChildren", "file": "aiplatform_v1_generated_metadata_service_add_context_children_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_async", "segments": [ { @@ -7228,18 +13417,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_children_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_children", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextChildren" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, + "description": "Sample for 
AddContextChildren", "file": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_sync", "segments": [ { @@ -7272,19 +13501,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_execution_events", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddExecutionEvents" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, + "description": "Sample for AddExecutionEvents", "file": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_async", "segments": [ { @@ -7317,18 +13586,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_execution_events", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddExecutionEvents" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, + "description": "Sample for AddExecutionEvents", "file": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_sync", "segments": [ { @@ -7361,19 +13670,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "create_artifact" }, + "description": "Sample for CreateArtifact", "file": "aiplatform_v1_generated_metadata_service_create_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_async", "segments": [ { @@ -7406,18 +13759,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "create_artifact" }, + "description": "Sample for CreateArtifact", "file": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_sync", "segments": [ { @@ -7450,19 +13847,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" + }, + { + 
"name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "create_context" }, + "description": "Sample for CreateContext", "file": "aiplatform_v1_generated_metadata_service_create_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_async", "segments": [ { @@ -7495,18 +13936,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "create_context" }, + "description": "Sample for CreateContext", "file": "aiplatform_v1_generated_metadata_service_create_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_sync", "segments": [ { @@ -7539,19 +14024,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "create_execution" }, + "description": "Sample for CreateExecution", "file": 
"aiplatform_v1_generated_metadata_service_create_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_async", "segments": [ { @@ -7584,18 +14113,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "create_execution" }, + "description": "Sample for CreateExecution", "file": "aiplatform_v1_generated_metadata_service_create_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_sync", "segments": [ { @@ -7628,19 +14201,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, + "description": "Sample for CreateMetadataSchema", "file": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_async", "segments": [ { @@ -7673,18 +14290,62 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, + "description": "Sample for CreateMetadataSchema", "file": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_sync", "segments": [ { @@ -7717,19 +14378,63 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_metadata_store" }, + "description": "Sample for CreateMetadataStore", "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_async", "segments": [ { @@ -7762,18 +14467,62 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, 
"shortName": "CreateMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_metadata_store" }, + "description": "Sample for CreateMetadataStore", "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_sync", "segments": [ { @@ -7806,19 +14555,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_artifact" }, + "description": "Sample for DeleteArtifact", "file": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_async", "segments": [ { @@ -7851,18 +14636,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_artifact" }, + "description": "Sample for DeleteArtifact", "file": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_sync", "segments": [ { @@ -7895,19 +14716,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_context" }, + "description": "Sample for DeleteContext", "file": "aiplatform_v1_generated_metadata_service_delete_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_async", "segments": [ { @@ -7940,18 +14797,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_context" }, + "description": "Sample for DeleteContext", "file": "aiplatform_v1_generated_metadata_service_delete_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_sync", "segments": [ { @@ -7984,19 +14877,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteExecution" - } + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_execution" }, + "description": "Sample for DeleteExecution", "file": "aiplatform_v1_generated_metadata_service_delete_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_async", "segments": [ { @@ -8029,18 +14958,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_execution" }, + "description": "Sample for DeleteExecution", "file": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_sync", "segments": [ { @@ -8073,19 +15038,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_metadata_store" }, + "description": "Sample for DeleteMetadataStore", "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_async", "segments": [ { @@ -8118,18 +15119,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_metadata_store" }, + "description": "Sample for DeleteMetadataStore", "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_sync", "segments": [ { @@ -8162,19 +15199,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "get_artifact" }, + "description": "Sample for GetArtifact", "file": "aiplatform_v1_generated_metadata_service_get_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_async", "segments": [ { @@ -8207,18 +15280,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "get_artifact" }, + "description": "Sample for GetArtifact", "file": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_sync", "segments": [ { @@ -8251,19 +15360,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "get_context" }, + "description": "Sample for GetContext", "file": "aiplatform_v1_generated_metadata_service_get_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_async", "segments": [ { @@ -8296,18 +15441,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "get_context" }, + "description": "Sample for GetContext", "file": "aiplatform_v1_generated_metadata_service_get_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_sync", "segments": [ { @@ -8340,19 +15521,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_execution", "method": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService.GetExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "get_execution" }, + "description": "Sample for GetExecution", "file": "aiplatform_v1_generated_metadata_service_get_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_async", "segments": [ { @@ -8385,18 +15602,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "get_execution" }, + "description": "Sample for GetExecution", "file": "aiplatform_v1_generated_metadata_service_get_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_sync", "segments": [ { @@ -8429,19 +15682,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, + "description": "Sample for GetMetadataSchema", "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_async", "segments": [ { @@ -8474,18 +15763,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, + "description": "Sample for GetMetadataSchema", "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_sync", "segments": [ { @@ -8518,19 +15843,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", + "shortName": "get_metadata_store" }, + "description": "Sample for GetMetadataStore", "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_async", "segments": [ { @@ -8563,18 +15924,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataStore" - } 
+ }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", + "shortName": "get_metadata_store" }, + "description": "Sample for GetMetadataStore", "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_sync", "segments": [ { @@ -8607,19 +16004,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager", + "shortName": "list_artifacts" }, + "description": "Sample for ListArtifacts", "file": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_async", "segments": [ { @@ -8652,18 +16085,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager", + "shortName": "list_artifacts" }, + "description": "Sample for ListArtifacts", "file": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_sync", "segments": [ { @@ -8696,19 +16165,55 
@@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager", + "shortName": "list_contexts" }, + "description": "Sample for ListContexts", "file": "aiplatform_v1_generated_metadata_service_list_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_async", "segments": [ { @@ -8741,18 +16246,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_contexts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager", + "shortName": "list_contexts" }, + "description": "Sample for ListContexts", "file": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_sync", "segments": [ { @@ -8785,19 +16326,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_executions", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" + }, + { + "name": 
"parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager", + "shortName": "list_executions" }, + "description": "Sample for ListExecutions", "file": "aiplatform_v1_generated_metadata_service_list_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_async", "segments": [ { @@ -8830,18 +16407,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_executions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_executions", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager", + "shortName": "list_executions" }, + "description": "Sample for ListExecutions", "file": "aiplatform_v1_generated_metadata_service_list_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_sync", "segments": [ { @@ -8874,19 +16487,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_executions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_schemas", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataSchemas" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", + "shortName": "list_metadata_schemas" }, + "description": "Sample for ListMetadataSchemas", "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_async", "segments": [ { @@ -8919,18 +16568,54 @@ "start": 42, "type": 
"RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_schemas", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataSchemas" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager", + "shortName": "list_metadata_schemas" }, + "description": "Sample for ListMetadataSchemas", "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_sync", "segments": [ { @@ -8963,19 +16648,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_stores", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataStores" - } - }, - "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py", + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", + "shortName": "list_metadata_stores" + }, + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_async", "segments": [ { @@ -9008,18 +16729,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_stores", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": 
"MetadataService" }, "shortName": "ListMetadataStores" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager", + "shortName": "list_metadata_stores" }, + "description": "Sample for ListMetadataStores", "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_sync", "segments": [ { @@ -9052,19 +16809,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_artifacts" }, + "description": "Sample for PurgeArtifacts", "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_async", "segments": [ { @@ -9097,18 +16890,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_artifacts" }, + "description": "Sample for PurgeArtifacts", "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1_generated_MetadataService_PurgeArtifacts_sync", "segments": [ { @@ -9141,19 +16970,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_contexts" }, + "description": "Sample for PurgeContexts", "file": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_async", "segments": [ { @@ -9186,18 +17051,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_contexts" }, + "description": "Sample for PurgeContexts", "file": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_sync", "segments": [ { @@ -9230,19 +17131,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_executions", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_executions" }, + "description": "Sample for PurgeExecutions", "file": "aiplatform_v1_generated_metadata_service_purge_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_async", "segments": [ { @@ -9275,18 +17212,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_purge_executions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_executions", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_executions" }, + "description": "Sample for PurgeExecutions", "file": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_sync", "segments": [ { @@ -9319,19 +17292,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryArtifactLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, + "description": "Sample for QueryArtifactLineageSubgraph", "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_async", "segments": [ { @@ -9364,18 
+17373,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_artifact_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryArtifactLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, + "description": "Sample for QueryArtifactLineageSubgraph", "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", "segments": [ { @@ -9408,19 +17453,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_context_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryContextLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, + "description": "Sample for QueryContextLineageSubgraph", "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_async", "segments": [ { @@ -9453,18 +17534,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_context_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", "service": { + 
"fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryContextLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, + "description": "Sample for QueryContextLineageSubgraph", "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_sync", "segments": [ { @@ -9497,19 +17614,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryExecutionInputsAndOutputs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, + "description": "Sample for QueryExecutionInputsAndOutputs", "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", "segments": [ { @@ -9542,18 +17695,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_execution_inputs_and_outputs", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryExecutionInputsAndOutputs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], 
+ "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, + "description": "Sample for QueryExecutionInputsAndOutputs", "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", "segments": [ { @@ -9586,19 +17775,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "update_artifact" }, + "description": "Sample for UpdateArtifact", "file": "aiplatform_v1_generated_metadata_service_update_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_async", "segments": [ { @@ -9631,18 +17860,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_update_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "update_artifact" }, + "description": "Sample for UpdateArtifact", "file": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_sync", "segments": [ { @@ -9675,19 +17944,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_metadata_service_update_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "update_context" }, + "description": "Sample for UpdateContext", "file": "aiplatform_v1_generated_metadata_service_update_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_async", "segments": [ { @@ -9720,18 +18029,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_update_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_context", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "update_context" }, + "description": "Sample for UpdateContext", "file": "aiplatform_v1_generated_metadata_service_update_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_sync", "segments": [ { @@ -9764,19 +18113,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_update_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateExecution" - } + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "update_execution" }, + "description": "Sample for UpdateExecution", "file": "aiplatform_v1_generated_metadata_service_update_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_async", "segments": [ { @@ -9809,18 +18198,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_update_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_execution", "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "update_execution" }, + "description": "Sample for UpdateExecution", "file": "aiplatform_v1_generated_metadata_service_update_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_sync", "segments": [ { @@ -9853,19 +18282,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_metadata_service_update_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.batch_migrate_resources", "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", "shortName": "MigrationService" }, "shortName": "BatchMigrateResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "batch_migrate_resources" }, + "description": "Sample for BatchMigrateResources", "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_async", "segments": [ { @@ -9898,18 +18367,58 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.batch_migrate_resources", "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", "shortName": "MigrationService" }, "shortName": "BatchMigrateResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_migrate_resources" }, + "description": "Sample for BatchMigrateResources", "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_sync", "segments": [ { @@ -9942,19 +18451,55 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.search_migratable_resources", "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", "shortName": "MigrationService" }, "shortName": "SearchMigratableResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", + "shortName": "search_migratable_resources" }, + "description": "Sample for SearchMigratableResources", "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_async", "segments": [ { @@ -9987,18 +18532,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] 
+ ], + "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.search_migratable_resources", "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", "shortName": "MigrationService" }, "shortName": "SearchMigratableResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager", + "shortName": "search_migratable_resources" }, + "description": "Sample for SearchMigratableResources", "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_sync", "segments": [ { @@ -10031,19 +18612,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "DeleteModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" }, + "description": "Sample for DeleteModel", "file": "aiplatform_v1_generated_model_service_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_async", "segments": [ { @@ -10076,18 +18693,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_delete_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "DeleteModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + 
}, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" }, + "description": "Sample for DeleteModel", "file": "aiplatform_v1_generated_model_service_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_sync", "segments": [ { @@ -10120,19 +18773,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_delete_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.export_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ExportModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ExportModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" }, + "description": "Sample for ExportModel", "file": "aiplatform_v1_generated_model_service_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_async", "segments": [ { @@ -10165,18 +18858,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_export_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.export_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ExportModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ExportModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" }, + "description": "Sample for ExportModel", "file": "aiplatform_v1_generated_model_service_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_sync", "segments": [ { @@ -10209,19 +18942,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_export_model_sync.py" }, { + "canonical": 
true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation_slice", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "GetModelEvaluationSlice" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" }, + "description": "Sample for GetModelEvaluationSlice", "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async", "segments": [ { @@ -10254,18 +19023,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation_slice", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "GetModelEvaluationSlice" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" }, + "description": "Sample for GetModelEvaluationSlice", "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_sync", "segments": [ { @@ -10298,19 +19103,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "GetModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", 
+ "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" }, + "description": "Sample for GetModelEvaluation", "file": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_async", "segments": [ { @@ -10343,18 +19184,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "GetModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" }, + "description": "Sample for GetModelEvaluation", "file": "aiplatform_v1_generated_model_service_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_sync", "segments": [ { @@ -10387,19 +19264,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "GetModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "get_model" }, + "description": "Sample for GetModel", "file": "aiplatform_v1_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_GetModel_async", "segments": [ { @@ -10432,18 +19345,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_get_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.ModelServiceClient.get_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "GetModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "get_model" }, + "description": "Sample for GetModel", "file": "aiplatform_v1_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_GetModel_sync", "segments": [ { @@ -10476,19 +19425,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_get_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.import_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ImportModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "import_model_evaluation" }, + "description": "Sample for ImportModelEvaluation", "file": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_async", "segments": [ { @@ -10521,18 +19510,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.import_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ImportModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" 
+ } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "import_model_evaluation" }, + "description": "Sample for ImportModelEvaluation", "file": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync", "segments": [ { @@ -10565,19 +19594,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluation_slices", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ListModelEvaluationSlices" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", + "shortName": "list_model_evaluation_slices" }, + "description": "Sample for ListModelEvaluationSlices", "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_async", "segments": [ { @@ -10610,18 +19675,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluation_slices", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ListModelEvaluationSlices" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager", + "shortName": "list_model_evaluation_slices" }, + "description": "Sample for ListModelEvaluationSlices", "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_sync", "segments": [ { @@ -10654,19 +19755,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluations", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ListModelEvaluations" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" }, + "description": "Sample for ListModelEvaluations", "file": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_async", "segments": [ { @@ -10699,18 +19836,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluations", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ListModelEvaluations" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" }, + "description": "Sample for ListModelEvaluations", "file": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_sync", "segments": [ { @@ -10743,19 +19916,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_models", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ListModels" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" + 
}, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" }, + "description": "Sample for ListModels", "file": "aiplatform_v1_generated_model_service_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ListModels_async", "segments": [ { @@ -10788,18 +19997,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_list_models_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_models", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "ListModels" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" }, + "description": "Sample for ListModels", "file": "aiplatform_v1_generated_model_service_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_ListModels_sync", "segments": [ { @@ -10832,19 +20077,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_list_models_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.update_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "UpdateModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "update_model" }, + "description": "Sample for UpdateModel", "file": "aiplatform_v1_generated_model_service_update_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_async", "segments": [ { @@ -10877,18 +20162,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_update_model_async.py" }, { + 
"canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.update_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "UpdateModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "update_model" }, + "description": "Sample for UpdateModel", "file": "aiplatform_v1_generated_model_service_update_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_sync", "segments": [ { @@ -10921,19 +20246,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_update_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.upload_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "UploadModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upload_model" }, + "description": "Sample for UploadModel", "file": "aiplatform_v1_generated_model_service_upload_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_async", "segments": [ { @@ -10966,18 +20331,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_upload_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.upload_model", "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, "shortName": "UploadModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "upload_model" }, + "description": "Sample for UploadModel", "file": "aiplatform_v1_generated_model_service_upload_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_sync", "segments": [ { @@ -11010,19 +20415,54 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_model_service_upload_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.cancel_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" }, + "description": "Sample for CancelPipelineJob", "file": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CancelPipelineJob_async", "segments": [ { @@ -11053,18 +20493,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.cancel_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" }, + "description": "Sample for CancelPipelineJob", "file": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CancelPipelineJob_sync", "segments": [ { @@ -11095,19 +20570,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.PipelineServiceAsyncClient.cancel_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" }, + "description": "Sample for CancelTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_async", "segments": [ { @@ -11138,18 +20648,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.cancel_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" }, + "description": "Sample for CancelTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_sync", "segments": [ { @@ -11180,19 +20725,63 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.create_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreatePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "create_pipeline_job" }, + "description": "Sample for CreatePipelineJob", "file": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CreatePipelineJob_async", "segments": [ { @@ -11225,18 +20814,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.create_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreatePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "create_pipeline_job" }, + "description": "Sample for CreatePipelineJob", "file": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CreatePipelineJob_sync", "segments": [ { @@ -11269,19 +20902,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.create_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreateTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "create_training_pipeline" }, + "description": "Sample for CreateTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_async", "segments": [ { @@ -11314,18 +20987,58 @@ "start": 
48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.create_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreateTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "create_training_pipeline" }, + "description": "Sample for CreateTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_sync", "segments": [ { @@ -11358,19 +21071,55 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.delete_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeletePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_pipeline_job" }, + "description": "Sample for DeletePipelineJob", "file": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_DeletePipelineJob_async", "segments": [ { @@ -11403,18 +21152,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.delete_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": 
"DeletePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_pipeline_job" }, + "description": "Sample for DeletePipelineJob", "file": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_DeletePipelineJob_sync", "segments": [ { @@ -11447,19 +21232,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.delete_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeleteTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_training_pipeline" }, + "description": "Sample for DeleteTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_async", "segments": [ { @@ -11492,18 +21313,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.delete_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeleteTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_training_pipeline" }, + "description": "Sample for DeleteTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync", "segments": [ { @@ -11536,19 +21393,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.get_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "get_pipeline_job" }, + "description": "Sample for GetPipelineJob", "file": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_GetPipelineJob_async", "segments": [ { @@ -11581,18 +21474,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.get_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "get_pipeline_job" }, + "description": "Sample for GetPipelineJob", "file": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_GetPipelineJob_sync", "segments": [ { @@ -11625,19 +21554,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.get_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetTrainingPipeline" - } + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "get_training_pipeline" }, + "description": "Sample for GetTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_GetTrainingPipeline_async", "segments": [ { @@ -11670,18 +21635,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.get_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "get_training_pipeline" }, + "description": "Sample for GetTrainingPipeline", "file": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_GetTrainingPipeline_sync", "segments": [ { @@ -11714,19 +21715,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.list_pipeline_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListPipelineJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager", + "shortName": "list_pipeline_jobs" }, + "description": "Sample for ListPipelineJobs", "file": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1_generated_PipelineService_ListPipelineJobs_async", "segments": [ { @@ -11759,18 +21796,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.list_pipeline_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListPipelineJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager", + "shortName": "list_pipeline_jobs" }, + "description": "Sample for ListPipelineJobs", "file": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_ListPipelineJobs_sync", "segments": [ { @@ -11803,19 +21876,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.list_training_pipelines", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines", "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListTrainingPipelines" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager", + "shortName": "list_training_pipelines" }, + "description": "Sample for ListTrainingPipelines", "file": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_ListTrainingPipelines_async", "segments": [ { @@ -11848,18 +21957,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.list_training_pipelines", "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines", "service": { + "fullName": 
"google.cloud.aiplatform.v1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListTrainingPipelines" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager", + "shortName": "list_training_pipelines" }, + "description": "Sample for ListTrainingPipelines", "file": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PipelineService_ListTrainingPipelines_sync", "segments": [ { @@ -11892,19 +22037,67 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.explain", "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Explain", "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", "shortName": "PredictionService" }, "shortName": "Explain" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ExplainResponse", + "shortName": "explain" }, + "description": "Sample for Explain", "file": "aiplatform_v1_generated_prediction_service_explain_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PredictionService_Explain_async", "segments": [ { @@ -11937,18 +22130,66 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_prediction_service_explain_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.explain", "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Explain", "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", "shortName": "PredictionService" }, "shortName": "Explain" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ExplainResponse", + "shortName": "explain" }, + "description": "Sample for Explain", "file": "aiplatform_v1_generated_prediction_service_explain_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PredictionService_Explain_sync", "segments": [ { @@ -11981,19 +22222,63 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_prediction_service_explain_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.predict", "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Predict", "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", "shortName": "PredictionService" }, "shortName": "Predict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PredictResponse", + "shortName": "predict" }, + "description": "Sample for Predict", "file": "aiplatform_v1_generated_prediction_service_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PredictionService_Predict_async", "segments": [ { @@ -12026,18 +22311,62 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_prediction_service_predict_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.predict", "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Predict", "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", "shortName": "PredictionService" }, "shortName": "Predict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PredictResponse", + "shortName": "predict" }, + "description": "Sample for Predict", "file": "aiplatform_v1_generated_prediction_service_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PredictionService_Predict_sync", "segments": [ { @@ -12070,19 +22399,59 @@ "start": 46, "type": "RESPONSE_HANDLING" 
} - ] + ], + "title": "aiplatform_v1_generated_prediction_service_predict_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.raw_predict", "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.RawPredict", "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", "shortName": "PredictionService" }, "shortName": "RawPredict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" }, + "description": "Sample for RawPredict", "file": "aiplatform_v1_generated_prediction_service_raw_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PredictionService_RawPredict_async", "segments": [ { @@ -12115,18 +22484,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_prediction_service_raw_predict_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.raw_predict", "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.RawPredict", "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", "shortName": "PredictionService" }, "shortName": "RawPredict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" }, + "description": "Sample for RawPredict", "file": "aiplatform_v1_generated_prediction_service_raw_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_PredictionService_RawPredict_sync", "segments": [ { @@ -12159,19 +22568,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_prediction_service_raw_predict_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.create_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "CreateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_specialist_pool" }, + "description": "Sample for CreateSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async", "segments": [ { @@ -12204,18 +22653,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.create_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "CreateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_specialist_pool" }, + "description": "Sample for CreateSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_sync", "segments": [ { @@ -12248,19 +22737,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.delete_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "DeleteSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"delete_specialist_pool" }, + "description": "Sample for DeleteSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_async", "segments": [ { @@ -12293,18 +22818,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.delete_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "DeleteSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_specialist_pool" }, + "description": "Sample for DeleteSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_sync", "segments": [ { @@ -12337,19 +22898,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.get_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "GetSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.SpecialistPool", + "shortName": "get_specialist_pool" }, + "description": "Sample for GetSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_async", "segments": [ { @@ -12382,18 +22979,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.get_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "GetSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.SpecialistPool", + "shortName": "get_specialist_pool" }, + "description": "Sample for GetSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_sync", "segments": [ { @@ -12426,19 +23059,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.list_specialist_pools", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "ListSpecialistPools" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager", + "shortName": "list_specialist_pools" }, + "description": "Sample for ListSpecialistPools", "file": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_async", "segments": [ { @@ -12471,18 +23140,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.list_specialist_pools", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "ListSpecialistPools" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager", + "shortName": "list_specialist_pools" }, + "description": "Sample for ListSpecialistPools", "file": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_sync", "segments": [ { @@ -12515,19 +23220,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.update_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "UpdateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_specialist_pool" }, + "description": "Sample for UpdateSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_async", "segments": [ { @@ -12560,18 +23305,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.update_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "UpdateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + 
"type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_specialist_pool" }, + "description": "Sample for UpdateSpecialistPool", "file": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_sync", "segments": [ { @@ -12604,19 +23389,59 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.batch_create_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" }, + "description": "Sample for BatchCreateTensorboardRuns", "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_async", "segments": [ { @@ -12649,18 +23474,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.batch_create_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" }, + "description": "Sample for BatchCreateTensorboardRuns", "file": 
"aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_sync", "segments": [ { @@ -12693,19 +23558,59 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.batch_create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" }, + "description": "Sample for BatchCreateTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async", "segments": [ { @@ -12738,18 +23643,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.batch_create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" }, + "description": "Sample for BatchCreateTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync", "segments": [ { @@ -12782,19 +23727,55 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.batch_read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", "file": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async", "segments": [ { @@ -12827,18 +23808,54 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.batch_read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", "file": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync", "segments": [ { @@ -12871,19 +23888,63 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" }, + "description": "Sample for CreateTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_async", "segments": [ { @@ -12916,18 +23977,62 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" }, + "description": "Sample for CreateTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_sync", "segments": [ { @@ -12960,19 +24065,63 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { 
"async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "create_tensorboard_run" }, + "description": "Sample for CreateTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_async", "segments": [ { @@ -13005,18 +24154,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "create_tensorboard_run" }, + "description": "Sample for CreateTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_sync", "segments": [ { @@ -13049,19 +24242,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard_time_series", "method": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" }, + "description": "Sample for CreateTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_async", "segments": [ { @@ -13094,18 +24327,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" }, + "description": "Sample for CreateTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_sync", "segments": [ { @@ -13138,19 +24411,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_tensorboard" }, + "description": "Sample for CreateTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboard_async", "segments": [ { @@ -13183,18 +24496,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_tensorboard" }, + "description": "Sample for CreateTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboard_sync", "segments": [ { @@ -13227,19 +24580,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_experiment" }, + "description": "Sample for DeleteTensorboardExperiment", "file": 
"aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_async", "segments": [ { @@ -13272,18 +24661,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_experiment" }, + "description": "Sample for DeleteTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_sync", "segments": [ { @@ -13316,19 +24741,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_run" }, + "description": "Sample for DeleteTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_async", "segments": [ { @@ -13361,18 +24822,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": 
"TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_run" }, + "description": "Sample for DeleteTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_sync", "segments": [ { @@ -13405,19 +24902,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_time_series" }, + "description": "Sample for DeleteTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_async", "segments": [ { @@ -13450,18 +24983,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_time_series" }, + "description": "Sample for DeleteTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync", "segments": [ { @@ -13494,19 +25063,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard" }, + "description": "Sample for DeleteTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboard_async", "segments": [ { @@ -13539,18 +25144,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard" }, + "description": "Sample for DeleteTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboard_sync", "segments": [ { @@ -13583,19 +25224,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.export_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ExportTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager", + "shortName": "export_tensorboard_time_series_data" }, + "description": "Sample for ExportTensorboardTimeSeriesData", "file": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async", "segments": [ { @@ -13628,18 +25305,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.export_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ExportTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager", + "shortName": "export_tensorboard_time_series_data" }, + "description": "Sample for ExportTensorboardTimeSeriesData", "file": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync", "segments": [ { @@ -13672,19 +25385,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + 
"fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" }, + "description": "Sample for GetTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_async", "segments": [ { @@ -13717,18 +25466,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" }, + "description": "Sample for GetTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_sync", "segments": [ { @@ -13761,19 +25546,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "get_tensorboard_run" }, + "description": "Sample for GetTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardRun_async", "segments": [ { @@ -13806,18 +25627,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "get_tensorboard_run" }, + "description": "Sample for GetTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardRun_sync", "segments": [ { @@ -13850,19 +25707,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" }, + "description": "Sample for GetTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_async", "segments": [ { @@ -13895,18 +25788,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + 
"title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" }, + "description": "Sample for GetTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_sync", "segments": [ { @@ -13939,19 +25868,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Tensorboard", + "shortName": "get_tensorboard" }, + "description": "Sample for GetTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboard_async", "segments": [ { @@ -13984,18 +25949,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Tensorboard", + "shortName": "get_tensorboard" }, + "description": "Sample for GetTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboard_sync", "segments": [ { @@ -14028,19 +26029,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboard_experiments", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardExperiments" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager", + "shortName": "list_tensorboard_experiments" }, + "description": "Sample for ListTensorboardExperiments", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_async", "segments": [ { @@ -14073,18 +26110,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboard_experiments", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardExperiments" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager", + "shortName": "list_tensorboard_experiments" }, + "description": "Sample for ListTensorboardExperiments", "file": 
"aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_sync", "segments": [ { @@ -14117,19 +26190,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager", + "shortName": "list_tensorboard_runs" }, + "description": "Sample for ListTensorboardRuns", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_async", "segments": [ { @@ -14162,18 +26271,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsPager", + "shortName": "list_tensorboard_runs" }, + "description": "Sample for ListTensorboardRuns", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_sync", "segments": [ { @@ -14206,19 +26351,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + 
"shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager", + "shortName": "list_tensorboard_time_series" }, + "description": "Sample for ListTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_async", "segments": [ { @@ -14251,18 +26432,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager", + "shortName": "list_tensorboard_time_series" }, + "description": "Sample for ListTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_sync", "segments": [ { @@ -14295,19 +26512,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboards", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboards", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboards" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager", + "shortName": "list_tensorboards" }, + "description": "Sample for ListTensorboards", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboards_async", "segments": [ { @@ -14340,18 +26593,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboards", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboards", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboards" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsPager", + "shortName": "list_tensorboards" }, + "description": "Sample for ListTensorboards", "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboards_sync", "segments": [ { @@ -14384,19 +26673,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardBlobData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" }, + "description": "Sample for ReadTensorboardBlobData", "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_async", "segments": [ { @@ -14429,18 +26754,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.read_tensorboard_blob_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardBlobData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" }, + "description": "Sample for ReadTensorboardBlobData", "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync", "segments": [ { @@ -14473,19 +26834,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" }, + "description": "Sample for ReadTensorboardTimeSeriesData", "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async", "segments": [ { @@ -14518,18 +26915,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", 
+ "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" }, + "description": "Sample for ReadTensorboardTimeSeriesData", "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync", "segments": [ { @@ -14562,19 +26995,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" }, + "description": "Sample for UpdateTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_async", "segments": [ { @@ -14607,18 +27080,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment", "service": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" }, + "description": "Sample for UpdateTensorboardExperiment", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_sync", "segments": [ { @@ -14651,19 +27164,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "update_tensorboard_run" }, + "description": "Sample for UpdateTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_async", "segments": [ { @@ -14696,18 +27249,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", 
+ "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "update_tensorboard_run" }, + "description": "Sample for UpdateTensorboardRun", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_sync", "segments": [ { @@ -14740,19 +27333,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" }, + "description": "Sample for UpdateTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_async", "segments": [ { @@ -14785,18 +27418,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "UpdateTensorboardTimeSeries" - } + "shortName": "UpdateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", 
+ "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" }, + "description": "Sample for UpdateTensorboardTimeSeries", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync", "segments": [ { @@ -14829,19 +27502,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_tensorboard" }, + "description": "Sample for UpdateTensorboard", "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboard_async", "segments": [ { @@ -14874,18 +27587,58 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_tensorboard" }, + "description": "Sample for UpdateTensorboard", "file": 
"aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboard_sync", "segments": [ { @@ -14918,19 +27671,59 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.write_tensorboard_experiment_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardExperimentData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" }, + "description": "Sample for WriteTensorboardExperimentData", "file": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_async", "segments": [ { @@ -14963,18 +27756,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.write_tensorboard_experiment_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardExperimentData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" }, + "description": "Sample for WriteTensorboardExperimentData", "file": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", 
"regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_sync", "segments": [ { @@ -15007,19 +27840,59 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.write_tensorboard_run_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardRunData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" }, + "description": "Sample for WriteTensorboardRunData", "file": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_async", "segments": [ { @@ -15052,18 +27925,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.write_tensorboard_run_data", "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData", "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardRunData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" }, + "description": "Sample for WriteTensorboardRunData", "file": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_sync", "segments": [ { @@ -15096,19 +28009,51 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py" }, { + 
"canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.add_trial_measurement", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "AddTrialMeasurement" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "add_trial_measurement" }, + "description": "Sample for AddTrialMeasurement", "file": "aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_AddTrialMeasurement_async", "segments": [ { @@ -15141,18 +28086,50 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.add_trial_measurement", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "AddTrialMeasurement" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "add_trial_measurement" }, + "description": "Sample for AddTrialMeasurement", "file": "aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_AddTrialMeasurement_sync", "segments": [ { @@ -15185,19 +28162,51 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.check_trial_early_stopping_state", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CheckTrialEarlyStoppingState" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } 
+ ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "check_trial_early_stopping_state" }, + "description": "Sample for CheckTrialEarlyStoppingState", "file": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_async", "segments": [ { @@ -15230,18 +28239,50 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.check_trial_early_stopping_state", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CheckTrialEarlyStoppingState" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "check_trial_early_stopping_state" }, + "description": "Sample for CheckTrialEarlyStoppingState", "file": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_sync", "segments": [ { @@ -15274,19 +28315,51 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.complete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CompleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CompleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "complete_trial" }, + "description": "Sample for CompleteTrial", "file": "aiplatform_v1_generated_vizier_service_complete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CompleteTrial_async", "segments": [ { @@ -15319,18 +28392,50 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_complete_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": 
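The Vizier trial-lifecycle entries above (AddTrialMeasurement, CheckTrialEarlyStoppingState, CompleteTrial) each take only a request object plus the standard retry/timeout/metadata arguments. A minimal sketch of completing a trial, assuming a placeholder trial resource name:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.VizierServiceClient()

# Placeholder trial name; substitute a real study/trial ID.
trial_name = "projects/my-project/locations/us-central1/studies/1234/trials/1"

# Per the resultType in the metadata, complete_trial returns a Trial.
trial = client.complete_trial(
    request=aiplatform_v1.CompleteTrialRequest(name=trial_name)
)
print(trial.state)
```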
"google.cloud.aiplatform_v1.VizierServiceClient.complete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CompleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CompleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "complete_trial" }, + "description": "Sample for CompleteTrial", "file": "aiplatform_v1_generated_vizier_service_complete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CompleteTrial_sync", "segments": [ { @@ -15363,19 +28468,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_complete_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.create_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CreateStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "create_study" }, + "description": "Sample for CreateStudy", "file": "aiplatform_v1_generated_vizier_service_create_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CreateStudy_async", "segments": [ { @@ -15408,18 +28553,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_create_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.create_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CreateStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "create_study" }, + "description": "Sample for CreateStudy", "file": 
"aiplatform_v1_generated_vizier_service_create_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CreateStudy_sync", "segments": [ { @@ -15452,19 +28637,59 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_create_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.create_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CreateTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "create_trial" }, + "description": "Sample for CreateTrial", "file": "aiplatform_v1_generated_vizier_service_create_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CreateTrial_async", "segments": [ { @@ -15497,18 +28722,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_create_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.create_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "CreateTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "create_trial" }, + "description": "Sample for CreateTrial", "file": "aiplatform_v1_generated_vizier_service_create_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_CreateTrial_sync", "segments": [ { @@ -15541,19 +28806,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_create_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.delete_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteStudy", "service": { + "fullName": 
"google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" }, + "description": "Sample for DeleteStudy", "file": "aiplatform_v1_generated_vizier_service_delete_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_DeleteStudy_async", "segments": [ { @@ -15584,18 +28884,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_delete_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.delete_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" }, + "description": "Sample for DeleteStudy", "file": "aiplatform_v1_generated_vizier_service_delete_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_DeleteStudy_sync", "segments": [ { @@ -15626,19 +28961,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_delete_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.delete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" }, + "description": "Sample for DeleteTrial", "file": "aiplatform_v1_generated_vizier_service_delete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_DeleteTrial_async", "segments": [ { @@ -15669,18 +29039,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_delete_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": 
"VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.delete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" }, + "description": "Sample for DeleteTrial", "file": "aiplatform_v1_generated_vizier_service_delete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_DeleteTrial_sync", "segments": [ { @@ -15711,19 +29116,55 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_delete_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.get_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "GetStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "get_study" }, + "description": "Sample for GetStudy", "file": "aiplatform_v1_generated_vizier_service_get_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_GetStudy_async", "segments": [ { @@ -15756,18 +29197,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_get_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.get_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "GetStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "get_study" }, + "description": "Sample for GetStudy", "file": "aiplatform_v1_generated_vizier_service_get_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_GetStudy_sync", "segments": [ { @@ -15800,19 
+29277,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_get_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.get_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "GetTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "get_trial" }, + "description": "Sample for GetTrial", "file": "aiplatform_v1_generated_vizier_service_get_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_GetTrial_async", "segments": [ { @@ -15845,18 +29358,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_get_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.get_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "GetTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "get_trial" }, + "description": "Sample for GetTrial", "file": "aiplatform_v1_generated_vizier_service_get_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_GetTrial_sync", "segments": [ { @@ -15889,19 +29438,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_get_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.list_optimal_trials", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListOptimalTrials", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "ListOptimalTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
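As the GetStudy and GetTrial entries above illustrate, each method is documented with both a `request` parameter and its flattened fields (here `name`); GAPIC clients accept either form, but not both in the same call. A one-liner with a placeholder study name:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.VizierServiceClient()

# Flattened form; equivalently: client.get_study(request=aiplatform_v1.GetStudyRequest(name=...))
study = client.get_study(
    name="projects/my-project/locations/us-central1/studies/1234"  # placeholder
)
print(study.display_name, study.state)
```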
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" }, + "description": "Sample for ListOptimalTrials", "file": "aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_ListOptimalTrials_async", "segments": [ { @@ -15934,18 +29519,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.list_optimal_trials", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListOptimalTrials", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "ListOptimalTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" }, + "description": "Sample for ListOptimalTrials", "file": "aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_ListOptimalTrials_sync", "segments": [ { @@ -15978,19 +29599,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.list_studies", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListStudies", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "ListStudies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesAsyncPager", + "shortName": "list_studies" }, + "description": "Sample for ListStudies", "file": "aiplatform_v1_generated_vizier_service_list_studies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_ListStudies_async", "segments": [ { @@ -16023,18 +29680,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_list_studies_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.VizierServiceClient.list_studies", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListStudies", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "ListStudies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesPager", + "shortName": "list_studies" }, + "description": "Sample for ListStudies", "file": "aiplatform_v1_generated_vizier_service_list_studies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_ListStudies_sync", "segments": [ { @@ -16067,19 +29760,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_list_studies_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.list_trials", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListTrials", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "ListTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsAsyncPager", + "shortName": "list_trials" }, + "description": "Sample for ListTrials", "file": "aiplatform_v1_generated_vizier_service_list_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_ListTrials_async", "segments": [ { @@ -16112,18 +29841,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_list_trials_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.list_trials", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListTrials", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "ListTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsPager", + "shortName": "list_trials" }, + "description": "Sample for ListTrials", "file": 
"aiplatform_v1_generated_vizier_service_list_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_ListTrials_sync", "segments": [ { @@ -16156,19 +29921,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_list_trials_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.lookup_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.LookupStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "LookupStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "lookup_study" }, + "description": "Sample for LookupStudy", "file": "aiplatform_v1_generated_vizier_service_lookup_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_LookupStudy_async", "segments": [ { @@ -16201,18 +30002,54 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_lookup_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.lookup_study", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.LookupStudy", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "LookupStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "lookup_study" }, + "description": "Sample for LookupStudy", "file": "aiplatform_v1_generated_vizier_service_lookup_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_LookupStudy_sync", "segments": [ { @@ -16245,19 +30082,51 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_lookup_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.stop_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.StopTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "StopTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "stop_trial" }, + "description": "Sample for StopTrial", "file": "aiplatform_v1_generated_vizier_service_stop_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_StopTrial_async", "segments": [ { @@ -16290,18 +30159,50 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_stop_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.stop_trial", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.StopTrial", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "StopTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "stop_trial" }, + "description": "Sample for StopTrial", "file": "aiplatform_v1_generated_vizier_service_stop_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_StopTrial_sync", "segments": [ { @@ -16334,19 +30235,51 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_stop_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.suggest_trials", "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.SuggestTrials", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "SuggestTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "suggest_trials" }, + "description": "Sample for SuggestTrials", "file": "aiplatform_v1_generated_vizier_service_suggest_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_SuggestTrials_async", "segments": [ { @@ -16379,18 +30312,50 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_suggest_trials_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.suggest_trials", "method": { + 
"fullName": "google.cloud.aiplatform.v1.VizierService.SuggestTrials", "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", "shortName": "VizierService" }, "shortName": "SuggestTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "suggest_trials" }, + "description": "Sample for SuggestTrials", "file": "aiplatform_v1_generated_vizier_service_suggest_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1_generated_VizierService_SuggestTrials_sync", "segments": [ { @@ -16423,7 +30388,8 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1_generated_vizier_service_suggest_trials_sync.py" } ] } diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json index d6c82957ab..99cdb25b68 100644 --- a/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json +++ b/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json @@ -1,16 +1,65 @@ { + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.aiplatform.v1beta1", + "version": "v1beta1" + } + ], + "language": "PYTHON", + "name": "google-cloud-aiplatform" + }, "snippets": [ { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.create_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "CreateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_dataset" }, + "description": "Sample for CreateDataset", "file": "aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_CreateDataset_async", "segments": [ { @@ -43,18 +92,58 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.create_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "CreateDataset" - } + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_dataset" }, + "description": "Sample for CreateDataset", "file": "aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_CreateDataset_sync", "segments": [ { @@ -87,19 +176,55 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.delete_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "DeleteDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_dataset" }, + "description": "Sample for DeleteDataset", "file": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_DeleteDataset_async", "segments": [ { @@ -132,18 +257,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.delete_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "DeleteDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_dataset" }, + "description": "Sample for DeleteDataset", "file": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_DeleteDataset_sync", 
"segments": [ { @@ -176,19 +337,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.export_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ExportData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ExportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_data" }, + "description": "Sample for ExportData", "file": "aiplatform_v1beta1_generated_dataset_service_export_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ExportData_async", "segments": [ { @@ -221,18 +422,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_export_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.export_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ExportData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ExportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_data" }, + "description": "Sample for ExportData", "file": "aiplatform_v1beta1_generated_dataset_service_export_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ExportData_sync", "segments": [ { @@ -265,19 +506,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_export_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.get_annotation_spec", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", 
"shortName": "DatasetService" }, "shortName": "GetAnnotationSpec" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AnnotationSpec", + "shortName": "get_annotation_spec" }, + "description": "Sample for GetAnnotationSpec", "file": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_async", "segments": [ { @@ -310,18 +587,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.get_annotation_spec", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "GetAnnotationSpec" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AnnotationSpec", + "shortName": "get_annotation_spec" }, + "description": "Sample for GetAnnotationSpec", "file": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_sync", "segments": [ { @@ -354,19 +667,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.get_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "GetDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "get_dataset" }, + "description": "Sample for GetDataset", "file": "aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_DatasetService_GetDataset_async", "segments": [ { @@ -399,18 +748,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.get_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "GetDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "get_dataset" }, + "description": "Sample for GetDataset", "file": "aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetDataset_sync", "segments": [ { @@ -443,19 +828,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.import_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ImportData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ImportData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_data" }, + "description": "Sample for ImportData", "file": "aiplatform_v1beta1_generated_dataset_service_import_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ImportData_async", "segments": [ { @@ -488,18 +913,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_import_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.import_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ImportData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ImportData" - } + 
}, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_data" }, + "description": "Sample for ImportData", "file": "aiplatform_v1beta1_generated_dataset_service_import_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ImportData_sync", "segments": [ { @@ -532,19 +997,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_import_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.list_annotations", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListAnnotations" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager", + "shortName": "list_annotations" }, + "description": "Sample for ListAnnotations", "file": "aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListAnnotations_async", "segments": [ { @@ -577,18 +1078,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.list_annotations", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListAnnotations" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager", + "shortName": "list_annotations" }, + "description": "Sample for ListAnnotations", "file": 
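ImportData above is another LRO, flattened to `name` plus a sequence of `ImportDataConfig`. A sketch assuming a Cloud Storage source and a placeholder import-schema URI:

```python
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.DatasetServiceClient()

import_config = aiplatform_v1beta1.ImportDataConfig(
    gcs_source=aiplatform_v1beta1.GcsSource(uris=["gs://my-bucket/import.jsonl"]),  # placeholder
    # Placeholder I/O-format schema URI; pick the one matching your annotations.
    import_schema_uri=(
        "gs://google-cloud-aiplatform/schema/dataset/ioformat/"
        "image_classification_single_label_io_format_1.0.0.yaml"
    ),
)

operation = client.import_data(
    name="projects/my-project/locations/us-central1/datasets/5678",  # placeholder
    import_configs=[import_config],
)
operation.result()  # wait for the import LRO to finish
```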
"aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListAnnotations_sync", "segments": [ { @@ -621,19 +1158,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.list_data_items", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDataItems" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager", + "shortName": "list_data_items" }, + "description": "Sample for ListDataItems", "file": "aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDataItems_async", "segments": [ { @@ -666,18 +1239,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.list_data_items", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDataItems" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager", + "shortName": "list_data_items" }, + "description": "Sample for ListDataItems", "file": "aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDataItems_sync", "segments": [ { @@ -710,19 +1319,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.list_datasets", "method": { + 
"fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDatasets" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager", + "shortName": "list_datasets" }, + "description": "Sample for ListDatasets", "file": "aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDatasets_async", "segments": [ { @@ -755,18 +1400,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.list_datasets", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "ListDatasets" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager", + "shortName": "list_datasets" }, + "description": "Sample for ListDatasets", "file": "aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDatasets_sync", "segments": [ { @@ -799,19 +1480,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.update_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "UpdateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "update_dataset" }, + "description": "Sample for UpdateDataset", "file": "aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_UpdateDataset_async", "segments": [ { @@ -844,18 +1565,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.update_dataset", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", "shortName": "DatasetService" }, "shortName": "UpdateDataset" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "update_dataset" }, + "description": "Sample for UpdateDataset", "file": "aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_DatasetService_UpdateDataset_sync", "segments": [ { @@ -888,19 +1649,63 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.create_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "CreateEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_endpoint" }, + "description": "Sample for CreateEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_async", "segments": [ { @@ -933,18 +1738,62 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.create_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "CreateEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_endpoint" }, + "description": "Sample for CreateEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_sync", "segments": [ { @@ -977,19 +1826,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.delete_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeleteEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_endpoint" }, + "description": "Sample for DeleteEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_async", "segments": [ { @@ -1022,18 +1907,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.delete_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeleteEndpoint" - } + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_endpoint" }, + "description": "Sample for DeleteEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_sync", "segments": [ { @@ -1066,19 +1987,63 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.deploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" }, + "description": "Sample for DeployModel", "file": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeployModel_async", "segments": [ { @@ -1111,18 +2076,62 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.deploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "DeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" }, + "description": "Sample for 
DeployModel", "file": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeployModel_sync", "segments": [ { @@ -1155,19 +2164,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.get_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "GetEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "get_endpoint" }, + "description": "Sample for GetEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_GetEndpoint_async", "segments": [ { @@ -1200,18 +2245,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.get_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "GetEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "get_endpoint" }, + "description": "Sample for GetEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_GetEndpoint_sync", "segments": [ { @@ -1244,19 +2325,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.list_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints", "service": { 
+ "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "ListEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager", + "shortName": "list_endpoints" }, + "description": "Sample for ListEndpoints", "file": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_ListEndpoints_async", "segments": [ { @@ -1289,18 +2406,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.list_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "ListEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager", + "shortName": "list_endpoints" }, + "description": "Sample for ListEndpoints", "file": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync", "segments": [ { @@ -1333,19 +2486,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.undeploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "UndeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_model" }, + "description": "Sample for UndeployModel", "file": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_UndeployModel_async", "segments": [ { @@ -1378,18 +2575,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.undeploy_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "UndeployModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_model" }, + "description": "Sample for UndeployModel", "file": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_UndeployModel_sync", "segments": [ { @@ -1422,19 +2663,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.update_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "UpdateEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "update_endpoint" }, + "description": "Sample for UpdateEndpoint", "file": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_async", "segments": [ { @@ -1467,18 +2748,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.update_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", "shortName": "EndpointService" }, "shortName": "UpdateEndpoint" - } - }, - "file": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py", + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "update_endpoint" + }, + "description": "Sample for UpdateEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_sync", "segments": [ { @@ -1511,19 +2832,55 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "ReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" }, + "description": "Sample for ReadFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async", "segments": [ { @@ -1556,18 +2913,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "ReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" }, + "description": "Sample for ReadFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync", "segments": [ { @@ -1600,19 +2993,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "StreamingReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" }, + "description": "Sample for StreamingReadFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async", "segments": [ { @@ -1645,18 +3074,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", "shortName": "FeaturestoreOnlineServingService" }, "shortName": "StreamingReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" }, + "description": "Sample for StreamingReadFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync", "segments": [ { @@ -1689,19 +3154,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_create_features", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchCreateFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_create_features" }, + "description": "Sample for BatchCreateFeatures", "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_async", "segments": [ { @@ -1734,18 +3239,58 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_create_features", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchCreateFeatures" - } + }, + "parameters": [ + { + "name": 
"request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_create_features" }, + "description": "Sample for BatchCreateFeatures", "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_sync", "segments": [ { @@ -1778,19 +3323,55 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_read_feature_values" }, + "description": "Sample for BatchReadFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_async", "segments": [ { @@ -1823,18 +3404,54 @@ "start": 59, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_read_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "BatchReadFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": 
"batch_read_feature_values" }, + "description": "Sample for BatchReadFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync", "segments": [ { @@ -1867,19 +3484,63 @@ "start": 59, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_entity_type" }, + "description": "Sample for CreateEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_async", "segments": [ { @@ -1912,18 +3573,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_entity_type" }, + "description": "Sample for CreateEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_sync", "segments": [ { @@ -1956,19 +3661,63 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature" }, + "description": "Sample for CreateFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_async", "segments": [ { @@ -2001,18 +3750,62 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature" }, + "description": "Sample for CreateFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_sync", "segments": [ { @@ -2045,19 +3838,63 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + 
"shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_featurestore" }, + "description": "Sample for CreateFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_async", "segments": [ { @@ -2090,18 +3927,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "CreateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_featurestore" }, + "description": "Sample for CreateFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_sync", "segments": [ { @@ -2134,19 +4015,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_entity_type" }, + "description": "Sample for DeleteEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_async", "segments": [ { @@ -2179,18 +4100,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_entity_type" }, + "description": "Sample for DeleteEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_sync", "segments": [ { @@ -2223,19 +4184,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", 
+ "shortName": "delete_feature" }, + "description": "Sample for DeleteFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_async", "segments": [ { @@ -2268,18 +4265,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature" }, + "description": "Sample for DeleteFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_sync", "segments": [ { @@ -2312,19 +4345,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_featurestore" }, + "description": "Sample for DeleteFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_async", "segments": [ { @@ -2357,18 +4430,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "DeleteFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_featurestore" }, + "description": "Sample for DeleteFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_sync", "segments": [ { @@ -2401,19 +4514,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.export_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ExportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_feature_values" }, + "description": "Sample for ExportFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_async", "segments": [ { @@ -2446,18 +4595,54 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.export_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ExportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_feature_values" }, + "description": "Sample for ExportFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_sync", "segments": [ { @@ -2490,19 +4675,55 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "get_entity_type" }, + "description": "Sample for GetEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_async", "segments": [ { @@ -2535,18 +4756,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "get_entity_type" }, + "description": "Sample for GetEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_sync", "segments": [ { @@ -2579,19 +4836,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" }, + "description": "Sample for GetFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_async", "segments": [ { @@ -2624,18 +4917,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" }, + "description": "Sample for GetFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_sync", "segments": [ { @@ -2668,19 +4997,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", 
"shortName": "FeaturestoreService" }, "shortName": "GetFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", + "shortName": "get_featurestore" }, + "description": "Sample for GetFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_async", "segments": [ { @@ -2713,18 +5078,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "GetFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", + "shortName": "get_featurestore" }, + "description": "Sample for GetFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_sync", "segments": [ { @@ -2757,19 +5158,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.import_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ImportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_feature_values" }, + "description": "Sample for ImportFeatureValues", "file": 
"aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_async", "segments": [ { @@ -2802,18 +5239,54 @@ "start": 55, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.import_feature_values", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ImportFeatureValues" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_feature_values" }, + "description": "Sample for ImportFeatureValues", "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_sync", "segments": [ { @@ -2846,19 +5319,55 @@ "start": 55, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_entity_types", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListEntityTypes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager", + "shortName": "list_entity_types" }, + "description": "Sample for ListEntityTypes", "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_async", "segments": [ { @@ -2891,18 +5400,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + 
"shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_entity_types", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListEntityTypes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager", + "shortName": "list_entity_types" }, + "description": "Sample for ListEntityTypes", "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_sync", "segments": [ { @@ -2935,21 +5480,57 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_features", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeatures" - } - }, - "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async", - "segments": [ + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async", + "segments": [ { "end": 45, "start": 27, @@ -2980,18 +5561,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_features", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, 
"shortName": "ListFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager", + "shortName": "list_features" }, + "description": "Sample for ListFeatures", "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_sync", "segments": [ { @@ -3024,19 +5641,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_featurestores", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeaturestores" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager", + "shortName": "list_featurestores" }, + "description": "Sample for ListFeaturestores", "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_async", "segments": [ { @@ -3069,18 +5722,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_featurestores", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "ListFeaturestores" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager", + "shortName": "list_featurestores" }, + 
"description": "Sample for ListFeaturestores", "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_sync", "segments": [ { @@ -3113,19 +5802,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.search_features", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "SearchFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager", + "shortName": "search_features" }, + "description": "Sample for SearchFeatures", "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_async", "segments": [ { @@ -3158,18 +5887,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.search_features", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "SearchFeatures" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager", + "shortName": "search_features" }, + "description": "Sample for SearchFeatures", "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_sync", "segments": [ { @@ -3202,19 +5971,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py" }, { + "canonical": 
true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "update_entity_type" }, + "description": "Sample for UpdateEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_async", "segments": [ { @@ -3247,18 +6056,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_entity_type", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateEntityType" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "update_entity_type" }, + "description": "Sample for UpdateEntityType", "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_sync", "segments": [ { @@ -3291,19 +6140,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_feature", "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "update_feature" }, + "description": "Sample for UpdateFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_async", "segments": [ { @@ -3336,18 +6225,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_feature", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeature" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "update_feature" }, + "description": "Sample for UpdateFeature", "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_sync", "segments": [ { @@ -3380,19 +6309,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": 
"google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_featurestore" }, + "description": "Sample for UpdateFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_async", "segments": [ { @@ -3425,18 +6394,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_featurestore", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", "shortName": "FeaturestoreService" }, "shortName": "UpdateFeaturestore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_featurestore" }, + "description": "Sample for UpdateFeaturestore", "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_sync", "segments": [ { @@ -3469,19 +6478,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.create_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "CreateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index_endpoint" }, + "description": "Sample for CreateIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_async", "segments": [ { @@ -3514,18 +6563,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.create_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "CreateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index_endpoint" }, + "description": "Sample for CreateIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_sync", "segments": [ { @@ -3558,19 +6647,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.delete_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeleteIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index_endpoint" }, + "description": "Sample for DeleteIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_async", "segments": [ { @@ -3603,18 +6728,54 @@ "start": 46, 
"type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.delete_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeleteIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index_endpoint" }, + "description": "Sample for DeleteIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", "segments": [ { @@ -3647,19 +6808,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.deploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_index" }, + "description": "Sample for DeployIndex", "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_async", "segments": [ { @@ -3692,18 +6893,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.deploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", "service": { + 
"fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "DeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_index" }, + "description": "Sample for DeployIndex", "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_sync", "segments": [ { @@ -3736,19 +6977,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.get_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "GetIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "get_index_endpoint" }, + "description": "Sample for GetIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_async", "segments": [ { @@ -3781,18 +7058,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.get_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "GetIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "get_index_endpoint" }, + "description": "Sample for GetIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_sync", "segments": [ { @@ -3825,19 +7138,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.list_index_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "ListIndexEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", + "shortName": "list_index_endpoints" }, + "description": "Sample for ListIndexEndpoints", "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_async", "segments": [ { @@ -3870,18 +7219,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.list_index_endpoints", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "ListIndexEndpoints" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", + "shortName": "list_index_endpoints" }, + "description": "Sample for ListIndexEndpoints", "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_sync", "segments": [ { @@ -3914,19 +7299,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - 
] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.mutate_deployed_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "MutateDeployedIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "mutate_deployed_index" }, + "description": "Sample for MutateDeployedIndex", "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_async", "segments": [ { @@ -3959,18 +7384,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.mutate_deployed_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "MutateDeployedIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "mutate_deployed_index" }, + "description": "Sample for MutateDeployedIndex", "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_sync", "segments": [ { @@ -4003,19 +7468,59 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.undeploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UndeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_index" }, + "description": "Sample for UndeployIndex", "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_async", "segments": [ { @@ -4048,18 +7553,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.undeploy_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UndeployIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_index" }, + "description": "Sample for UndeployIndex", "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_sync", "segments": [ { @@ -4092,19 +7637,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.update_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UpdateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": 
"google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, + "description": "Sample for UpdateIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_async", "segments": [ { @@ -4137,18 +7722,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.update_index_endpoint", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", "shortName": "IndexEndpointService" }, "shortName": "UpdateIndexEndpoint" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, + "description": "Sample for UpdateIndexEndpoint", "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", "segments": [ { @@ -4181,19 +7806,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.create_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "CreateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "create_index" }, + "description": "Sample for CreateIndex", "file": "aiplatform_v1beta1_generated_index_service_create_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_async", "segments": [ { @@ -4226,18 +7891,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_create_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.create_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "CreateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index" }, + "description": "Sample for CreateIndex", "file": "aiplatform_v1beta1_generated_index_service_create_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_sync", "segments": [ { @@ -4270,19 +7975,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_create_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.delete_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "DeleteIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index" }, + "description": "Sample for DeleteIndex", "file": "aiplatform_v1beta1_generated_index_service_delete_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_async", "segments": [ { @@ -4315,18 +8056,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_delete_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.delete_index", "method": { + 
"fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "DeleteIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index" }, + "description": "Sample for DeleteIndex", "file": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_sync", "segments": [ { @@ -4359,19 +8136,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.get_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "GetIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Index", + "shortName": "get_index" }, + "description": "Sample for GetIndex", "file": "aiplatform_v1beta1_generated_index_service_get_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_GetIndex_async", "segments": [ { @@ -4404,18 +8217,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_get_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.get_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "GetIndex" - } - }, + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Index", + "shortName": "get_index" + }, + "description": "Sample for GetIndex", "file": "aiplatform_v1beta1_generated_index_service_get_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_IndexService_GetIndex_sync", "segments": [ { @@ -4448,19 +8297,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_get_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.list_indexes", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "ListIndexes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager", + "shortName": "list_indexes" }, + "description": "Sample for ListIndexes", "file": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_async", "segments": [ { @@ -4493,18 +8378,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.list_indexes", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "ListIndexes" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager", + "shortName": "list_indexes" }, + "description": "Sample for ListIndexes", "file": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_sync", "segments": [ { @@ -4537,19 +8458,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.update_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "UpdateIndex" - } + }, + "parameters": [ + { + "name": 
"request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_index" }, + "description": "Sample for UpdateIndex", "file": "aiplatform_v1beta1_generated_index_service_update_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_async", "segments": [ { @@ -4582,18 +8543,58 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_update_index_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.update_index", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", "shortName": "IndexService" }, "shortName": "UpdateIndex" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_index" }, + "description": "Sample for UpdateIndex", "file": "aiplatform_v1beta1_generated_index_service_update_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_sync", "segments": [ { @@ -4626,19 +8627,54 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_index_service_update_index_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" }, + "description": "Sample for CancelBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_async", "segments": [ { @@ -4669,18 +8705,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" }, + "description": "Sample for CancelBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_sync", "segments": [ { @@ -4711,19 +8782,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" }, + "description": "Sample for CancelCustomJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_async", "segments": [ { @@ -4754,18 +8860,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" }, + "description": "Sample for CancelCustomJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_sync", "segments": [ { @@ -4796,19 +8937,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" }, + "description": "Sample for CancelDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_async", "segments": [ { @@ -4839,18 +9015,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" }, + "description": "Sample for CancelDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_sync", "segments": [ { @@ -4881,19 +9092,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": 
true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" }, + "description": "Sample for CancelHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_async", "segments": [ { @@ -4924,18 +9170,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CancelHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" }, + "description": "Sample for CancelHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_sync", "segments": [ { @@ -4966,19 +9247,59 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": 
"batch_prediction_job", + "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, + "description": "Sample for CreateBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_async", "segments": [ { @@ -5011,18 +9332,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, + "description": "Sample for CreateBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_sync", "segments": [ { @@ -5055,19 +9416,59 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "create_custom_job" }, + "description": "Sample for CreateCustomJob", 
"file": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_async", "segments": [ { @@ -5100,18 +9501,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "create_custom_job" }, + "description": "Sample for CreateCustomJob", "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_sync", "segments": [ { @@ -5144,19 +9585,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, + "description": "Sample for CreateDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_async", "segments": [ { @@ -5189,18 +9670,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": 
"JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, + "description": "Sample for CreateDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_sync", "segments": [ { @@ -5233,19 +9754,59 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, + "description": "Sample for CreateHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_async", "segments": [ { @@ -5278,18 +9839,58 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": 
"CreateHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, + "description": "Sample for CreateHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_sync", "segments": [ { @@ -5322,19 +9923,59 @@ "start": 54, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, + "description": "Sample for CreateModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_async", "segments": [ { @@ -5367,18 +10008,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "CreateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" + 
}, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, + "description": "Sample for CreateModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", "segments": [ { @@ -5411,19 +10092,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_batch_prediction_job" }, + "description": "Sample for DeleteBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_async", "segments": [ { @@ -5456,18 +10173,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_batch_prediction_job" }, + "description": "Sample for DeleteBatchPredictionJob", "file": 
"aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_sync", "segments": [ { @@ -5500,19 +10253,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_custom_job" }, + "description": "Sample for DeleteCustomJob", "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_async", "segments": [ { @@ -5545,18 +10334,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_custom_job" }, + "description": "Sample for DeleteCustomJob", "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync", "segments": [ { @@ -5589,19 +10414,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_data_labeling_job" }, + "description": "Sample for DeleteDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_async", "segments": [ { @@ -5634,18 +10495,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_data_labeling_job" }, + "description": "Sample for DeleteDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_sync", "segments": [ { @@ -5678,19 +10575,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_hyperparameter_tuning_job" }, + "description": "Sample for DeleteHyperparameterTuningJob", 
"file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async", "segments": [ { @@ -5723,18 +10656,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_hyperparameter_tuning_job" }, + "description": "Sample for DeleteHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_sync", "segments": [ { @@ -5767,19 +10736,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_deployment_monitoring_job" }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", "segments": [ { @@ -5812,18 +10817,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + 
"fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "DeleteModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_deployment_monitoring_job" }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", "segments": [ { @@ -5856,19 +10897,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, + "description": "Sample for GetBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_async", "segments": [ { @@ -5901,18 +10978,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_batch_prediction_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetBatchPredictionJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" + 
}, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, + "description": "Sample for GetBatchPredictionJob", "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_sync", "segments": [ { @@ -5945,19 +11058,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "get_custom_job" }, + "description": "Sample for GetCustomJob", "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_async", "segments": [ { @@ -5990,18 +11139,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_custom_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetCustomJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "get_custom_job" }, + "description": "Sample for GetCustomJob", "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_sync", "segments": [ { @@ -6034,19 +11219,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py" }, { + "canonical": true, 
"clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, + "description": "Sample for GetDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_async", "segments": [ { @@ -6079,18 +11300,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_data_labeling_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetDataLabelingJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, + "description": "Sample for GetDataLabelingJob", "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_sync", "segments": [ { @@ -6123,19 +11380,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + 
"type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, + "description": "Sample for GetHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_async", "segments": [ { @@ -6168,18 +11461,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_hyperparameter_tuning_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetHyperparameterTuningJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, + "description": "Sample for GetHyperparameterTuningJob", "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync", "segments": [ { @@ -6212,19 +11541,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, + "description": "Sample for GetModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async", "segments": [ { @@ -6257,18 +11622,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "GetModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, + "description": "Sample for GetModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_sync", "segments": [ { @@ -6301,19 +11702,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_batch_prediction_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListBatchPredictionJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" }, + "description": "Sample for ListBatchPredictionJobs", "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async", "segments": [ { @@ -6346,18 +11783,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceClient.list_batch_prediction_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListBatchPredictionJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": "list_batch_prediction_jobs" }, + "description": "Sample for ListBatchPredictionJobs", "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_sync", "segments": [ { @@ -6390,19 +11863,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_custom_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListCustomJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" }, + "description": "Sample for ListCustomJobs", "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_async", "segments": [ { @@ -6435,18 +11944,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_custom_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListCustomJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + 
], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" }, + "description": "Sample for ListCustomJobs", "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_sync", "segments": [ { @@ -6479,19 +12024,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_data_labeling_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListDataLabelingJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" }, + "description": "Sample for ListDataLabelingJobs", "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_async", "segments": [ { @@ -6524,18 +12105,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_data_labeling_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListDataLabelingJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": "list_data_labeling_jobs" }, + "description": "Sample for ListDataLabelingJobs", "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_sync", "segments": [ { @@ -6568,19 +12185,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py" }, { + "canonical": true, "clientMethod": { 
"async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListHyperparameterTuningJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" }, + "description": "Sample for ListHyperparameterTuningJobs", "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_async", "segments": [ { @@ -6613,18 +12266,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_hyperparameter_tuning_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListHyperparameterTuningJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" }, + "description": "Sample for ListHyperparameterTuningJobs", "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_sync", "segments": [ { @@ -6657,19 +12346,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": 
"JobService" }, "shortName": "ListModelDeploymentMonitoringJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" }, + "description": "Sample for ListModelDeploymentMonitoringJobs", "file": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_async", "segments": [ { @@ -6702,18 +12427,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_model_deployment_monitoring_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ListModelDeploymentMonitoringJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": "list_model_deployment_monitoring_jobs" }, + "description": "Sample for ListModelDeploymentMonitoringJobs", "file": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", "segments": [ { @@ -6746,19 +12507,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "PauseModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" }, + "description": "Sample for PauseModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_async", "segments": [ { @@ -6789,18 +12585,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.pause_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "PauseModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" }, + "description": "Sample for PauseModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", "segments": [ { @@ -6831,19 +12662,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ResumeModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", "segments": [ { @@ -6874,18 +12740,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.resume_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "ResumeModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", "segments": [ { @@ -6916,19 +12817,59 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", "segments": [ { @@ -6961,18 +12902,58 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + 
}, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", "segments": [ { @@ -7005,19 +12986,59 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.update_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "UpdateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_deployment_monitoring_job" }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", "segments": [ { @@ -7050,18 +13071,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + 
"fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.update_model_deployment_monitoring_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, "shortName": "UpdateModelDeploymentMonitoringJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_deployment_monitoring_job" }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", "segments": [ { @@ -7094,19 +13155,63 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextArtifactsAndExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, + "description": "Sample for AddContextArtifactsAndExecutions", "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_async", "segments": [ { @@ -7139,18 +13244,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_artifacts_and_executions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextArtifactsAndExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, + "description": "Sample for AddContextArtifactsAndExecutions", "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", "segments": [ { @@ -7183,19 +13332,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_children", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddContextChildren" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, + "description": "Sample for AddContextChildren", "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_async", "segments": [ { @@ -7228,18 +13417,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_children", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" 
}, "shortName": "AddContextChildren" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, + "description": "Sample for AddContextChildren", "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_sync", "segments": [ { @@ -7272,19 +13501,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_execution_events", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddExecutionEvents" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, + "description": "Sample for AddExecutionEvents", "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_async", "segments": [ { @@ -7317,18 +13586,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_execution_events", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "AddExecutionEvents" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, + "description": "Sample for AddExecutionEvents", "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_sync", "segments": [ { @@ -7361,19 +13670,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "create_artifact" }, + "description": "Sample for CreateArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_async", "segments": [ { @@ -7406,18 +13759,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "create_artifact" }, + "description": "Sample for CreateArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_CreateArtifact_sync", "segments": [ { @@ -7450,19 +13847,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "create_context" }, + "description": "Sample for CreateContext", "file": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_async", "segments": [ { @@ -7495,18 +13936,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "create_context" }, + "description": "Sample for CreateContext", "file": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_sync", "segments": [ { @@ -7539,19 +14024,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "create_execution" }, + "description": "Sample for CreateExecution", "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_async", "segments": [ { @@ -7584,18 +14113,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "create_execution" }, + "description": "Sample for CreateExecution", "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_sync", "segments": [ { @@ -7628,19 +14201,63 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, + "description": "Sample for CreateMetadataSchema", "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_async", "segments": [ { @@ -7673,18 +14290,62 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, + "description": "Sample for CreateMetadataSchema", "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_sync", "segments": [ { @@ -7717,19 +14378,63 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_metadata_store" }, + "description": "Sample for CreateMetadataStore", "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_async", "segments": [ { @@ -7762,18 +14467,62 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "CreateMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_metadata_store" }, + "description": "Sample for CreateMetadataStore", "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_sync", "segments": [ { @@ -7806,19 +14555,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_artifact" }, + "description": "Sample for DeleteArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_async", "segments": [ { @@ -7851,18 +14636,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_artifact" }, + "description": "Sample for DeleteArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_sync", "segments": [ { @@ -7895,19 +14716,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_context" }, + "description": "Sample for DeleteContext", "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_async", "segments": [ { @@ -7940,18 +14797,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteContext" - } + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_context" }, + "description": "Sample for DeleteContext", "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_sync", "segments": [ { @@ -7984,19 +14877,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_execution" }, + "description": "Sample for DeleteExecution", "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_async", "segments": [ { @@ -8029,18 +14958,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_execution" }, + "description": "Sample for DeleteExecution", "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_sync", "segments": [ { @@ -8073,19 
+15038,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_metadata_store" }, + "description": "Sample for DeleteMetadataStore", "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_async", "segments": [ { @@ -8118,18 +15119,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "DeleteMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_metadata_store" }, + "description": "Sample for DeleteMetadataStore", "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_sync", "segments": [ { @@ -8162,19 +15199,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, 
"shortName": "GetArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "get_artifact" }, + "description": "Sample for GetArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_async", "segments": [ { @@ -8207,18 +15280,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "get_artifact" }, + "description": "Sample for GetArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_sync", "segments": [ { @@ -8251,19 +15360,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "get_context" }, + "description": "Sample for GetContext", "file": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_async", "segments": [ { @@ -8296,18 +15441,54 @@ "start": 42, 
"type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "get_context" }, + "description": "Sample for GetContext", "file": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_sync", "segments": [ { @@ -8340,19 +15521,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "get_execution" }, + "description": "Sample for GetExecution", "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_async", "segments": [ { @@ -8385,18 +15602,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "get_execution" }, + "description": "Sample for GetExecution", "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_sync", "segments": [ { @@ -8429,19 +15682,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, + "description": "Sample for GetMetadataSchema", "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_async", "segments": [ { @@ -8474,18 +15763,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_schema", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataSchema" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, + "description": "Sample for GetMetadataSchema", "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_sync", "segments": [ { @@ -8518,19 +15843,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", + "shortName": "get_metadata_store" }, + "description": "Sample for GetMetadataStore", "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_async", "segments": [ { @@ -8563,18 +15924,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_store", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "GetMetadataStore" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", + "shortName": "get_metadata_store" }, + "description": "Sample for GetMetadataStore", "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_sync", "segments": [ { @@ -8607,19 +16004,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager", + "shortName": "list_artifacts" }, + "description": "Sample for ListArtifacts", "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_async", "segments": [ { @@ -8652,18 +16085,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager", + "shortName": "list_artifacts" }, + "description": "Sample for ListArtifacts", "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_sync", "segments": [ { @@ -8696,19 +16165,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager", + "shortName": "list_contexts" }, + "description": "Sample for ListContexts", "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_ListContexts_async", "segments": [ { @@ -8741,18 +16246,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager", + "shortName": "list_contexts" }, + "description": "Sample for ListContexts", "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_sync", "segments": [ { @@ -8785,19 +16326,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_executions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager", + "shortName": "list_executions" }, + "description": "Sample for ListExecutions", "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_async", "segments": [ { @@ -8830,18 +16407,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_executions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager", + "shortName": "list_executions" }, + "description": "Sample for ListExecutions", "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_sync", "segments": [ { @@ -8874,19 +16487,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_schemas", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataSchemas" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", + "shortName": "list_metadata_schemas" }, + "description": "Sample for ListMetadataSchemas", "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_async", "segments": [ { @@ -8919,18 +16568,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_schemas", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataSchemas" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager", + "shortName": "list_metadata_schemas" }, + "description": "Sample for ListMetadataSchemas", "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_sync", "segments": [ { @@ -8963,19 +16648,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_stores", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataStores" - } - }, - "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py", + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", + "shortName": "list_metadata_stores" + }, + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_async", "segments": [ { @@ -9008,18 +16729,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_stores", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "ListMetadataStores" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager", + "shortName": "list_metadata_stores" }, + "description": "Sample for ListMetadataStores", "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_sync", "segments": [ { @@ -9052,19 +16809,55 @@ 
"start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_artifacts" }, + "description": "Sample for PurgeArtifacts", "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_async", "segments": [ { @@ -9097,18 +16890,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_artifacts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeArtifacts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_artifacts" }, + "description": "Sample for PurgeArtifacts", "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_sync", "segments": [ { @@ -9141,19 +16970,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_contexts" }, + "description": "Sample for PurgeContexts", "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_async", "segments": [ { @@ -9186,18 +17051,54 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_contexts", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeContexts" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_contexts" }, + "description": "Sample for PurgeContexts", "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_sync", "segments": [ { @@ -9230,19 +17131,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_executions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_executions" }, + "description": "Sample for PurgeExecutions", "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_async", "segments": [ { @@ -9275,18 +17212,54 @@ "start": 47, "type": "RESPONSE_HANDLING" 
} - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_executions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "PurgeExecutions" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_executions" }, + "description": "Sample for PurgeExecutions", "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_sync", "segments": [ { @@ -9319,19 +17292,55 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryArtifactLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, + "description": "Sample for QueryArtifactLineageSubgraph", "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_async", "segments": [ { @@ -9364,18 +17373,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_artifact_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" 
}, "shortName": "QueryArtifactLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, + "description": "Sample for QueryArtifactLineageSubgraph", "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", "segments": [ { @@ -9408,19 +17453,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_context_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryContextLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, + "description": "Sample for QueryContextLineageSubgraph", "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_async", "segments": [ { @@ -9453,18 +17534,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_context_lineage_subgraph", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryContextLineageSubgraph" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, + "description": "Sample for QueryContextLineageSubgraph", "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_sync", "segments": [ { @@ -9497,19 +17614,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryExecutionInputsAndOutputs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, + "description": "Sample for QueryExecutionInputsAndOutputs", "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", "segments": [ { @@ -9542,18 +17695,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_execution_inputs_and_outputs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "QueryExecutionInputsAndOutputs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, + "description": "Sample for QueryExecutionInputsAndOutputs", "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", "segments": [ { @@ -9586,19 +17775,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "update_artifact" }, + "description": "Sample for UpdateArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_async", "segments": [ { @@ -9631,18 +17860,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_artifact", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateArtifact" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "update_artifact" }, + "description": "Sample for UpdateArtifact", "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_sync", "segments": [ { @@ -9675,19 +17944,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": 
"MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "update_context" }, + "description": "Sample for UpdateContext", "file": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_async", "segments": [ { @@ -9720,18 +18029,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_context", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateContext" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "update_context" }, + "description": "Sample for UpdateContext", "file": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_sync", "segments": [ { @@ -9764,19 +18113,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" + 
}, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "update_execution" }, + "description": "Sample for UpdateExecution", "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_async", "segments": [ { @@ -9809,18 +18198,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_execution", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, "shortName": "UpdateExecution" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "update_execution" }, + "description": "Sample for UpdateExecution", "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_sync", "segments": [ { @@ -9853,19 +18282,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.batch_migrate_resources", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", "shortName": "MigrationService" }, "shortName": "BatchMigrateResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
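The UpdateArtifact, UpdateContext, and UpdateExecution entries above all follow the same resource-plus-FieldMask pattern. A sketch using update_context, with a placeholder context name and only the description field updated:

from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.MetadataServiceClient()
# Placeholder context; only fields named in update_mask are written.
context = aiplatform_v1beta1.Context(
    name="projects/my-project/locations/us-central1/metadataStores/default/contexts/my-context",
    description="refreshed by nightly pipeline",
)
updated = client.update_context(
    context=context,
    update_mask=field_mask_pb2.FieldMask(paths=["description"]),
)
print(updated.description)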
"google.api_core.operation_async.AsyncOperation", + "shortName": "batch_migrate_resources" }, + "description": "Sample for BatchMigrateResources", "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_async", "segments": [ { @@ -9898,18 +18367,58 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.batch_migrate_resources", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", "shortName": "MigrationService" }, "shortName": "BatchMigrateResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_migrate_resources" }, + "description": "Sample for BatchMigrateResources", "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_sync", "segments": [ { @@ -9942,19 +18451,55 @@ "start": 52, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.search_migratable_resources", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", "shortName": "MigrationService" }, "shortName": "SearchMigratableResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", + "shortName": "search_migratable_resources" }, + "description": "Sample for SearchMigratableResources", "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_async", "segments": [ { @@ -9987,18 +18532,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.search_migratable_resources", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", "shortName": "MigrationService" }, "shortName": "SearchMigratableResources" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager", + "shortName": "search_migratable_resources" }, + "description": "Sample for SearchMigratableResources", "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_sync", "segments": [ { @@ -10031,20 +18612,56 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model_version", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "DeleteModel" - } + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_version" }, - "file": "aiplatform_v1beta1_generated_model_service_delete_model_async.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_async", + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async", "segments": [ { "end": 48, @@ -10076,19 +18693,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model_version", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "DeleteModel" - } + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_version" }, - "file": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_sync", + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync", "segments": [ { "end": 48, @@ -10120,20 +18773,56 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ExportModel" - } + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" }, - "file": "aiplatform_v1beta1_generated_model_service_export_model_async.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_async", + "description": "Sample for DeleteModel", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_async", "segments": [ { "end": 48, @@ -10165,19 +18854,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - 
"shortName": "ExportModel" - } + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" }, - "file": "aiplatform_v1beta1_generated_model_service_export_model_sync.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_sync", + "description": "Sample for DeleteModel", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_sync", "segments": [ { "end": 48, @@ -10209,28 +18934,68 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.export_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "GetModelEvaluationSlice" - } + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" }, - "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async", + "description": "Sample for ExportModel", + "file": "aiplatform_v1beta1_generated_model_service_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_async", "segments": [ { - "end": 44, + "end": 48, "start": 27, "type": "FULL" }, { - "end": 44, + "end": 48, "start": 27, "type": "SHORT" }, @@ -10245,27 +19010,228 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 41, + "end": 45, "start": 39, "type": "REQUEST_EXECUTION" }, { - "end": 45, - "start": 42, + "end": 49, + "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_export_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.export_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": 
"GetModelEvaluationSlice" - } + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1beta1_generated_model_service_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_export_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", 
+ "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_sync", "segments": [ { @@ -10298,19 +19264,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "GetModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" }, + "description": "Sample for GetModelEvaluation", "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_async", "segments": [ { @@ -10343,18 +19345,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "GetModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" }, + "description": "Sample 
for GetModelEvaluation", "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_sync", "segments": [ { @@ -10387,19 +19425,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "GetModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "get_model" }, + "description": "Sample for GetModel", "file": "aiplatform_v1beta1_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_async", "segments": [ { @@ -10432,18 +19506,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "GetModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "get_model" }, + "description": "Sample for GetModel", "file": "aiplatform_v1beta1_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_sync", "segments": [ { @@ -10476,19 +19586,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.import_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": 
"ModelService" }, "shortName": "ImportModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "import_model_evaluation" }, + "description": "Sample for ImportModelEvaluation", "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async", "segments": [ { @@ -10521,27 +19671,389 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.import_model_evaluation", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "ImportModelEvaluation" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "import_model_evaluation" }, + "description": "Sample for ImportModelEvaluation", "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync", "segments": [ { - "end": 44, + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluations", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync", + "segments": [ + { + "end": 45, "start": 27, "type": "FULL" }, { - "end": 44, + "end": 45, "start": 27, "type": "SHORT" }, @@ -10561,24 +20073,60 @@ "type": "REQUEST_EXECUTION" }, { - "end": 45, + "end": 46, "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_versions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", 
"service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ListModelEvaluationSlices" - } + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsAsyncPager", + "shortName": "list_model_versions" }, - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async", + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_async", "segments": [ { "end": 45, @@ -10610,19 +20158,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_versions", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ListModelEvaluationSlices" - } + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsPager", + "shortName": "list_model_versions" }, - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync", + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync", "segments": [ { "end": 45, @@ -10654,20 +20238,56 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_models", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ListModelEvaluations" - } + "shortName": 
"ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" }, - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async", + "description": "Sample for ListModels", + "file": "aiplatform_v1beta1_generated_model_service_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_async", "segments": [ { "end": 45, @@ -10699,19 +20319,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_list_models_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_models", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ListModelEvaluations" - } + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" }, - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync", + "description": "Sample for ListModels", + "file": "aiplatform_v1beta1_generated_model_service_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_sync", "segments": [ { "end": 45, @@ -10743,20 +20399,60 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_list_models_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.merge_version_aliases", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ListModels" - } + "shortName": "MergeVersionAliases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "version_aliases", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "merge_version_aliases" }, - "file": "aiplatform_v1beta1_generated_model_service_list_models_async.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_async", + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async", "segments": [ { "end": 45, @@ -10774,33 +20470,73 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 38, + "end": 39, "start": 34, "type": "REQUEST_INITIALIZATION" }, { - "end": 41, - "start": 39, + "end": 42, + "start": 40, "type": "REQUEST_EXECUTION" }, { "end": 46, - "start": 42, + "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.merge_version_aliases", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, - "shortName": "ListModels" - } + "shortName": "MergeVersionAliases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "version_aliases", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "merge_version_aliases" }, - "file": "aiplatform_v1beta1_generated_model_service_list_models_sync.py", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_sync", + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync", "segments": [ { "end": 45, @@ -10818,33 +20554,73 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 38, + "end": 39, "start": 34, "type": "REQUEST_INITIALIZATION" }, { - "end": 41, - "start": 39, + "end": 42, + "start": 40, "type": "REQUEST_EXECUTION" }, { "end": 46, - "start": 42, + "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "UpdateModel" - } + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "update_model" }, + "description": "Sample for UpdateModel", "file": "aiplatform_v1beta1_generated_model_service_update_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_async", "segments": [ { @@ -10877,18 +20653,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_update_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.update_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "UpdateModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "update_model" }, + "description": "Sample for UpdateModel", "file": "aiplatform_v1beta1_generated_model_service_update_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_sync", "segments": [ { @@ -10921,19 +20737,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_update_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.upload_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "UploadModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upload_model" }, + "description": "Sample for UploadModel", "file": 
"aiplatform_v1beta1_generated_model_service_upload_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_async", "segments": [ { @@ -10966,18 +20822,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_upload_model_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.upload_model", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", "shortName": "ModelService" }, "shortName": "UploadModel" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "upload_model" }, + "description": "Sample for UploadModel", "file": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_sync", "segments": [ { @@ -11010,19 +20906,54 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" }, + "description": "Sample for CancelPipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_async", "segments": [ { @@ -11053,18 +20984,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", "service": { + 
"fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" }, + "description": "Sample for CancelPipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_sync", "segments": [ { @@ -11095,19 +21061,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" }, + "description": "Sample for CancelTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_async", "segments": [ { @@ -11138,18 +21139,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CancelTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" }, + "description": "Sample for CancelTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", 
"regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_sync", "segments": [ { @@ -11180,19 +21216,63 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreatePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "create_pipeline_job" }, + "description": "Sample for CreatePipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_async", "segments": [ { @@ -11225,18 +21305,62 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreatePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "create_pipeline_job" }, + "description": "Sample for CreatePipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_sync", "segments": [ { @@ -11269,19 +21393,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + 
"fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreateTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "create_training_pipeline" }, + "description": "Sample for CreateTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_async", "segments": [ { @@ -11314,18 +21478,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "CreateTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "create_training_pipeline" }, + "description": "Sample for CreateTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_sync", "segments": [ { @@ -11358,19 +21562,55 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeletePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_pipeline_job" }, + "description": "Sample for DeletePipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_async", "segments": [ { @@ -11403,18 +21643,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeletePipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_pipeline_job" }, + "description": "Sample for DeletePipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_sync", "segments": [ { @@ -11447,19 +21723,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeleteTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_training_pipeline" }, + "description": "Sample for DeleteTrainingPipeline", "file": 
"aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_async", "segments": [ { @@ -11492,18 +21804,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "DeleteTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_training_pipeline" }, + "description": "Sample for DeleteTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_sync", "segments": [ { @@ -11536,19 +21884,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "get_pipeline_job" }, + "description": "Sample for GetPipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_async", "segments": [ { @@ -11581,18 +21965,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_pipeline_job", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetPipelineJob" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "get_pipeline_job" }, + "description": "Sample for GetPipelineJob", "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_sync", "segments": [ { @@ -11625,19 +22045,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "get_training_pipeline" }, + "description": "Sample for GetTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_async", "segments": [ { @@ -11670,18 +22126,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_training_pipeline", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "GetTrainingPipeline" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "get_training_pipeline" }, + "description": "Sample for GetTrainingPipeline", "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_sync", "segments": [ { @@ -11714,19 +22206,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_pipeline_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListPipelineJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager", + "shortName": "list_pipeline_jobs" }, + "description": "Sample for ListPipelineJobs", "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_async", "segments": [ { @@ -11759,18 +22287,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_pipeline_jobs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListPipelineJobs" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager", + "shortName": "list_pipeline_jobs" }, + "description": "Sample for ListPipelineJobs", "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_sync", "segments": [ { @@ -11803,19 +22367,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_training_pipelines", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListTrainingPipelines" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager", + "shortName": "list_training_pipelines" }, + "description": "Sample for ListTrainingPipelines", "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_async", "segments": [ { @@ -11848,18 +22448,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_training_pipelines", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, "shortName": "ListTrainingPipelines" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager", + "shortName": "list_training_pipelines" }, + "description": "Sample for ListTrainingPipelines", "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_sync", "segments": [ { @@ -11892,19 +22528,67 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.explain", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, "shortName": "Explain" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", + "shortName": "explain" }, + "description": "Sample for Explain", "file": "aiplatform_v1beta1_generated_prediction_service_explain_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_async", "segments": [ { @@ -11937,18 +22621,66 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_prediction_service_explain_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.explain", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, "shortName": "Explain" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", + "shortName": "explain" }, + "description": "Sample for Explain", "file": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_sync", "segments": [ { @@ -11981,19 +22713,63 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.predict", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, "shortName": "Predict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + 
}, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", + "shortName": "predict" }, + "description": "Sample for Predict", "file": "aiplatform_v1beta1_generated_prediction_service_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_async", "segments": [ { @@ -12026,18 +22802,62 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_prediction_service_predict_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.predict", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, "shortName": "Predict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", + "shortName": "predict" }, + "description": "Sample for Predict", "file": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_sync", "segments": [ { @@ -12070,19 +22890,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.raw_predict", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, "shortName": "RawPredict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" }, + "description": "Sample for RawPredict", "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": 
"aiplatform_v1beta1_generated_PredictionService_RawPredict_async", "segments": [ { @@ -12115,18 +22975,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.raw_predict", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, "shortName": "RawPredict" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" }, + "description": "Sample for RawPredict", "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_sync", "segments": [ { @@ -12159,19 +23059,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.create_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "CreateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_specialist_pool" }, + "description": "Sample for CreateSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_async", "segments": [ { @@ -12204,18 +23144,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.create_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "CreateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_specialist_pool" }, + "description": "Sample for CreateSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_sync", "segments": [ { @@ -12248,19 +23228,55 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.delete_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "DeleteSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_specialist_pool" }, + "description": "Sample for DeleteSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async", "segments": [ { @@ -12293,18 +23309,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.delete_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "DeleteSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_specialist_pool" }, + "description": "Sample for DeleteSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_sync", "segments": [ { @@ -12337,19 +23389,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.get_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "GetSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", + "shortName": "get_specialist_pool" }, + "description": "Sample for GetSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_async", "segments": [ { @@ -12382,18 +23470,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.get_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "GetSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", + "shortName": "get_specialist_pool" }, + "description": "Sample for GetSpecialistPool", "file": 
"aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_sync", "segments": [ { @@ -12426,19 +23550,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.list_specialist_pools", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "ListSpecialistPools" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager", + "shortName": "list_specialist_pools" }, + "description": "Sample for ListSpecialistPools", "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_async", "segments": [ { @@ -12471,18 +23631,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.list_specialist_pools", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "ListSpecialistPools" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager", + "shortName": "list_specialist_pools" }, + "description": "Sample for ListSpecialistPools", "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_sync", "segments": [ { @@ -12515,19 +23711,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.update_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "UpdateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_specialist_pool" }, + "description": "Sample for UpdateSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_async", "segments": [ { @@ -12560,18 +23796,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.update_specialist_pool", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", "shortName": "SpecialistPoolService" }, "shortName": "UpdateSpecialistPool" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_specialist_pool" }, + "description": "Sample for UpdateSpecialistPool", "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_sync", "segments": [ { @@ -12604,19 +23880,59 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" }, + "description": "Sample for BatchCreateTensorboardRuns", "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_async", "segments": [ { @@ -12649,18 +23965,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" }, + "description": "Sample for BatchCreateTensorboardRuns", "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_sync", "segments": [ { @@ -12693,19 +24049,59 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" }, + "description": "Sample for BatchCreateTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async", "segments": [ { @@ -12738,18 +24134,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchCreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" }, + "description": "Sample for BatchCreateTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync", "segments": [ { @@ -12782,19 +24218,55 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async", "segments": [ { @@ -12827,18 +24299,54 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "BatchReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync", "segments": [ { @@ -12871,19 +24379,63 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", 
"service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" }, + "description": "Sample for CreateTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_async", "segments": [ { @@ -12916,18 +24468,62 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" }, + "description": "Sample for CreateTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_sync", "segments": [ { @@ -12960,19 +24556,63 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "create_tensorboard_run" }, + "description": "Sample for CreateTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_async", "segments": [ { @@ -13005,18 +24645,62 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "create_tensorboard_run" }, + "description": "Sample for CreateTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_sync", "segments": [ { @@ -13049,19 +24733,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" }, + "description": "Sample for CreateTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_async", "segments": [ { @@ -13094,18 +24818,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" }, + "description": "Sample for CreateTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_sync", "segments": [ { @@ -13138,19 +24902,59 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": 
"google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_tensorboard" }, + "description": "Sample for CreateTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_async", "segments": [ { @@ -13183,18 +24987,58 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "CreateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_tensorboard" }, + "description": "Sample for CreateTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync", "segments": [ { @@ -13227,19 +25071,55 @@ "start": 50, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_experiment" }, + "description": "Sample for DeleteTensorboardExperiment", "file": 
"aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_async", "segments": [ { @@ -13272,18 +25152,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_experiment" }, + "description": "Sample for DeleteTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_sync", "segments": [ { @@ -13316,19 +25232,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_run" }, + "description": "Sample for DeleteTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_async", "segments": [ { @@ -13361,18 +25313,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_run" }, + "description": "Sample for DeleteTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_sync", "segments": [ { @@ -13405,19 +25393,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_time_series" }, + "description": "Sample for DeleteTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_async", "segments": [ { @@ -13450,18 +25474,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboardTimeSeries" - } + }, + "parameters": 
[ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_time_series" }, + "description": "Sample for DeleteTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync", "segments": [ { @@ -13494,19 +25554,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard" }, + "description": "Sample for DeleteTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_async", "segments": [ { @@ -13539,18 +25635,54 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "DeleteTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard" }, + "description": "Sample for DeleteTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py", 
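The Delete* entries above all take a single name parameter and return a long-running operation with an empty result. A sketch deleting a run, its experiment, and the tensorboard itself, with placeholder resource names:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()

tensorboard = "projects/my-project/locations/us-central1/tensorboards/123"

# Each Delete* method returns a long-running operation; result() simply
# waits for the deletion to complete.
client.delete_tensorboard_run(
    name=f"{tensorboard}/experiments/my-experiment/runs/my-run"
).result()
client.delete_tensorboard_experiment(
    name=f"{tensorboard}/experiments/my-experiment"
).result()
client.delete_tensorboard(name=tensorboard).result()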
+ "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_sync", "segments": [ { @@ -13583,19 +25715,55 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.export_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ExportTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager", + "shortName": "export_tensorboard_time_series_data" }, + "description": "Sample for ExportTensorboardTimeSeriesData", "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async", "segments": [ { @@ -13628,18 +25796,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.export_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ExportTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager", + "shortName": "export_tensorboard_time_series_data" }, + "description": "Sample for ExportTensorboardTimeSeriesData", "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync", "segments": [ { @@ -13672,19 +25876,55 @@ "start": 42, "type": 
"RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" }, + "description": "Sample for GetTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_async", "segments": [ { @@ -13717,18 +25957,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" }, + "description": "Sample for GetTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_sync", "segments": [ { @@ -13761,19 +26037,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_run", "method": { + 
"fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "get_tensorboard_run" }, + "description": "Sample for GetTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_async", "segments": [ { @@ -13806,18 +26118,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "get_tensorboard_run" }, + "description": "Sample for GetTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_sync", "segments": [ { @@ -13850,19 +26198,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" }, + "description": "Sample for GetTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_async", "segments": [ { @@ -13895,18 +26279,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" }, + "description": "Sample for GetTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_sync", "segments": [ { @@ -13939,19 +26359,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", + "shortName": "get_tensorboard" }, + "description": "Sample for GetTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_async", "segments": [ { @@ -13984,18 +26440,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - 
] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "GetTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", + "shortName": "get_tensorboard" }, + "description": "Sample for GetTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_sync", "segments": [ { @@ -14028,19 +26520,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_experiments", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardExperiments" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager", + "shortName": "list_tensorboard_experiments" }, + "description": "Sample for ListTensorboardExperiments", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_async", "segments": [ { @@ -14073,18 +26601,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_experiments", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", "service": { + 
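GetTensorboard is a simple unary read; a sketch with a placeholder tensorboard name:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()

# GetTensorboard returns the Tensorboard message described in resultType above.
tensorboard = client.get_tensorboard(
    name="projects/my-project/locations/us-central1/tensorboards/123"
)
print(tensorboard.display_name)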
"fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardExperiments" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager", + "shortName": "list_tensorboard_experiments" }, + "description": "Sample for ListTensorboardExperiments", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_sync", "segments": [ { @@ -14117,19 +26681,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager", + "shortName": "list_tensorboard_runs" }, + "description": "Sample for ListTensorboardRuns", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_async", "segments": [ { @@ -14162,18 +26762,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_runs", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardRuns" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager", + "shortName": "list_tensorboard_runs" }, + "description": "Sample for ListTensorboardRuns", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_sync", "segments": [ { @@ -14206,19 +26842,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager", + "shortName": "list_tensorboard_time_series" }, + "description": "Sample for ListTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_async", "segments": [ { @@ -14251,18 +26923,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager", + "shortName": "list_tensorboard_time_series" }, + "description": "Sample for ListTensorboardTimeSeries", "file": 
"aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_sync", "segments": [ { @@ -14295,19 +27003,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboards", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboards" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager", + "shortName": "list_tensorboards" }, + "description": "Sample for ListTensorboards", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_async", "segments": [ { @@ -14340,18 +27084,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboards", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ListTensorboards" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager", + "shortName": "list_tensorboards" }, + "description": "Sample for ListTensorboards", "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_sync", "segments": [ { @@ -14384,19 +27164,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardBlobData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" }, + "description": "Sample for ReadTensorboardBlobData", "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_async", "segments": [ { @@ -14429,18 +27245,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_blob_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardBlobData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" }, + "description": "Sample for ReadTensorboardBlobData", "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync", "segments": [ { @@ -14473,19 +27325,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" }, + "description": "Sample for ReadTensorboardTimeSeriesData", "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async", "segments": [ { @@ -14518,18 +27406,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_time_series_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "ReadTensorboardTimeSeriesData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" }, + "description": "Sample for ReadTensorboardTimeSeriesData", "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync", "segments": [ { @@ -14562,19 +27486,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": 
"tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" }, + "description": "Sample for UpdateTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_async", "segments": [ { @@ -14607,18 +27571,58 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_experiment", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardExperiment" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" }, + "description": "Sample for UpdateTensorboardExperiment", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_sync", "segments": [ { @@ -14651,19 +27655,59 @@ "start": 41, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": 
"update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "update_tensorboard_run" }, + "description": "Sample for UpdateTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_async", "segments": [ { @@ -14696,18 +27740,58 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_run", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardRun" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "update_tensorboard_run" }, + "description": "Sample for UpdateTensorboardRun", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_sync", "segments": [ { @@ -14740,19 +27824,59 @@ "start": 45, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboardTimeSeries" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" }, + "description": "Sample for UpdateTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_async", "segments": [ { @@ -14785,18 +27909,58 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_time_series", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "UpdateTensorboardTimeSeries" - } + "shortName": "UpdateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" }, + "description": "Sample for UpdateTensorboardTimeSeries", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync", "segments": [ { @@ -14829,19 +27993,59 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "update_tensorboard" }, + "description": "Sample for UpdateTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_async", "segments": [ { @@ -14874,18 +28078,58 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "UpdateTensorboard" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_tensorboard" }, + "description": "Sample for UpdateTensorboard", "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_sync", "segments": [ { @@ -14918,19 +28162,59 @@ "start": 49, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_experiment_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardExperimentData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" }, + "description": "Sample for WriteTensorboardExperimentData", "file": 
"aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_async", "segments": [ { @@ -14963,18 +28247,58 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_experiment_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardExperimentData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" }, + "description": "Sample for WriteTensorboardExperimentData", "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_sync", "segments": [ { @@ -15007,19 +28331,59 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_run_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardRunData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" }, + "description": "Sample for WriteTensorboardRunData", "file": 
"aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_async", "segments": [ { @@ -15052,18 +28416,58 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_run_data", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, "shortName": "WriteTensorboardRunData" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" }, + "description": "Sample for WriteTensorboardRunData", "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_sync", "segments": [ { @@ -15096,19 +28500,51 @@ "start": 47, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.add_trial_measurement", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "AddTrialMeasurement" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "add_trial_measurement" }, + "description": "Sample for AddTrialMeasurement", "file": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_async", "segments": [ { @@ -15141,18 +28577,50 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_async.py" }, { + "canonical": true, "clientMethod": { + 
"client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.add_trial_measurement", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "AddTrialMeasurement" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "add_trial_measurement" }, + "description": "Sample for AddTrialMeasurement", "file": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_sync", "segments": [ { @@ -15185,19 +28653,51 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.check_trial_early_stopping_state", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CheckTrialEarlyStoppingState" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "check_trial_early_stopping_state" }, + "description": "Sample for CheckTrialEarlyStoppingState", "file": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_async", "segments": [ { @@ -15230,18 +28730,50 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.check_trial_early_stopping_state", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CheckTrialEarlyStoppingState" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "check_trial_early_stopping_state" }, + "description": "Sample for CheckTrialEarlyStoppingState", "file": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_sync", "segments": [ { @@ -15274,19 +28806,51 @@ "start": 46, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.complete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CompleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "complete_trial" }, + "description": "Sample for CompleteTrial", "file": "aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CompleteTrial_async", "segments": [ { @@ -15319,18 +28883,50 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.complete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CompleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "complete_trial" }, + "description": "Sample for CompleteTrial", "file": "aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CompleteTrial_sync", "segments": [ { @@ -15363,19 +28959,59 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.create_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CreateStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1beta1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "create_study" }, + "description": "Sample for CreateStudy", "file": "aiplatform_v1beta1_generated_vizier_service_create_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateStudy_async", "segments": [ { @@ -15408,18 +29044,58 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.create_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CreateStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1beta1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "create_study" }, + "description": "Sample for CreateStudy", "file": "aiplatform_v1beta1_generated_vizier_service_create_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateStudy_sync", "segments": [ { @@ -15452,19 +29128,59 @@ "start": 51, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.create_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CreateTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": 
"google.cloud.aiplatform_v1beta1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "create_trial" }, + "description": "Sample for CreateTrial", "file": "aiplatform_v1beta1_generated_vizier_service_create_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateTrial_async", "segments": [ { @@ -15497,18 +29213,58 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.create_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "CreateTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1beta1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "create_trial" }, + "description": "Sample for CreateTrial", "file": "aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateTrial_sync", "segments": [ { @@ -15541,19 +29297,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.delete_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" }, + "description": "Sample for DeleteStudy", "file": "aiplatform_v1beta1_generated_vizier_service_delete_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteStudy_async", "segments": [ { @@ -15584,18 +29375,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_study_async.py" }, { + "canonical": true, "clientMethod": { + 
"client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.delete_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" }, + "description": "Sample for DeleteStudy", "file": "aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteStudy_sync", "segments": [ { @@ -15626,19 +29452,54 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.delete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" }, + "description": "Sample for DeleteTrial", "file": "aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteTrial_async", "segments": [ { @@ -15669,18 +29530,53 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.delete_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "DeleteTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" }, + "description": "Sample for DeleteTrial", "file": "aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteTrial_sync", "segments": [ { @@ -15711,19 +29607,55 @@ "end": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.get_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "GetStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "get_study" }, + "description": "Sample for GetStudy", "file": "aiplatform_v1beta1_generated_vizier_service_get_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_GetStudy_async", "segments": [ { @@ -15756,18 +29688,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.get_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "GetStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "get_study" }, + "description": "Sample for GetStudy", "file": "aiplatform_v1beta1_generated_vizier_service_get_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_GetStudy_sync", "segments": [ { @@ -15800,19 +29768,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.get_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "GetTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "get_trial" }, + "description": "Sample for GetTrial", "file": "aiplatform_v1beta1_generated_vizier_service_get_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_GetTrial_async", "segments": [ { @@ -15845,18 +29849,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.get_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "GetTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "get_trial" }, + "description": "Sample for GetTrial", "file": "aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_GetTrial_sync", "segments": [ { @@ -15889,19 +29929,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.list_optimal_trials", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "ListOptimalTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" }, + "description": "Sample for ListOptimalTrials", "file": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_async", "segments": [ { @@ -15934,18 +30010,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": 
"aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.list_optimal_trials", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "ListOptimalTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" }, + "description": "Sample for ListOptimalTrials", "file": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_sync", "segments": [ { @@ -15978,19 +30090,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.list_studies", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListStudies", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "ListStudies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager", + "shortName": "list_studies" }, + "description": "Sample for ListStudies", "file": "aiplatform_v1beta1_generated_vizier_service_list_studies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_ListStudies_async", "segments": [ { @@ -16023,18 +30171,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_studies_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.list_studies", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListStudies", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "ListStudies" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListStudiesRequest" + }, + { + "name": "parent", + 
"type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager", + "shortName": "list_studies" }, + "description": "Sample for ListStudies", "file": "aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_ListStudies_sync", "segments": [ { @@ -16067,19 +30251,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.list_trials", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListTrials", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "ListTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager", + "shortName": "list_trials" }, + "description": "Sample for ListTrials", "file": "aiplatform_v1beta1_generated_vizier_service_list_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_ListTrials_async", "segments": [ { @@ -16112,18 +30332,54 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_trials_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.list_trials", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListTrials", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "ListTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager", + "shortName": "list_trials" }, + "description": "Sample for ListTrials", "file": "aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_ListTrials_sync", "segments": [ { @@ -16156,19 +30412,55 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py" }, { + 
"canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.lookup_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.LookupStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "LookupStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "lookup_study" }, + "description": "Sample for LookupStudy", "file": "aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_LookupStudy_async", "segments": [ { @@ -16201,18 +30493,54 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.lookup_study", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.LookupStudy", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "LookupStudy" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "lookup_study" }, + "description": "Sample for LookupStudy", "file": "aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_LookupStudy_sync", "segments": [ { @@ -16245,19 +30573,51 @@ "start": 43, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.stop_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.StopTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "StopTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "stop_trial" }, + "description": "Sample for StopTrial", "file": "aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_StopTrial_async", "segments": [ { @@ -16290,18 +30650,50 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.stop_trial", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.StopTrial", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "StopTrial" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "stop_trial" }, + "description": "Sample for StopTrial", "file": "aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_StopTrial_sync", "segments": [ { @@ -16334,19 +30726,51 @@ "start": 42, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py" }, { + "canonical": true, "clientMethod": { "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.suggest_trials", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials", "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "SuggestTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "suggest_trials" }, + "description": "Sample for SuggestTrials", "file": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_SuggestTrials_async", "segments": [ { @@ -16379,18 +30803,50 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py" }, { + "canonical": true, "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.suggest_trials", "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials", "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.VizierService", "shortName": "VizierService" }, "shortName": "SuggestTrials" - } + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "suggest_trials" }, + "description": "Sample for SuggestTrials", "file": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", "regionTag": "aiplatform_v1beta1_generated_VizierService_SuggestTrials_sync", "segments": [ { @@ -16423,7 +30879,8 @@ "start": 48, "type": "RESPONSE_HANDLING" } - ] + ], + "title": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py" } ] } diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index a155772f09..76356d74c9 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -103,24 +103,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - DatasetServiceClient, - DatasetServiceAsyncClient, + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), ], ) -def test_dataset_service_client_from_service_account_info(client_class): +def test_dataset_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -149,27 +149,31 @@ def test_dataset_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - DatasetServiceClient, - DatasetServiceAsyncClient, + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), ], ) -def test_dataset_service_client_from_service_account_file(client_class): +def test_dataset_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == 
("aiplatform.googleapis.com:443") def test_dataset_service_client_get_transport_class(): @@ -1759,7 +1763,7 @@ async def test_list_datasets_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1805,7 +1809,9 @@ async def test_list_datasets_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_datasets(request={})).pages: + async for page_ in ( + await client.list_datasets(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2911,7 +2917,7 @@ async def test_list_data_items_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -2957,7 +2963,9 @@ async def test_list_data_items_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_data_items(request={})).pages: + async for page_ in ( + await client.list_data_items(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3584,7 +3592,7 @@ async def test_list_annotations_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3630,7 +3638,9 @@ async def test_list_annotations_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_annotations(request={})).pages: + async for page_ in ( + await client.list_annotations(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3727,6 +3737,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = DatasetServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = DatasetServiceClient( @@ -3783,6 +3806,14 @@ def test_dataset_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_dataset_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -3928,24 +3959,40 @@ def test_dataset_service_grpc_transport_client_cert_source_for_mtls(transport_cl ) -def test_dataset_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_dataset_service_host_no_port(transport_name): client = DatasetServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_dataset_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_dataset_service_host_with_port(transport_name): client = DatasetServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_dataset_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index 408fee756f..88b52d3ec2 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -104,24 +104,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - EndpointServiceClient, - EndpointServiceAsyncClient, + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_endpoint_service_client_from_service_account_info(client_class): +def test_endpoint_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -150,27 +152,33 @@ def test_endpoint_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - EndpointServiceClient, - EndpointServiceAsyncClient, + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_endpoint_service_client_from_service_account_file(client_class): +def test_endpoint_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( 
service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_endpoint_service_client_get_transport_class(): @@ -1540,7 +1548,7 @@ async def test_list_endpoints_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1586,7 +1594,9 @@ async def test_list_endpoints_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_endpoints(request={})).pages: + async for page_ in ( + await client.list_endpoints(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2709,6 +2719,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = EndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
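# Sketch (not taken from the diff) of the behaviour the parametrized tests above now
# pin down: from_service_account_file() forwards an explicit transport name to the
# client constructor, so a single test body covers both the sync ("grpc") and async
# ("grpc_asyncio") clients.  The key path is a dummy value, which is why the factory
# is mocked here just as in the tests.
from unittest import mock

from google.auth import credentials as ga_credentials
from google.oauth2 import service_account
from google.cloud.aiplatform_v1 import EndpointServiceClient

with mock.patch.object(
    service_account.Credentials, "from_service_account_file"
) as factory:
    factory.return_value = ga_credentials.AnonymousCredentials()
    client = EndpointServiceClient.from_service_account_file(
        "dummy/file/path.json", transport="grpc"
    )

# The mocked credentials end up on the transport, and the default endpoint is used.
assert client.transport._credentials == factory.return_value
assert client.transport._host == "aiplatform.googleapis.com:443"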
client = EndpointServiceClient( @@ -2762,6 +2785,14 @@ def test_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_endpoint_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2907,24 +2938,40 @@ def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(transport_c ) -def test_endpoint_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_endpoint_service_host_no_port(transport_name): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_endpoint_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_endpoint_service_host_with_port(transport_name): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_endpoint_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py index d419b7e53c..5ddc6ed984 100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py @@ -100,14 +100,14 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, + (FeaturestoreOnlineServingServiceClient, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"), ], ) def test_featurestore_online_serving_service_client_from_service_account_info( - client_class, + client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( @@ -115,11 +115,11 @@ def test_featurestore_online_serving_service_client_from_service_account_info( ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -151,29 +151,33 @@ def test_featurestore_online_serving_service_client_service_account_always_use_j @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, + (FeaturestoreOnlineServingServiceClient, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"), ], ) def 
test_featurestore_online_serving_service_client_from_service_account_file( - client_class, + client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_featurestore_online_serving_service_client_get_transport_class(): @@ -1282,6 +1286,21 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class( + transport_name + )( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = FeaturestoreOnlineServingServiceClient( @@ -1325,6 +1344,14 @@ def test_featurestore_online_serving_service_base_transport(): with pytest.raises(NotImplementedError): transport.close() + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_featurestore_online_serving_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -1477,24 +1504,40 @@ def test_featurestore_online_serving_service_grpc_transport_client_cert_source_f ) -def test_featurestore_online_serving_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_online_serving_service_host_no_port(transport_name): client = FeaturestoreOnlineServingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_featurestore_online_serving_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_online_serving_service_host_with_port(transport_name): client = FeaturestoreOnlineServingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_featurestore_online_serving_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py index f4179185d2..375facda4f 
100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py @@ -109,24 +109,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, + (FeaturestoreServiceClient, "grpc"), + (FeaturestoreServiceAsyncClient, "grpc_asyncio"), ], ) -def test_featurestore_service_client_from_service_account_info(client_class): +def test_featurestore_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -155,27 +157,33 @@ def test_featurestore_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, + (FeaturestoreServiceClient, "grpc"), + (FeaturestoreServiceAsyncClient, "grpc_asyncio"), ], ) -def test_featurestore_service_client_from_service_account_file(client_class): +def test_featurestore_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_featurestore_service_client_get_transport_class(): @@ -1572,7 +1580,7 @@ async def test_list_featurestores_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1620,7 +1628,9 @@ async def test_list_featurestores_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_featurestores(request={})).pages: + async for page_ in ( + await client.list_featurestores(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3017,7 +3027,7 @@ async def test_list_entity_types_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) 
== 6 @@ -3065,7 +3075,9 @@ async def test_list_entity_types_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_entity_types(request={})).pages: + async for page_ in ( + await client.list_entity_types(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4696,7 +4708,7 @@ async def test_list_features_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4742,7 +4754,9 @@ async def test_list_features_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_features(request={})).pages: + async for page_ in ( + await client.list_features(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6330,7 +6344,7 @@ async def test_search_features_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6376,7 +6390,9 @@ async def test_search_features_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.search_features(request={})).pages: + async for page_ in ( + await client.search_features(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6473,6 +6489,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = FeaturestoreServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
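# Sketch (assumed resource names) of the async-pager pattern that the
# "# pragma: no branch" annotations above apply to: awaiting the list call
# returns an AsyncPager, and "async for" flattens its pages into items.
# Calling it requires Application Default Credentials and a real project.
from google.cloud.aiplatform_v1 import FeaturestoreServiceAsyncClient

async def collect_featurestores(parent: str) -> list:
    # parent looks like "projects/<project>/locations/<location>"
    client = FeaturestoreServiceAsyncClient()
    pager = await client.list_featurestores(parent=parent)
    featurestores = []
    async for featurestore in pager:  # one Featurestore per iteration
        featurestores.append(featurestore)
    return featurestores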
client = FeaturestoreServiceClient( @@ -6539,6 +6568,14 @@ def test_featurestore_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_featurestore_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -6686,24 +6723,40 @@ def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( ) -def test_featurestore_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_service_host_no_port(transport_name): client = FeaturestoreServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_featurestore_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_service_host_with_port(transport_name): client = FeaturestoreServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_featurestore_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index 29f361b97b..e42233265c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -102,24 +102,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_endpoint_service_client_from_service_account_info(client_class): +def test_index_endpoint_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -148,27 +150,33 @@ def test_index_endpoint_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_endpoint_service_client_from_service_account_file(client_class): +def 
test_index_endpoint_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_index_endpoint_service_client_get_transport_class(): @@ -1581,7 +1589,7 @@ async def test_list_index_endpoints_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1629,7 +1637,9 @@ async def test_list_index_endpoints_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_index_endpoints(request={})).pages: + async for page_ in ( + await client.list_index_endpoints(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2969,6 +2979,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = IndexEndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
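# Sketch of what the new transport_kind checks assert: the concrete gRPC
# transport reports its kind, matching the name used to select it, while the
# abstract base transport (covered by the "remainder" catch-all loop) leaves
# the property unimplemented.
from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1 import IndexEndpointServiceClient

transport_cls = IndexEndpointServiceClient.get_transport_class("grpc")
transport = transport_cls(credentials=ga_credentials.AnonymousCredentials())
assert transport.kind == "grpc"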
client = IndexEndpointServiceClient( @@ -3023,6 +3046,14 @@ def test_index_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_index_endpoint_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -3170,24 +3201,40 @@ def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( ) -def test_index_endpoint_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_endpoint_service_host_no_port(transport_name): client = IndexEndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_index_endpoint_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_endpoint_service_host_with_port(transport_name): client = IndexEndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_index_endpoint_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_index_service.py b/tests/unit/gapic/aiplatform_v1/test_index_service.py index 1723167f87..bae06be190 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_service.py @@ -94,24 +94,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexServiceClient, - IndexServiceAsyncClient, + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_service_client_from_service_account_info(client_class): +def test_index_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -140,27 +140,31 @@ def test_index_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexServiceClient, - IndexServiceAsyncClient, + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_service_client_from_service_account_file(client_class): +def test_index_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( 
service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_index_service_client_get_transport_class(): @@ -1486,7 +1490,7 @@ async def test_list_indexes_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1532,7 +1536,9 @@ async def test_list_indexes_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_indexes(request={})).pages: + async for page_ in ( + await client.list_indexes(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2091,6 +2097,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = IndexServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
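# Sketch (placeholder port) of the client_options behaviour the parametrized
# host tests pin down for both transports: a bare api_endpoint gains the
# default ":443", while an explicit port such as ":8000" is preserved on the
# transport.
from google.api_core import client_options
from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1 import IndexServiceClient

client = IndexServiceClient(
    credentials=ga_credentials.AnonymousCredentials(),
    client_options=client_options.ClientOptions(
        api_endpoint="aiplatform.googleapis.com:8000"
    ),
    transport="grpc",
)
assert client.transport._host == "aiplatform.googleapis.com:8000"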
client = IndexServiceClient( @@ -2142,6 +2161,14 @@ def test_index_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_index_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2284,24 +2311,40 @@ def test_index_service_grpc_transport_client_cert_source_for_mtls(transport_clas ) -def test_index_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_service_host_no_port(transport_name): client = IndexServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_index_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_service_host_with_port(transport_name): client = IndexServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_index_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 033ecb192b..fee4114d25 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -125,24 +125,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - JobServiceClient, - JobServiceAsyncClient, + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), ], ) -def test_job_service_client_from_service_account_info(client_class): +def test_job_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -171,27 +171,31 @@ def test_job_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - JobServiceClient, - JobServiceAsyncClient, + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), ], ) -def test_job_service_client_from_service_account_file(client_class): +def test_job_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = 
client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_job_service_client_get_transport_class(): @@ -1531,7 +1535,7 @@ async def test_list_custom_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1577,7 +1581,9 @@ async def test_list_custom_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_custom_jobs(request={})).pages: + async for page_ in ( + await client.list_custom_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3011,7 +3017,7 @@ async def test_list_data_labeling_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3059,7 +3065,9 @@ async def test_list_data_labeling_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_data_labeling_jobs(request={})).pages: + async for page_ in ( + await client.list_data_labeling_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4487,7 +4495,7 @@ async def test_list_hyperparameter_tuning_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4540,7 +4548,7 @@ async def test_list_hyperparameter_tuning_jobs_async_pages(): pages = [] async for page_ in ( await client.list_hyperparameter_tuning_jobs(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -5955,7 +5963,7 @@ async def test_list_batch_prediction_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6005,7 +6013,9 @@ async def test_list_batch_prediction_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: + async for page_ in ( + await client.list_batch_prediction_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -7246,7 +7256,7 @@ async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): ) assert 
async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -7301,7 +7311,7 @@ async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): pages = [] async for page_ in ( await client.search_model_deployment_monitoring_stats_anomalies(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -7988,7 +7998,7 @@ async def test_list_model_deployment_monitoring_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -8041,7 +8051,7 @@ async def test_list_model_deployment_monitoring_jobs_async_pages(): pages = [] async for page_ in ( await client.list_model_deployment_monitoring_jobs(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9116,6 +9126,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = JobServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = JobServiceClient( @@ -9190,6 +9213,14 @@ def test_job_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_job_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -9332,24 +9363,40 @@ def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class) ) -def test_job_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_job_service_host_no_port(transport_name): client = JobServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_job_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_job_service_host_with_port(transport_name): client = JobServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_job_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py index c7ebf08bcc..420e587fa6 100644 --- a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py 
+++ b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py @@ -109,24 +109,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MetadataServiceClient, - MetadataServiceAsyncClient, + (MetadataServiceClient, "grpc"), + (MetadataServiceAsyncClient, "grpc_asyncio"), ], ) -def test_metadata_service_client_from_service_account_info(client_class): +def test_metadata_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -155,27 +157,33 @@ def test_metadata_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MetadataServiceClient, - MetadataServiceAsyncClient, + (MetadataServiceClient, "grpc"), + (MetadataServiceAsyncClient, "grpc_asyncio"), ], ) -def test_metadata_service_client_from_service_account_file(client_class): +def test_metadata_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_metadata_service_client_get_transport_class(): @@ -1574,7 +1582,7 @@ async def test_list_metadata_stores_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1622,7 +1630,9 @@ async def test_list_metadata_stores_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_metadata_stores(request={})).pages: + async for page_ in ( + await client.list_metadata_stores(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2771,7 +2781,7 @@ async def test_list_artifacts_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -2817,7 +2827,9 @@ async def test_list_artifacts_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await 
client.list_artifacts(request={})).pages: + async for page_ in ( + await client.list_artifacts(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4435,7 +4447,7 @@ async def test_list_contexts_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4481,7 +4493,9 @@ async def test_list_contexts_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_contexts(request={})).pages: + async for page_ in ( + await client.list_contexts(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6853,7 +6867,7 @@ async def test_list_executions_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6899,7 +6913,9 @@ async def test_list_executions_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_executions(request={})).pages: + async for page_ in ( + await client.list_executions(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9063,7 +9079,7 @@ async def test_list_metadata_schemas_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -9111,7 +9127,9 @@ async def test_list_metadata_schemas_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_metadata_schemas(request={})).pages: + async for page_ in ( + await client.list_metadata_schemas(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9449,6 +9467,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = MetadataServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = MetadataServiceClient( @@ -9526,6 +9557,14 @@ def test_metadata_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_metadata_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -9671,24 +9710,40 @@ def test_metadata_service_grpc_transport_client_cert_source_for_mtls(transport_c ) -def test_metadata_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_metadata_service_host_no_port(transport_name): client = MetadataServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_metadata_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_metadata_service_host_with_port(transport_name): client = MetadataServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_metadata_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 2fd34a5c08..25b945c669 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -94,24 +94,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MigrationServiceClient, - MigrationServiceAsyncClient, + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), ], ) -def test_migration_service_client_from_service_account_info(client_class): +def test_migration_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -140,27 +142,33 @@ def test_migration_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MigrationServiceClient, - MigrationServiceAsyncClient, + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), ], ) -def test_migration_service_client_from_service_account_file(client_class): +def test_migration_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with 
mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_migration_service_client_get_transport_class(): @@ -1049,7 +1057,7 @@ async def test_search_migratable_resources_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1099,7 +1107,9 @@ async def test_search_migratable_resources_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.search_migratable_resources(request={})).pages: + async for page_ in ( + await client.search_migratable_resources(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -1483,6 +1493,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = MigrationServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = MigrationServiceClient( @@ -1531,6 +1554,14 @@ def test_migration_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_migration_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -1676,24 +1707,40 @@ def test_migration_service_grpc_transport_client_cert_source_for_mtls(transport_ ) -def test_migration_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_migration_service_host_no_port(transport_name): client = MigrationServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_migration_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_migration_service_host_with_port(transport_name): client = MigrationServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_migration_service_grpc_transport_channel(): @@ -1912,19 +1959,22 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -1934,22 +1984,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py index 63ed851a25..fec266b88f 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -102,24 +102,24 @@ def test__get_default_mtls_endpoint(): 
@pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - ModelServiceClient, - ModelServiceAsyncClient, + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), ], ) -def test_model_service_client_from_service_account_info(client_class): +def test_model_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -148,27 +148,31 @@ def test_model_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - ModelServiceClient, - ModelServiceAsyncClient, + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), ], ) -def test_model_service_client_from_service_account_file(client_class): +def test_model_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_model_service_client_get_transport_class(): @@ -1534,7 +1538,7 @@ async def test_list_models_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1580,7 +1584,9 @@ async def test_list_models_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_models(request={})).pages: + async for page_ in ( + await client.list_models(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3277,7 +3283,7 @@ async def test_list_model_evaluations_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3325,7 +3331,9 @@ async def test_list_model_evaluations_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_model_evaluations(request={})).pages: + async for page_ in ( + await client.list_model_evaluations(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in 
zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3971,7 +3979,7 @@ async def test_list_model_evaluation_slices_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4024,7 +4032,7 @@ async def test_list_model_evaluation_slices_async_pages(): pages = [] async for page_ in ( await client.list_model_evaluation_slices(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4121,6 +4129,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = ModelServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = ModelServiceClient( @@ -4178,6 +4199,14 @@ def test_model_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_model_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -4320,24 +4349,40 @@ def test_model_service_grpc_transport_client_cert_source_for_mtls(transport_clas ) -def test_model_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_model_service_host_no_port(transport_name): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_model_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_model_service_host_with_port(transport_name): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_model_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index 1e4c25aa0a..c1b9ce0dad 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -114,24 +114,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PipelineServiceClient, - PipelineServiceAsyncClient, + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), ], ) -def test_pipeline_service_client_from_service_account_info(client_class): +def test_pipeline_service_client_from_service_account_info( + client_class, 
transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -160,27 +162,33 @@ def test_pipeline_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PipelineServiceClient, - PipelineServiceAsyncClient, + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), ], ) -def test_pipeline_service_client_from_service_account_file(client_class): +def test_pipeline_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_pipeline_service_client_get_transport_class(): @@ -1595,7 +1603,7 @@ async def test_list_training_pipelines_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1643,7 +1651,9 @@ async def test_list_training_pipelines_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_training_pipelines(request={})).pages: + async for page_ in ( + await client.list_training_pipelines(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3043,7 +3053,7 @@ async def test_list_pipeline_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3091,7 +3101,9 @@ async def test_list_pipeline_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_pipeline_jobs(request={})).pages: + async for page_ in ( + await client.list_pipeline_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3664,6 +3676,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + 
transport = PipelineServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = PipelineServiceClient( @@ -3720,6 +3745,14 @@ def test_pipeline_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_pipeline_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -3865,24 +3898,40 @@ def test_pipeline_service_grpc_transport_client_cert_source_for_mtls(transport_c ) -def test_pipeline_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_pipeline_service_host_no_port(transport_name): client = PipelineServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_pipeline_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_pipeline_service_host_with_port(transport_name): client = PipelineServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_pipeline_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py index ce61f3774c..d7db36793c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -93,24 +93,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PredictionServiceClient, - PredictionServiceAsyncClient, + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), ], ) -def test_prediction_service_client_from_service_account_info(client_class): +def test_prediction_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -139,27 +141,33 @@ def test_prediction_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PredictionServiceClient, - PredictionServiceAsyncClient, + (PredictionServiceClient, "grpc"), + 
(PredictionServiceAsyncClient, "grpc_asyncio"), ], ) -def test_prediction_service_client_from_service_account_file(client_class): +def test_prediction_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_prediction_service_client_get_transport_class(): @@ -1372,6 +1380,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = PredictionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = PredictionServiceClient( @@ -1416,6 +1437,14 @@ def test_prediction_service_base_transport(): with pytest.raises(NotImplementedError): transport.close() + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_prediction_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -1561,24 +1590,40 @@ def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport ) -def test_prediction_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_prediction_service_host_no_port(transport_name): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_prediction_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_prediction_service_host_with_port(transport_name): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_prediction_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py index 473dbdde96..3bac3cac14 100644 --- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ 
b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -99,24 +99,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), ], ) -def test_specialist_pool_service_client_from_service_account_info(client_class): +def test_specialist_pool_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -145,27 +147,33 @@ def test_specialist_pool_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), ], ) -def test_specialist_pool_service_client_from_service_account_file(client_class): +def test_specialist_pool_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_specialist_pool_service_client_get_transport_class(): @@ -1578,7 +1586,7 @@ async def test_list_specialist_pools_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1626,7 +1634,9 @@ async def test_list_specialist_pools_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_specialist_pools(request={})).pages: + async for page_ in ( + await client.list_specialist_pools(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2215,6 +2225,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = SpecialistPoolServiceClient.get_transport_class(transport_name)( + 
credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = SpecialistPoolServiceClient( @@ -2266,6 +2289,14 @@ def test_specialist_pool_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_specialist_pool_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2415,24 +2446,40 @@ def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( ) -def test_specialist_pool_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_specialist_pool_service_host_no_port(transport_name): client = SpecialistPoolServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_specialist_pool_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_specialist_pool_service_host_with_port(transport_name): client = SpecialistPoolServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_specialist_pool_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py index 1e8b5d479c..9b9c00c7b7 100644 --- a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py @@ -112,24 +112,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, + (TensorboardServiceClient, "grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), ], ) -def test_tensorboard_service_client_from_service_account_info(client_class): +def test_tensorboard_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -158,27 +160,33 @@ def test_tensorboard_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, + (TensorboardServiceClient, 
"grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), ], ) -def test_tensorboard_service_client_from_service_account_file(client_class): +def test_tensorboard_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_tensorboard_service_client_get_transport_class(): @@ -1823,7 +1831,7 @@ async def test_list_tensorboards_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1871,7 +1879,9 @@ async def test_list_tensorboards_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_tensorboards(request={})).pages: + async for page_ in ( + await client.list_tensorboards(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3342,7 +3352,7 @@ async def test_list_tensorboard_experiments_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3395,7 +3405,7 @@ async def test_list_tensorboard_experiments_async_pages(): pages = [] async for page_ in ( await client.list_tensorboard_experiments(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -5099,7 +5109,7 @@ async def test_list_tensorboard_runs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -5147,7 +5157,9 @@ async def test_list_tensorboard_runs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_tensorboard_runs(request={})).pages: + async for page_ in ( + await client.list_tensorboard_runs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6938,7 +6950,7 @@ async def test_list_tensorboard_time_series_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6991,7 +7003,7 @@ async def test_list_tensorboard_time_series_async_pages(): pages = [] async for page_ in ( await 
client.list_tensorboard_time_series(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -8942,7 +8954,7 @@ async def test_export_tensorboard_time_series_data_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -8994,7 +9006,7 @@ async def test_export_tensorboard_time_series_data_async_pages(): pages = [] async for page_ in ( await client.export_tensorboard_time_series_data(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9091,6 +9103,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = TensorboardServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = TensorboardServiceClient( @@ -9165,6 +9190,14 @@ def test_tensorboard_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_tensorboard_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -9324,24 +9357,40 @@ def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( ) -def test_tensorboard_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_tensorboard_service_host_no_port(transport_name): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_tensorboard_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_tensorboard_service_host_with_port(transport_name): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_tensorboard_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py index 9f40c7ab1f..00a6a1b5dc 100644 --- a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py @@ -95,24 +95,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - VizierServiceClient, - VizierServiceAsyncClient, + (VizierServiceClient, "grpc"), + 
(VizierServiceAsyncClient, "grpc_asyncio"), ], ) -def test_vizier_service_client_from_service_account_info(client_class): +def test_vizier_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -141,27 +141,31 @@ def test_vizier_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - VizierServiceClient, - VizierServiceAsyncClient, + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), ], ) -def test_vizier_service_client_from_service_account_file(client_class): +def test_vizier_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_vizier_service_client_get_transport_class(): @@ -1507,7 +1511,7 @@ async def test_list_studies_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1553,7 +1557,9 @@ async def test_list_studies_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_studies(request={})).pages: + async for page_ in ( + await client.list_studies(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3035,7 +3041,7 @@ async def test_list_trials_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3081,7 +3087,9 @@ async def test_list_trials_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_trials(request={})).pages: + async for page_ in ( + await client.list_trials(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4309,6 +4317,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() 
+@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = VizierServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = VizierServiceClient( @@ -4370,6 +4391,14 @@ def test_vizier_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_vizier_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -4515,24 +4544,40 @@ def test_vizier_service_grpc_transport_client_cert_source_for_mtls(transport_cla ) -def test_vizier_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_vizier_service_host_no_port(transport_name): client = VizierServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_vizier_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_vizier_service_host_with_port(transport_name): client = VizierServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_vizier_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index ae7581bb05..244de7ddb9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -105,24 +105,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - DatasetServiceClient, - DatasetServiceAsyncClient, + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), ], ) -def test_dataset_service_client_from_service_account_info(client_class): +def test_dataset_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -151,27 +151,31 @@ def test_dataset_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - 
DatasetServiceClient, - DatasetServiceAsyncClient, + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), ], ) -def test_dataset_service_client_from_service_account_file(client_class): +def test_dataset_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_dataset_service_client_get_transport_class(): @@ -1761,7 +1765,7 @@ async def test_list_datasets_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1807,7 +1811,9 @@ async def test_list_datasets_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_datasets(request={})).pages: + async for page_ in ( + await client.list_datasets(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2913,7 +2919,7 @@ async def test_list_data_items_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -2959,7 +2965,9 @@ async def test_list_data_items_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_data_items(request={})).pages: + async for page_ in ( + await client.list_data_items(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3586,7 +3594,7 @@ async def test_list_annotations_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3632,7 +3640,9 @@ async def test_list_annotations_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_annotations(request={})).pages: + async for page_ in ( + await client.list_annotations(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3729,6 +3739,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = DatasetServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == 
transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = DatasetServiceClient( @@ -3785,6 +3808,14 @@ def test_dataset_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_dataset_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -3930,24 +3961,40 @@ def test_dataset_service_grpc_transport_client_cert_source_for_mtls(transport_cl ) -def test_dataset_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_dataset_service_host_no_port(transport_name): client = DatasetServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_dataset_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_dataset_service_host_with_port(transport_name): client = DatasetServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_dataset_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index d742aff8d3..2249c3e592 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -106,24 +106,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - EndpointServiceClient, - EndpointServiceAsyncClient, + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_endpoint_service_client_from_service_account_info(client_class): +def test_endpoint_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -152,27 +154,33 @@ def test_endpoint_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - EndpointServiceClient, - EndpointServiceAsyncClient, + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_endpoint_service_client_from_service_account_file(client_class): +def 
test_endpoint_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_endpoint_service_client_get_transport_class(): @@ -1542,7 +1550,7 @@ async def test_list_endpoints_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1588,7 +1596,9 @@ async def test_list_endpoints_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_endpoints(request={})).pages: + async for page_ in ( + await client.list_endpoints(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2711,6 +2721,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = EndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
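Editorial note: the `test_transport_kind` addition above and the `remainder = ["kind"]` catch-all just below exercise the same contract: every concrete transport reports which wire transport it wraps, while the abstract base transport raises `NotImplementedError` for anything it does not implement. A minimal toy sketch of that pattern (the `BaseTransport`/`GrpcTransport` classes are illustrative stand-ins, not the generated aiplatform transports):

```python
# Toy sketch of the "transport.kind" contract checked above; class names
# here are illustrative only, not the generated aiplatform transports.
import pytest


class BaseTransport:
    """Abstract transport: members the subclasses must provide raise."""

    @property
    def kind(self) -> str:
        raise NotImplementedError()


class GrpcTransport(BaseTransport):
    """Concrete transport: reports the wire transport it wraps."""

    @property
    def kind(self) -> str:
        return "grpc"


@pytest.mark.parametrize("transport_name", ["grpc"])
def test_transport_kind(transport_name):
    transport = {"grpc": GrpcTransport}[transport_name]()
    assert transport.kind == transport_name


def test_base_transport_kind_not_implemented():
    # Catch-all for remaining not-yet-implemented members, mirroring the
    # `remainder` loop added in the hunks below.
    transport = BaseTransport()
    for r in ["kind"]:
        with pytest.raises(NotImplementedError):
            getattr(transport, r)
```

Keeping the catch-all as a list makes it easy to extend as more abstract members are added, which is why the generated tests use the same shape for every service.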
client = EndpointServiceClient( @@ -2764,6 +2787,14 @@ def test_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_endpoint_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2909,24 +2940,40 @@ def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(transport_c ) -def test_endpoint_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_endpoint_service_host_no_port(transport_name): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_endpoint_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_endpoint_service_host_with_port(transport_name): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_endpoint_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py index 64061f6a3b..409363dbe9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -100,14 +100,14 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, + (FeaturestoreOnlineServingServiceClient, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"), ], ) def test_featurestore_online_serving_service_client_from_service_account_info( - client_class, + client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( @@ -115,11 +115,11 @@ def test_featurestore_online_serving_service_client_from_service_account_info( ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -151,29 +151,33 @@ def test_featurestore_online_serving_service_client_service_account_always_use_j @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, + (FeaturestoreOnlineServingServiceClient, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, 
"grpc_asyncio"), ], ) def test_featurestore_online_serving_service_client_from_service_account_file( - client_class, + client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_featurestore_online_serving_service_client_get_transport_class(): @@ -1282,6 +1286,21 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class( + transport_name + )( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = FeaturestoreOnlineServingServiceClient( @@ -1325,6 +1344,14 @@ def test_featurestore_online_serving_service_base_transport(): with pytest.raises(NotImplementedError): transport.close() + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_featurestore_online_serving_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -1477,24 +1504,40 @@ def test_featurestore_online_serving_service_grpc_transport_client_cert_source_f ) -def test_featurestore_online_serving_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_online_serving_service_host_no_port(transport_name): client = FeaturestoreOnlineServingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_featurestore_online_serving_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_online_serving_service_host_with_port(transport_name): client = FeaturestoreOnlineServingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_featurestore_online_serving_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py 
b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index f8782f9c5e..5aa43d8485 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -110,24 +110,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, + (FeaturestoreServiceClient, "grpc"), + (FeaturestoreServiceAsyncClient, "grpc_asyncio"), ], ) -def test_featurestore_service_client_from_service_account_info(client_class): +def test_featurestore_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -156,27 +158,33 @@ def test_featurestore_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, + (FeaturestoreServiceClient, "grpc"), + (FeaturestoreServiceAsyncClient, "grpc_asyncio"), ], ) -def test_featurestore_service_client_from_service_account_file(client_class): +def test_featurestore_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_featurestore_service_client_get_transport_class(): @@ -1573,7 +1581,7 @@ async def test_list_featurestores_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1621,7 +1629,9 @@ async def test_list_featurestores_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_featurestores(request={})).pages: + async for page_ in ( + await client.list_featurestores(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3018,7 +3028,7 @@ async def test_list_entity_types_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: 
+ async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3066,7 +3076,9 @@ async def test_list_entity_types_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_entity_types(request={})).pages: + async for page_ in ( + await client.list_entity_types(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4697,7 +4709,7 @@ async def test_list_features_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4743,7 +4755,9 @@ async def test_list_features_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_features(request={})).pages: + async for page_ in ( + await client.list_features(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6331,7 +6345,7 @@ async def test_search_features_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6377,7 +6391,9 @@ async def test_search_features_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.search_features(request={})).pages: + async for page_ in ( + await client.search_features(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6474,6 +6490,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = FeaturestoreServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
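Editorial note: the `# pragma: no branch` comments added to these async pager loops are coverage.py directives; they exclude the annotated `async for` line from branch measurement, presumably because branch coverage would otherwise flag a partial branch on loops that always iterate in these tests. A self-contained toy illustration (the async generator below stands in for the real `*AsyncPager` and is not part of the library):

```python
# Toy illustration of the coverage pragma used above. Under
# `coverage run --branch`, "# pragma: no branch" removes the annotated
# line from branch analysis; the loop itself behaves normally.
import asyncio


async def fake_async_pager():
    # Stand-in for a ListFoosAsyncPager: always yields at least one item.
    for item in ("a", "b", "c"):
        yield item


async def collect():
    responses = []
    async for response in fake_async_pager():  # pragma: no branch
        responses.append(response)
    return responses


if __name__ == "__main__":
    assert asyncio.run(collect()) == ["a", "b", "c"]
```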
client = FeaturestoreServiceClient( @@ -6540,6 +6569,14 @@ def test_featurestore_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_featurestore_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -6687,24 +6724,40 @@ def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( ) -def test_featurestore_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_service_host_no_port(transport_name): client = FeaturestoreServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_featurestore_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_featurestore_service_host_with_port(transport_name): client = FeaturestoreServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_featurestore_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index f66c6fe7d5..99b3db3ec9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -102,24 +102,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_endpoint_service_client_from_service_account_info(client_class): +def test_index_endpoint_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -148,27 +150,33 @@ def test_index_endpoint_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_endpoint_service_client_from_service_account_file(client_class): +def 
test_index_endpoint_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_index_endpoint_service_client_get_transport_class(): @@ -1581,7 +1589,7 @@ async def test_list_index_endpoints_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1629,7 +1637,9 @@ async def test_list_index_endpoints_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_index_endpoints(request={})).pages: + async for page_ in ( + await client.list_index_endpoints(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2969,6 +2979,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = IndexEndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
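Editorial note: every service's `*_host_no_port` / `*_host_with_port` test in this patch gains the same `transport_name` parametrization and passes `transport=transport_name` alongside the explicit `api_endpoint`; the IndexEndpointService variant appears just below. Reassembled, the with-port case reads roughly like this:

```python
# Joined-up shape of the parametrized host test repeated for each service
# (IndexEndpointService shown; an explicit port is preserved verbatim).
import pytest
from google.api_core import client_options
from google.auth import credentials as ga_credentials

from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import (
    IndexEndpointServiceClient,
)


@pytest.mark.parametrize("transport_name", ["grpc", "grpc_asyncio"])
def test_index_endpoint_service_host_with_port(transport_name):
    client = IndexEndpointServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="aiplatform.googleapis.com:8000"
        ),
        transport=transport_name,
    )
    assert client.transport._host == ("aiplatform.googleapis.com:8000")
```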
client = IndexEndpointServiceClient( @@ -3023,6 +3046,14 @@ def test_index_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_index_endpoint_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -3170,24 +3201,40 @@ def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( ) -def test_index_endpoint_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_endpoint_service_host_no_port(transport_name): client = IndexEndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_index_endpoint_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_endpoint_service_host_with_port(transport_name): client = IndexEndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_index_endpoint_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py index 7db73c13f8..b909b9f52a 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -96,24 +96,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexServiceClient, - IndexServiceAsyncClient, + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_service_client_from_service_account_info(client_class): +def test_index_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -142,27 +142,31 @@ def test_index_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - IndexServiceClient, - IndexServiceAsyncClient, + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), ], ) -def test_index_service_client_from_service_account_file(client_class): +def test_index_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with 
mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_index_service_client_get_transport_class(): @@ -1488,7 +1492,7 @@ async def test_list_indexes_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1534,7 +1538,9 @@ async def test_list_indexes_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_indexes(request={})).pages: + async for page_ in ( + await client.list_indexes(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2093,6 +2099,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = IndexServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
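Editorial note: the same mechanical change recurs for every service in this patch: the single `client_class` parametrization becomes `(client_class, transport_name)` pairs, and the `from_service_account_*` factories receive `transport=transport_name`, so the sync gRPC and async gRPC clients are both exercised. Joined up from the IndexService hunks above, the resulting test reads roughly as follows (trimmed to the `from_service_account_file` call):

```python
# Joined-up shape of the parametrized service-account test applied to each
# service in this patch (IndexService shown; only the imports differ).
from unittest import mock

import pytest
from google.auth import credentials as ga_credentials
from google.oauth2 import service_account

from google.cloud.aiplatform_v1beta1.services.index_service import (
    IndexServiceAsyncClient,
    IndexServiceClient,
)


@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (IndexServiceClient, "grpc"),
        (IndexServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_index_service_client_from_service_account_file(client_class, transport_name):
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("aiplatform.googleapis.com:443")
```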
client = IndexServiceClient( @@ -2144,6 +2163,14 @@ def test_index_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_index_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2286,24 +2313,40 @@ def test_index_service_grpc_transport_client_cert_source_for_mtls(transport_clas ) -def test_index_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_service_host_no_port(transport_name): client = IndexServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_index_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_index_service_host_with_port(transport_name): client = IndexServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_index_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index a390c29a24..e66650d67d 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -127,24 +127,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - JobServiceClient, - JobServiceAsyncClient, + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), ], ) -def test_job_service_client_from_service_account_info(client_class): +def test_job_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -173,27 +173,31 @@ def test_job_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - JobServiceClient, - JobServiceAsyncClient, + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), ], ) -def test_job_service_client_from_service_account_file(client_class): +def test_job_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds 
- client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_job_service_client_get_transport_class(): @@ -1533,7 +1537,7 @@ async def test_list_custom_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1579,7 +1583,9 @@ async def test_list_custom_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_custom_jobs(request={})).pages: + async for page_ in ( + await client.list_custom_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3013,7 +3019,7 @@ async def test_list_data_labeling_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3061,7 +3067,9 @@ async def test_list_data_labeling_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_data_labeling_jobs(request={})).pages: + async for page_ in ( + await client.list_data_labeling_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4489,7 +4497,7 @@ async def test_list_hyperparameter_tuning_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4542,7 +4550,7 @@ async def test_list_hyperparameter_tuning_jobs_async_pages(): pages = [] async for page_ in ( await client.list_hyperparameter_tuning_jobs(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -5050,6 +5058,7 @@ def test_create_batch_prediction_job(request_type, transport: str = "grpc"): name="name_value", display_name="display_name_value", model="model_value", + model_version_id="model_version_id_value", service_account="service_account_value", generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, @@ -5066,6 +5075,7 @@ def test_create_batch_prediction_job(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.model == "model_value" + assert response.model_version_id == "model_version_id_value" assert response.service_account == "service_account_value" assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED @@ -5113,6 +5123,7 @@ async def 
test_create_batch_prediction_job_async( name="name_value", display_name="display_name_value", model="model_value", + model_version_id="model_version_id_value", service_account="service_account_value", generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, @@ -5130,6 +5141,7 @@ async def test_create_batch_prediction_job_async( assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.model == "model_value" + assert response.model_version_id == "model_version_id_value" assert response.service_account == "service_account_value" assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED @@ -5335,6 +5347,7 @@ def test_get_batch_prediction_job(request_type, transport: str = "grpc"): name="name_value", display_name="display_name_value", model="model_value", + model_version_id="model_version_id_value", service_account="service_account_value", generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, @@ -5351,6 +5364,7 @@ def test_get_batch_prediction_job(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.model == "model_value" + assert response.model_version_id == "model_version_id_value" assert response.service_account == "service_account_value" assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED @@ -5398,6 +5412,7 @@ async def test_get_batch_prediction_job_async( name="name_value", display_name="display_name_value", model="model_value", + model_version_id="model_version_id_value", service_account="service_account_value", generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, @@ -5415,6 +5430,7 @@ async def test_get_batch_prediction_job_async( assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.model == "model_value" + assert response.model_version_id == "model_version_id_value" assert response.service_account == "service_account_value" assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED @@ -5965,7 +5981,7 @@ async def test_list_batch_prediction_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6015,7 +6031,9 @@ async def test_list_batch_prediction_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: + async for page_ in ( + await client.list_batch_prediction_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -7256,7 +7274,7 @@ async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -7311,7 +7329,7 @@ async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): pages = [] async for page_ in ( await client.search_model_deployment_monitoring_stats_anomalies(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", 
""]): assert page_.raw_page.next_page_token == token @@ -7998,7 +8016,7 @@ async def test_list_model_deployment_monitoring_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -8051,7 +8069,7 @@ async def test_list_model_deployment_monitoring_jobs_async_pages(): pages = [] async for page_ in ( await client.list_model_deployment_monitoring_jobs(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9126,6 +9144,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = JobServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = JobServiceClient( @@ -9200,6 +9231,14 @@ def test_job_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_job_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -9342,24 +9381,40 @@ def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class) ) -def test_job_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_job_service_host_no_port(transport_name): client = JobServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_job_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_job_service_host_with_port(transport_name): client = JobServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_job_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py index 7f8fd96431..bd09e856f9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -111,24 +111,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MetadataServiceClient, - MetadataServiceAsyncClient, + (MetadataServiceClient, "grpc"), + (MetadataServiceAsyncClient, "grpc_asyncio"), ], ) -def test_metadata_service_client_from_service_account_info(client_class): +def test_metadata_service_client_from_service_account_info( + client_class, 
transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -157,27 +159,33 @@ def test_metadata_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MetadataServiceClient, - MetadataServiceAsyncClient, + (MetadataServiceClient, "grpc"), + (MetadataServiceAsyncClient, "grpc_asyncio"), ], ) -def test_metadata_service_client_from_service_account_file(client_class): +def test_metadata_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_metadata_service_client_get_transport_class(): @@ -1576,7 +1584,7 @@ async def test_list_metadata_stores_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1624,7 +1632,9 @@ async def test_list_metadata_stores_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_metadata_stores(request={})).pages: + async for page_ in ( + await client.list_metadata_stores(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2773,7 +2783,7 @@ async def test_list_artifacts_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -2819,7 +2829,9 @@ async def test_list_artifacts_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_artifacts(request={})).pages: + async for page_ in ( + await client.list_artifacts(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4437,7 +4449,7 @@ async def test_list_contexts_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch 
responses.append(response) assert len(responses) == 6 @@ -4483,7 +4495,9 @@ async def test_list_contexts_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_contexts(request={})).pages: + async for page_ in ( + await client.list_contexts(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6855,7 +6869,7 @@ async def test_list_executions_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6901,7 +6915,9 @@ async def test_list_executions_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_executions(request={})).pages: + async for page_ in ( + await client.list_executions(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9065,7 +9081,7 @@ async def test_list_metadata_schemas_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -9113,7 +9129,9 @@ async def test_list_metadata_schemas_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_metadata_schemas(request={})).pages: + async for page_ in ( + await client.list_metadata_schemas(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9451,6 +9469,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = MetadataServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = MetadataServiceClient( @@ -9528,6 +9559,14 @@ def test_metadata_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_metadata_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -9673,24 +9712,40 @@ def test_metadata_service_grpc_transport_client_cert_source_for_mtls(transport_c ) -def test_metadata_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_metadata_service_host_no_port(transport_name): client = MetadataServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_metadata_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_metadata_service_host_with_port(transport_name): client = MetadataServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_metadata_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 720b1c5b78..7f195de7f3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -96,24 +96,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MigrationServiceClient, - MigrationServiceAsyncClient, + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), ], ) -def test_migration_service_client_from_service_account_info(client_class): +def test_migration_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -142,27 +144,33 @@ def test_migration_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - MigrationServiceClient, - MigrationServiceAsyncClient, + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), ], ) -def test_migration_service_client_from_service_account_file(client_class): +def test_migration_service_client_from_service_account_file( + client_class, transport_name +): creds = 
ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_migration_service_client_get_transport_class(): @@ -1051,7 +1059,7 @@ async def test_search_migratable_resources_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1101,7 +1109,9 @@ async def test_search_migratable_resources_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.search_migratable_resources(request={})).pages: + async for page_ in ( + await client.search_migratable_resources(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -1485,6 +1495,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = MigrationServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = MigrationServiceClient( @@ -1533,6 +1556,14 @@ def test_migration_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_migration_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -1678,24 +1709,40 @@ def test_migration_service_grpc_transport_client_cert_source_for_mtls(transport_ ) -def test_migration_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_migration_service_host_no_port(transport_name): client = MigrationServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_migration_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_migration_service_host_with_port(transport_name): client = MigrationServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_migration_service_grpc_transport_channel(): @@ -1888,19 +1935,22 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -1910,22 +1960,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "squid" + dataset = "clam" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", + "project": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index e05eb91e2d..8a0940ae07 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -106,24 +106,24 @@ def 
test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - ModelServiceClient, - ModelServiceAsyncClient, + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), ], ) -def test_model_service_client_from_service_account_info(client_class): +def test_model_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -152,27 +152,31 @@ def test_model_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - ModelServiceClient, - ModelServiceAsyncClient, + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), ], ) -def test_model_service_client_from_service_account_file(client_class): +def test_model_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_model_service_client_get_transport_class(): @@ -910,8 +914,11 @@ def test_get_model(request_type, transport: str = "grpc"): # Designate an appropriate return value for the call. call.return_value = model.Model( name="name_value", + version_id="version_id_value", + version_aliases=["version_aliases_value"], display_name="display_name_value", description="description_value", + version_description="version_description_value", metadata_schema_uri="metadata_schema_uri_value", training_pipeline="training_pipeline_value", artifact_uri="artifact_uri_value", @@ -932,8 +939,11 @@ def test_get_model(request_type, transport: str = "grpc"): # Establish that the response is the type that we expect. 
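Editorial note: the MigrationService hunks a little above swap the two `dataset_path` helpers, so the first variant now emits `projects/{project}/locations/{location}/datasets/{dataset}` while the second keeps the legacy `projects/{project}/datasets/{dataset}` form. A toy builder/parser pair for the location-qualified format (the function names and regex below are illustrative, not the generated client's internals):

```python
# Toy builder/parser for the location-qualified dataset path asserted in
# the MigrationService tests above; names and regex are illustrative only.
import re
from typing import Dict, Optional


def dataset_path(project: str, location: str, dataset: str) -> str:
    return "projects/{project}/locations/{location}/datasets/{dataset}".format(
        project=project, location=location, dataset=dataset
    )


def parse_dataset_path(path: str) -> Optional[Dict[str, str]]:
    m = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)"
        r"/datasets/(?P<dataset>.+?)$",
        path,
    )
    return m.groupdict() if m else None


if __name__ == "__main__":
    expected = {"project": "nautilus", "location": "scallop", "dataset": "abalone"}
    assert parse_dataset_path(dataset_path(**expected)) == expected
```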
assert isinstance(response, model.Model) assert response.name == "name_value" + assert response.version_id == "version_id_value" + assert response.version_aliases == ["version_aliases_value"] assert response.display_name == "display_name_value" assert response.description == "description_value" + assert response.version_description == "version_description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.training_pipeline == "training_pipeline_value" assert response.artifact_uri == "artifact_uri_value" @@ -984,8 +994,11 @@ async def test_get_model_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( model.Model( name="name_value", + version_id="version_id_value", + version_aliases=["version_aliases_value"], display_name="display_name_value", description="description_value", + version_description="version_description_value", metadata_schema_uri="metadata_schema_uri_value", training_pipeline="training_pipeline_value", artifact_uri="artifact_uri_value", @@ -1011,8 +1024,11 @@ async def test_get_model_async( # Establish that the response is the type that we expect. assert isinstance(response, model.Model) assert response.name == "name_value" + assert response.version_id == "version_id_value" + assert response.version_aliases == ["version_aliases_value"] assert response.display_name == "display_name_value" assert response.description == "description_value" + assert response.version_description == "version_description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.training_pipeline == "training_pipeline_value" assert response.artifact_uri == "artifact_uri_value" @@ -1538,7 +1554,7 @@ async def test_list_models_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1584,7 +1600,9 @@ async def test_list_models_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_models(request={})).pages: + async for page_ in ( + await client.list_models(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -1593,11 +1611,11 @@ async def test_list_models_async_pages(): @pytest.mark.parametrize( "request_type", [ - model_service.UpdateModelRequest, + model_service.ListModelVersionsRequest, dict, ], ) -def test_update_model(request_type, transport: str = "grpc"): +def test_list_model_versions(request_type, transport: str = "grpc"): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -1608,50 +1626,26 @@ def test_update_model(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_model), "__call__") as call: + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: # Designate an appropriate return value for the call. 
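# ---------------------------------------------------------------------------
# Minimal sketch of the stub-mocking pattern used throughout these tests,
# with hypothetical Fake* classes instead of the generated client/transport:
# patch the transport's RPC method, return a canned message, then assert on
# call.mock_calls and on the fields of the returned object.
from unittest import mock


class FakeTransport:
    def get_model(self, request):
        raise RuntimeError("the real transport would hit the network")


class FakeClient:
    def __init__(self, transport):
        self.transport = transport

    def get_model(self, request):
        return self.transport.get_model(request)


def test_get_model_stub_pattern():
    client = FakeClient(FakeTransport())
    with mock.patch.object(type(client.transport), "get_model") as call:
        # Designate a canned return value, as the diff does with model.Model(...).
        call.return_value = {"name": "name_value", "version_id": "version_id_value"}
        response = client.get_model({"name": "projects/p/models/m"})

    assert len(call.mock_calls) == 1
    assert response["name"] == "name_value"
    assert response["version_id"] == "version_id_value"
# ---------------------------------------------------------------------------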
- call.return_value = gca_model.Model( - name="name_value", - display_name="display_name_value", - description="description_value", - metadata_schema_uri="metadata_schema_uri_value", - training_pipeline="training_pipeline_value", - artifact_uri="artifact_uri_value", - supported_deployment_resources_types=[ - gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES - ], - supported_input_storage_formats=["supported_input_storage_formats_value"], - supported_output_storage_formats=["supported_output_storage_formats_value"], - etag="etag_value", + call.return_value = model_service.ListModelVersionsResponse( + next_page_token="next_page_token_value", ) - response = client.update_model(request) + response = client.list_model_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() + assert args[0] == model_service.ListModelVersionsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.description == "description_value" - assert response.metadata_schema_uri == "metadata_schema_uri_value" - assert response.training_pipeline == "training_pipeline_value" - assert response.artifact_uri == "artifact_uri_value" - assert response.supported_deployment_resources_types == [ - gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES - ] - assert response.supported_input_storage_formats == [ - "supported_input_storage_formats_value" - ] - assert response.supported_output_storage_formats == [ - "supported_output_storage_formats_value" - ] - assert response.etag == "etag_value" + assert isinstance(response, pagers.ListModelVersionsPager) + assert response.next_page_token == "next_page_token_value" -def test_update_model_empty_call(): +def test_list_model_versions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = ModelServiceClient( @@ -1660,16 +1654,18 @@ def test_update_model_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_model), "__call__") as call: - client.update_model() + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + client.list_model_versions() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() + assert args[0] == model_service.ListModelVersionsRequest() @pytest.mark.asyncio -async def test_update_model_async( - transport: str = "grpc_asyncio", request_type=model_service.UpdateModelRequest +async def test_list_model_versions_async( + transport: str = "grpc_asyncio", request_type=model_service.ListModelVersionsRequest ): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1681,75 +1677,1018 @@ async def test_update_model_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_model), "__call__") as call: + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gca_model.Model( - name="name_value", - display_name="display_name_value", - description="description_value", - metadata_schema_uri="metadata_schema_uri_value", - training_pipeline="training_pipeline_value", - artifact_uri="artifact_uri_value", - supported_deployment_resources_types=[ - gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES - ], - supported_input_storage_formats=[ - "supported_input_storage_formats_value" - ], - supported_output_storage_formats=[ - "supported_output_storage_formats_value" - ], - etag="etag_value", + model_service.ListModelVersionsResponse( + next_page_token="next_page_token_value", ) ) - response = await client.update_model(request) + response = await client.list_model_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() + assert args[0] == model_service.ListModelVersionsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.description == "description_value" - assert response.metadata_schema_uri == "metadata_schema_uri_value" - assert response.training_pipeline == "training_pipeline_value" - assert response.artifact_uri == "artifact_uri_value" - assert response.supported_deployment_resources_types == [ - gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES - ] - assert response.supported_input_storage_formats == [ - "supported_input_storage_formats_value" - ] - assert response.supported_output_storage_formats == [ - "supported_output_storage_formats_value" - ] - assert response.etag == "etag_value" + assert isinstance(response, pagers.ListModelVersionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_model_versions_async_from_dict(): + await test_list_model_versions_async(request_type=dict) + + +def test_list_model_versions_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelVersionsRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + call.return_value = model_service.ListModelVersionsResponse() + client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_model_versions_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelVersionsRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListModelVersionsResponse() + ) + await client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] + + +def test_list_model_versions_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelVersionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_versions( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_list_model_versions_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_versions( + model_service.ListModelVersionsRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_versions_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelVersionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListModelVersionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_versions( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_model_versions_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_versions( + model_service.ListModelVersionsRequest(), + name="name_value", + ) + + +def test_list_model_versions_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
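# ---------------------------------------------------------------------------
# Standalone sketch of the routing-header check made by the *_field_headers
# tests: whatever resource name is set on the request must be echoed back as
# an "x-goog-request-params" metadata entry. make_routing_metadata is a
# hypothetical helper approximating what the generated client does internally.
def make_routing_metadata(request_name):
    return [("x-goog-request-params", "name=%s" % request_name)]


def test_routing_header_pattern():
    metadata = make_routing_metadata("name/value")
    # Same membership assertion shape as in the diff: the header tuple must
    # appear somewhere in the metadata handed to the transport call.
    assert ("x-goog-request-params", "name=name/value") in metadata
# ---------------------------------------------------------------------------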
+ with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_model_versions(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, model.Model) for i in results) + + +def test_list_model_versions_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_versions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_model_versions_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_versions( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_versions_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
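# ---------------------------------------------------------------------------
# Compact sketch of the paging pattern behind test_list_model_versions_pager:
# successive responses (terminated by RuntimeError) are fed via side_effect,
# and a tiny pager keeps calling until next_page_token comes back empty. The
# Page class and iterate_models helper are simplified assumptions, not the
# real google.api_core pagers.
from unittest import mock


class Page:
    def __init__(self, models, next_page_token=""):
        self.models = models
        self.next_page_token = next_page_token


def iterate_models(api_call):
    # Follow next_page_token until it is empty.
    token = None
    while True:
        page = api_call(page_token=token)
        yield from page.models
        if not page.next_page_token:
            return
        token = page.next_page_token


def test_pager_consumes_all_pages():
    api_call = mock.Mock(
        side_effect=[
            Page(["m1", "m2", "m3"], next_page_token="abc"),
            Page([], next_page_token="def"),
            Page(["m4"], next_page_token="ghi"),
            Page(["m5", "m6"]),
            RuntimeError,  # guards against one call too many, as in the diff
        ]
    )
    assert len(list(iterate_models(api_call))) == 6
    assert api_call.call_count == 4
# ---------------------------------------------------------------------------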
+ with mock.patch.object( + type(client.transport.list_model_versions), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_model_versions(request={}) + ).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.UpdateModelRequest, + dict, + ], +) +def test_update_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model( + name="name_value", + version_id="version_id_value", + version_aliases=["version_aliases_value"], + display_name="display_name_value", + description="description_value", + version_description="version_description_value", + metadata_schema_uri="metadata_schema_uri_value", + training_pipeline="training_pipeline_value", + artifact_uri="artifact_uri_value", + supported_deployment_resources_types=[ + gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ], + supported_input_storage_formats=["supported_input_storage_formats_value"], + supported_output_storage_formats=["supported_output_storage_formats_value"], + etag="etag_value", + ) + response = client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model.Model) + assert response.name == "name_value" + assert response.version_id == "version_id_value" + assert response.version_aliases == ["version_aliases_value"] + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.version_description == "version_description_value" + assert response.metadata_schema_uri == "metadata_schema_uri_value" + assert response.training_pipeline == "training_pipeline_value" + assert response.artifact_uri == "artifact_uri_value" + assert response.supported_deployment_resources_types == [ + gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ] + assert response.supported_input_storage_formats == [ + "supported_input_storage_formats_value" + ] + assert response.supported_output_storage_formats == [ + "supported_output_storage_formats_value" + ] + assert response.etag == "etag_value" + + +def test_update_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + client.update_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + +@pytest.mark.asyncio +async def test_update_model_async( + transport: str = "grpc_asyncio", request_type=model_service.UpdateModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model.Model( + name="name_value", + version_id="version_id_value", + version_aliases=["version_aliases_value"], + display_name="display_name_value", + description="description_value", + version_description="version_description_value", + metadata_schema_uri="metadata_schema_uri_value", + training_pipeline="training_pipeline_value", + artifact_uri="artifact_uri_value", + supported_deployment_resources_types=[ + gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ], + supported_input_storage_formats=[ + "supported_input_storage_formats_value" + ], + supported_output_storage_formats=[ + "supported_output_storage_formats_value" + ], + etag="etag_value", + ) + ) + response = await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model.Model) + assert response.name == "name_value" + assert response.version_id == "version_id_value" + assert response.version_aliases == ["version_aliases_value"] + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.version_description == "version_description_value" + assert response.metadata_schema_uri == "metadata_schema_uri_value" + assert response.training_pipeline == "training_pipeline_value" + assert response.artifact_uri == "artifact_uri_value" + assert response.supported_deployment_resources_types == [ + gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ] + assert response.supported_input_storage_formats == [ + "supported_input_storage_formats_value" + ] + assert response.supported_output_storage_formats == [ + "supported_output_storage_formats_value" + ] + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_model_async_from_dict(): + await test_update_model_async(request_type=dict) + + +def test_update_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = "model.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + call.return_value = gca_model.Model() + client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model.name=model.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = "model.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model.name=model.name/value", + ) in kw["metadata"] + + +def test_update_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_model( + model=gca_model.Model(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_model( + model=gca_model.Model(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.DeleteModelRequest, + dict, + ], +) +def test_delete_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + +@pytest.mark.asyncio +async def test_delete_model_async( + transport: str = "grpc_asyncio", request_type=model_service.DeleteModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_async_from_dict(): + await test_delete_model_async(request_type=dict) + + +def test_delete_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
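# ---------------------------------------------------------------------------
# Sketch of the long-running-operation shape asserted by test_delete_model:
# the transport returns an Operation message and the client wraps it in a
# future-like object. FakeOperationFuture and FakeClient are assumptions
# standing in for google.api_core.operation.Operation and the generated client.
from unittest import mock


class FakeOperationFuture:
    def __init__(self, operation_name):
        self.operation_name = operation_name


class FakeClient:
    def __init__(self, transport):
        self.transport = transport

    def delete_model(self, request):
        operation = self.transport.delete_model(request)
        # The generated client wraps the raw Operation proto in a future.
        return FakeOperationFuture(operation["name"])


def test_delete_model_returns_future():
    transport = mock.Mock()
    transport.delete_model.return_value = {"name": "operations/spam"}
    response = FakeClient(transport).delete_model({"name": "models/123"})
    assert isinstance(response, FakeOperationFuture)
    assert response.operation_name == "operations/spam"
# ---------------------------------------------------------------------------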
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] + + +def test_delete_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + model_service.DeleteModelRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model( + model_service.DeleteModelRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.DeleteModelVersionRequest, + dict, + ], +) +def test_delete_model_version(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_version_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: + client.delete_model_version() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + +@pytest.mark.asyncio +async def test_delete_model_version_async( + transport: str = "grpc_asyncio", + request_type=model_service.DeleteModelVersionRequest, +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_update_model_async_from_dict(): - await test_update_model_async(request_type=dict) +async def test_delete_model_version_async_from_dict(): + await test_delete_model_version_async(request_type=dict) -def test_update_model_field_headers(): +def test_delete_model_version_field_headers(): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() + request = model_service.DeleteModelVersionRequest() - request.model.name = "model.name/value" + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.update_model), "__call__") as call: - call.return_value = gca_model.Model() - client.update_model(request) + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_model_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -1760,26 +2699,30 @@ def test_update_model_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "model.name=model.name/value", + "name=name/value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_update_model_field_headers_async(): +async def test_delete_model_version_field_headers_async(): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() + request = model_service.DeleteModelVersionRequest() - request.model.name = "model.name/value" + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_model), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - await client.update_model(request) + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_model_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -1790,39 +2733,37 @@ async def test_update_model_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "model.name=model.name/value", + "name=name/value", ) in kw["metadata"] -def test_update_model_flattened(): +def test_delete_model_version_flattened(): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_model), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() + call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.update_model( - model=gca_model.Model(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.delete_model_version( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].name + mock_val = "name_value" assert arg == mock_val -def test_update_model_flattened_error(): +def test_delete_model_version_flattened_error(): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1830,46 +2771,45 @@ def test_update_model_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.delete_model_version( + model_service.DeleteModelVersionRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_update_model_flattened_async(): +async def test_delete_model_version_flattened_async(): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_model), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_model_version), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() + call.return_value = operations_pb2.Operation(name="operations/op") - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.update_model( - model=gca_model.Model(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + response = await client.delete_model_version( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].name + mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio -async def test_update_model_flattened_error_async(): +async def test_delete_model_version_flattened_error_async(): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1877,21 +2817,20 @@ async def test_update_model_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. 
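# ---------------------------------------------------------------------------
# Standalone sketch of the rule the *_flattened_error tests assert, using a
# hypothetical FakeClient: a call may pass either a full request object or
# flattened keyword fields, never both at once.
import pytest


class FakeClient:
    def delete_model_version(self, request=None, *, name=None):
        if request is not None and name is not None:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        return {"request": request, "name": name}


def test_flattened_error_pattern():
    client = FakeClient()
    # Request object only, or flattened field only: both are accepted.
    client.delete_model_version(request={"name": "name_value"})
    client.delete_model_version(name="name_value")
    # Both at once is rejected, exactly as asserted above.
    with pytest.raises(ValueError):
        client.delete_model_version(request={"name": "name_value"}, name="name_value")
# ---------------------------------------------------------------------------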
with pytest.raises(ValueError): - await client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + await client.delete_model_version( + model_service.DeleteModelVersionRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - model_service.DeleteModelRequest, + model_service.MergeVersionAliasesRequest, dict, ], ) -def test_delete_model(request_type, transport: str = "grpc"): +def test_merge_version_aliases(request_type, transport: str = "grpc"): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -1902,21 +2841,58 @@ def test_delete_model(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.delete_model(request) + call.return_value = model.Model( + name="name_value", + version_id="version_id_value", + version_aliases=["version_aliases_value"], + display_name="display_name_value", + description="description_value", + version_description="version_description_value", + metadata_schema_uri="metadata_schema_uri_value", + training_pipeline="training_pipeline_value", + artifact_uri="artifact_uri_value", + supported_deployment_resources_types=[ + model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ], + supported_input_storage_formats=["supported_input_storage_formats_value"], + supported_output_storage_formats=["supported_output_storage_formats_value"], + etag="etag_value", + ) + response = client.merge_version_aliases(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() + assert args[0] == model_service.MergeVersionAliasesRequest() # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.version_id == "version_id_value" + assert response.version_aliases == ["version_aliases_value"] + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.version_description == "version_description_value" + assert response.metadata_schema_uri == "metadata_schema_uri_value" + assert response.training_pipeline == "training_pipeline_value" + assert response.artifact_uri == "artifact_uri_value" + assert response.supported_deployment_resources_types == [ + model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ] + assert response.supported_input_storage_formats == [ + "supported_input_storage_formats_value" + ] + assert response.supported_output_storage_formats == [ + "supported_output_storage_formats_value" + ] + assert response.etag == "etag_value" -def test_delete_model_empty_call(): +def test_merge_version_aliases_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = ModelServiceClient( @@ -1925,16 +2901,19 @@ def test_delete_model_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: - client.delete_model() + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: + client.merge_version_aliases() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() + assert args[0] == model_service.MergeVersionAliasesRequest() @pytest.mark.asyncio -async def test_delete_model_async( - transport: str = "grpc_asyncio", request_type=model_service.DeleteModelRequest +async def test_merge_version_aliases_async( + transport: str = "grpc_asyncio", + request_type=model_service.MergeVersionAliasesRequest, ): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1946,42 +2925,85 @@ async def test_delete_model_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + model.Model( + name="name_value", + version_id="version_id_value", + version_aliases=["version_aliases_value"], + display_name="display_name_value", + description="description_value", + version_description="version_description_value", + metadata_schema_uri="metadata_schema_uri_value", + training_pipeline="training_pipeline_value", + artifact_uri="artifact_uri_value", + supported_deployment_resources_types=[ + model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ], + supported_input_storage_formats=[ + "supported_input_storage_formats_value" + ], + supported_output_storage_formats=[ + "supported_output_storage_formats_value" + ], + etag="etag_value", + ) ) - response = await client.delete_model(request) + response = await client.merge_version_aliases(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() + assert args[0] == model_service.MergeVersionAliasesRequest() # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.version_id == "version_id_value" + assert response.version_aliases == ["version_aliases_value"] + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.version_description == "version_description_value" + assert response.metadata_schema_uri == "metadata_schema_uri_value" + assert response.training_pipeline == "training_pipeline_value" + assert response.artifact_uri == "artifact_uri_value" + assert response.supported_deployment_resources_types == [ + model.Model.DeploymentResourcesType.DEDICATED_RESOURCES + ] + assert response.supported_input_storage_formats == [ + "supported_input_storage_formats_value" + ] + assert response.supported_output_storage_formats == [ + "supported_output_storage_formats_value" + ] + assert response.etag == "etag_value" @pytest.mark.asyncio -async def test_delete_model_async_from_dict(): - await test_delete_model_async(request_type=dict) +async def test_merge_version_aliases_async_from_dict(): + await test_merge_version_aliases_async(request_type=dict) -def test_delete_model_field_headers(): +def test_merge_version_aliases_field_headers(): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = model_service.DeleteModelRequest() + request = model_service.MergeVersionAliasesRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.delete_model(request) + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: + call.return_value = model.Model() + client.merge_version_aliases(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -1997,23 +3019,23 @@ def test_delete_model_field_headers(): @pytest.mark.asyncio -async def test_delete_model_field_headers_async(): +async def test_merge_version_aliases_field_headers_async(): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = model_service.DeleteModelRequest() + request = model_service.MergeVersionAliasesRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.delete_model(request) + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.merge_version_aliases(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) @@ -2028,19 +3050,22 @@ async def test_delete_model_field_headers_async(): ) in kw["metadata"] -def test_delete_model_flattened(): +def test_merge_version_aliases_flattened(): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = model.Model() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_model( + client.merge_version_aliases( name="name_value", + version_aliases=["version_aliases_value"], ) # Establish that the underlying call was made with the expected @@ -2050,9 +3075,12 @@ def test_delete_model_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].version_aliases + mock_val = ["version_aliases_value"] + assert arg == mock_val -def test_delete_model_flattened_error(): +def test_merge_version_aliases_flattened_error(): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2060,30 +3088,32 @@ def test_delete_model_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_model( - model_service.DeleteModelRequest(), + client.merge_version_aliases( + model_service.MergeVersionAliasesRequest(), name="name_value", + version_aliases=["version_aliases_value"], ) @pytest.mark.asyncio -async def test_delete_model_flattened_async(): +async def test_merge_version_aliases_flattened_async(): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + with mock.patch.object( + type(client.transport.merge_version_aliases), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = model.Model() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_model( + response = await client.merge_version_aliases( name="name_value", + version_aliases=["version_aliases_value"], ) # Establish that the underlying call was made with the expected @@ -2093,10 +3123,13 @@ async def test_delete_model_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].version_aliases + mock_val = ["version_aliases_value"] + assert arg == mock_val @pytest.mark.asyncio -async def test_delete_model_flattened_error_async(): +async def test_merge_version_aliases_flattened_error_async(): client = ModelServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2104,9 +3137,10 @@ async def test_delete_model_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - await client.delete_model( - model_service.DeleteModelRequest(), + await client.merge_version_aliases( + model_service.MergeVersionAliasesRequest(), name="name_value", + version_aliases=["version_aliases_value"], ) @@ -3265,7 +4299,7 @@ async def test_list_model_evaluations_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3313,7 +4347,9 @@ async def test_list_model_evaluations_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_model_evaluations(request={})).pages: + async for page_ in ( + await client.list_model_evaluations(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3959,7 +4995,7 @@ async def test_list_model_evaluation_slices_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4012,7 +5048,7 @@ async def test_list_model_evaluation_slices_async_pages(): pages = [] async for page_ in ( await client.list_model_evaluation_slices(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4109,6 +5145,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = ModelServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
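Several async pager loops above gain a "# pragma: no branch" marker. The pragma tells coverage.py's branch analysis not to require both outcomes of the async for line, which can otherwise be reported as a partial branch in these fully mocked pager tests, where the loop always consumes every yielded page. A self-contained illustration of the pattern; fake_pager and collect are hypothetical stand-ins for the mocked list_model_evaluations async pager and the test body.

import asyncio


async def fake_pager():
    # Stand-in for a mocked async pager: it always yields every response,
    # so the loop's early-exit arm is never taken.
    for item in ("a", "b", "c"):
        yield item


async def collect():
    responses = []
    async for response in fake_pager():  # pragma: no branch
        responses.append(response)
    return responses


assert asyncio.run(collect()) == ["a", "b", "c"]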
client = ModelServiceClient( @@ -4145,8 +5194,11 @@ def test_model_service_base_transport(): "upload_model", "get_model", "list_models", + "list_model_versions", "update_model", "delete_model", + "delete_model_version", + "merge_version_aliases", "export_model", "import_model_evaluation", "get_model_evaluation", @@ -4166,6 +5218,14 @@ def test_model_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_model_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -4308,24 +5368,40 @@ def test_model_service_grpc_transport_client_cert_source_for_mtls(transport_clas ) -def test_model_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_model_service_host_no_port(transport_name): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_model_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_model_service_host_with_port(transport_name): client = ModelServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_model_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py index 4e3950a6bb..4dcc573c07 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -118,24 +118,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PipelineServiceClient, - PipelineServiceAsyncClient, + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), ], ) -def test_pipeline_service_client_from_service_account_info(client_class): +def test_pipeline_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -164,27 +166,33 @@ def test_pipeline_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PipelineServiceClient, - PipelineServiceAsyncClient, + (PipelineServiceClient, "grpc"), + 
(PipelineServiceAsyncClient, "grpc_asyncio"), ], ) -def test_pipeline_service_client_from_service_account_file(client_class): +def test_pipeline_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_pipeline_service_client_get_transport_class(): @@ -710,6 +718,8 @@ def test_create_training_pipeline(request_type, transport: str = "grpc"): name="name_value", display_name="display_name_value", training_task_definition="training_task_definition_value", + model_id="model_id_value", + parent_model="parent_model_value", state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, ) response = client.create_training_pipeline(request) @@ -724,6 +734,8 @@ def test_create_training_pipeline(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.training_task_definition == "training_task_definition_value" + assert response.model_id == "model_id_value" + assert response.parent_model == "parent_model_value" assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED @@ -769,6 +781,8 @@ async def test_create_training_pipeline_async( name="name_value", display_name="display_name_value", training_task_definition="training_task_definition_value", + model_id="model_id_value", + parent_model="parent_model_value", state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, ) ) @@ -784,6 +798,8 @@ async def test_create_training_pipeline_async( assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.training_task_definition == "training_task_definition_value" + assert response.model_id == "model_id_value" + assert response.parent_model == "parent_model_value" assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED @@ -979,6 +995,8 @@ def test_get_training_pipeline(request_type, transport: str = "grpc"): name="name_value", display_name="display_name_value", training_task_definition="training_task_definition_value", + model_id="model_id_value", + parent_model="parent_model_value", state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, ) response = client.get_training_pipeline(request) @@ -993,6 +1011,8 @@ def test_get_training_pipeline(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.training_task_definition == "training_task_definition_value" + assert response.model_id == "model_id_value" + assert response.parent_model == "parent_model_value" assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED @@ -1038,6 +1058,8 @@ async def test_get_training_pipeline_async( 
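The from_service_account_info and from_service_account_file tests are now parametrized over (client_class, transport_name) pairs rather than bare client classes, so the sync client is exercised with "grpc", the async client with "grpc_asyncio", and the factory calls pass an explicit transport=. A minimal pytest sketch of that parametrization shape, using placeholder classes rather than the real GAPIC clients:

import pytest


class SyncClient:
    def __init__(self, transport):
        self.transport = transport


class AsyncClient(SyncClient):
    pass


@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (SyncClient, "grpc"),
        (AsyncClient, "grpc_asyncio"),
    ],
)
def test_client_transport_pairing(client_class, transport_name):
    # Each client class is paired with the transport flavor it supports,
    # mirroring the (PipelineServiceClient, "grpc") pairs above.
    client = client_class(transport=transport_name)
    assert isinstance(client, client_class)
    assert client.transport == transport_name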
name="name_value", display_name="display_name_value", training_task_definition="training_task_definition_value", + model_id="model_id_value", + parent_model="parent_model_value", state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, ) ) @@ -1053,6 +1075,8 @@ async def test_get_training_pipeline_async( assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.training_task_definition == "training_task_definition_value" + assert response.model_id == "model_id_value" + assert response.parent_model == "parent_model_value" assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED @@ -1599,7 +1623,7 @@ async def test_list_training_pipelines_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1647,7 +1671,9 @@ async def test_list_training_pipelines_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_training_pipelines(request={})).pages: + async for page_ in ( + await client.list_training_pipelines(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3047,7 +3073,7 @@ async def test_list_pipeline_jobs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3095,7 +3121,9 @@ async def test_list_pipeline_jobs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_pipeline_jobs(request={})).pages: + async for page_ in ( + await client.list_pipeline_jobs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3668,6 +3696,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = PipelineServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = PipelineServiceClient( @@ -3724,6 +3765,14 @@ def test_pipeline_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_pipeline_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -3869,24 +3918,40 @@ def test_pipeline_service_grpc_transport_client_cert_source_for_mtls(transport_c ) -def test_pipeline_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_pipeline_service_host_no_port(transport_name): client = PipelineServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_pipeline_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_pipeline_service_host_with_port(transport_name): client = PipelineServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_pipeline_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index 7b602f8e0c..830e533ade 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -94,24 +94,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PredictionServiceClient, - PredictionServiceAsyncClient, + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), ], ) -def test_prediction_service_client_from_service_account_info(client_class): +def test_prediction_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -140,27 +142,33 @@ def test_prediction_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - PredictionServiceClient, - PredictionServiceAsyncClient, + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), ], ) -def test_prediction_service_client_from_service_account_file(client_class): +def test_prediction_service_client_from_service_account_file( + client_class, transport_name +): creds = 
ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_prediction_service_client_get_transport_class(): @@ -683,6 +691,7 @@ def test_predict(request_type, transport: str = "grpc"): call.return_value = prediction_service.PredictResponse( deployed_model_id="deployed_model_id_value", model="model_value", + model_version_id="model_version_id_value", model_display_name="model_display_name_value", ) response = client.predict(request) @@ -696,6 +705,7 @@ def test_predict(request_type, transport: str = "grpc"): assert isinstance(response, prediction_service.PredictResponse) assert response.deployed_model_id == "deployed_model_id_value" assert response.model == "model_value" + assert response.model_version_id == "model_version_id_value" assert response.model_display_name == "model_display_name_value" @@ -735,6 +745,7 @@ async def test_predict_async( prediction_service.PredictResponse( deployed_model_id="deployed_model_id_value", model="model_value", + model_version_id="model_version_id_value", model_display_name="model_display_name_value", ) ) @@ -749,6 +760,7 @@ async def test_predict_async( assert isinstance(response, prediction_service.PredictResponse) assert response.deployed_model_id == "deployed_model_id_value" assert response.model == "model_value" + assert response.model_version_id == "model_version_id_value" assert response.model_display_name == "model_display_name_value" @@ -1373,6 +1385,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = PredictionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = PredictionServiceClient( @@ -1417,6 +1442,14 @@ def test_prediction_service_base_transport(): with pytest.raises(NotImplementedError): transport.close() + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_prediction_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -1562,24 +1595,40 @@ def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport ) -def test_prediction_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_prediction_service_host_no_port(transport_name): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_prediction_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_prediction_service_host_with_port(transport_name): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_prediction_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py index 5ff5ef5d62..f9c5cd97a5 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -99,24 +99,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), ], ) -def test_specialist_pool_service_client_from_service_account_info(client_class): +def test_specialist_pool_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -145,27 +147,33 @@ def test_specialist_pool_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), ], ) -def test_specialist_pool_service_client_from_service_account_file(client_class): +def 
test_specialist_pool_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_specialist_pool_service_client_get_transport_class(): @@ -1578,7 +1586,7 @@ async def test_list_specialist_pools_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1626,7 +1634,9 @@ async def test_list_specialist_pools_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_specialist_pools(request={})).pages: + async for page_ in ( + await client.list_specialist_pools(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -2215,6 +2225,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = SpecialistPoolServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = SpecialistPoolServiceClient( @@ -2266,6 +2289,14 @@ def test_specialist_pool_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_specialist_pool_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2415,24 +2446,40 @@ def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( ) -def test_specialist_pool_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_specialist_pool_service_host_no_port(transport_name): client = SpecialistPoolServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_specialist_pool_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_specialist_pool_service_host_with_port(transport_name): client = SpecialistPoolServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_specialist_pool_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py index 51bc486cb1..0e304036ed 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -112,24 +112,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, + (TensorboardServiceClient, "grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), ], ) -def test_tensorboard_service_client_from_service_account_info(client_class): +def test_tensorboard_service_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -158,27 +160,33 @@ def test_tensorboard_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, + (TensorboardServiceClient, "grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), ], ) -def test_tensorboard_service_client_from_service_account_file(client_class): +def 
test_tensorboard_service_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_tensorboard_service_client_get_transport_class(): @@ -1820,7 +1828,7 @@ async def test_list_tensorboards_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1868,7 +1876,9 @@ async def test_list_tensorboards_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_tensorboards(request={})).pages: + async for page_ in ( + await client.list_tensorboards(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3339,7 +3349,7 @@ async def test_list_tensorboard_experiments_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3392,7 +3402,7 @@ async def test_list_tensorboard_experiments_async_pages(): pages = [] async for page_ in ( await client.list_tensorboard_experiments(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -5096,7 +5106,7 @@ async def test_list_tensorboard_runs_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -5144,7 +5154,9 @@ async def test_list_tensorboard_runs_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_tensorboard_runs(request={})).pages: + async for page_ in ( + await client.list_tensorboard_runs(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6935,7 +6947,7 @@ async def test_list_tensorboard_time_series_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6988,7 +7000,7 @@ async def test_list_tensorboard_time_series_async_pages(): pages = [] async for page_ in ( await client.list_tensorboard_time_series(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", 
""]): assert page_.raw_page.next_page_token == token @@ -8939,7 +8951,7 @@ async def test_export_tensorboard_time_series_data_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -8991,7 +9003,7 @@ async def test_export_tensorboard_time_series_data_async_pages(): pages = [] async for page_ in ( await client.export_tensorboard_time_series_data(request={}) - ).pages: + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -9088,6 +9100,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = TensorboardServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = TensorboardServiceClient( @@ -9162,6 +9187,14 @@ def test_tensorboard_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_tensorboard_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -9309,24 +9342,40 @@ def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( ) -def test_tensorboard_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_tensorboard_service_host_no_port(transport_name): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_tensorboard_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_tensorboard_service_host_with_port(transport_name): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_tensorboard_service_grpc_transport_channel(): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py index 1db1c1a8db..8688f8e3aa 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -97,24 +97,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - VizierServiceClient, - VizierServiceAsyncClient, + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), ], ) -def test_vizier_service_client_from_service_account_info(client_class): +def 
test_vizier_service_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") @pytest.mark.parametrize( @@ -143,27 +143,31 @@ def test_vizier_service_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - VizierServiceClient, - VizierServiceAsyncClient, + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), ], ) -def test_vizier_service_client_from_service_account_file(client_class): +def test_vizier_service_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") def test_vizier_service_client_get_transport_class(): @@ -1509,7 +1513,7 @@ async def test_list_studies_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1555,7 +1559,9 @@ async def test_list_studies_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_studies(request={})).pages: + async for page_ in ( + await client.list_studies(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3037,7 +3043,7 @@ async def test_list_trials_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3083,7 +3089,9 @@ async def test_list_trials_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_trials(request={})).pages: + async for page_ in ( + await client.list_trials(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4311,6 +4319,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = 
VizierServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = VizierServiceClient( @@ -4372,6 +4393,14 @@ def test_vizier_service_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_vizier_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -4517,24 +4546,40 @@ def test_vizier_service_grpc_transport_client_cert_source_for_mtls(transport_cla ) -def test_vizier_service_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_vizier_service_host_no_port(transport_name): client = VizierServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:443" + assert client.transport._host == ("aiplatform.googleapis.com:443") -def test_vizier_service_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_vizier_service_host_with_port(transport_name): client = VizierServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "aiplatform.googleapis.com:8000" + assert client.transport._host == ("aiplatform.googleapis.com:8000") def test_vizier_service_grpc_transport_channel():
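Every service suite in this patch gains the same two transport-layer additions: a parametrized test_transport_kind that builds the concrete transport via get_transport_class and checks its new kind property, and a catch-all loop on the base transport asserting that kind (and any future leftover property) raises NotImplementedError. The sketch below restates that contract with a toy transport hierarchy; BaseTransport and GrpcTransport are illustrative stand-ins, not the generated classes.

import pytest


class BaseTransport:
    @property
    def kind(self):
        raise NotImplementedError()


class GrpcTransport(BaseTransport):
    @property
    def kind(self):
        return "grpc"


@pytest.mark.parametrize("transport_name", ["grpc"])
def test_transport_kind_pattern(transport_name):
    transport = {"grpc": GrpcTransport}[transport_name]()
    assert transport.kind == transport_name


def test_base_transport_remainder_pattern():
    transport = BaseTransport()
    remainder = ["kind"]
    for r in remainder:
        with pytest.raises(NotImplementedError):
            # For a property, the attribute access itself raises, so the
            # trailing call mirrors the generated catch-all without changing
            # which exception is caught.
            getattr(transport, r)()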