Skip to content

Commit 10f95cd

Browse files
authored
feat: add ModelEvaluation support (#1167)
1 parent c1e899d commit 10f95cd

File tree

7 files changed

+535
-1
lines changed

7 files changed

+535
-1
lines changed

README.rst

+41
Original file line numberDiff line numberDiff line change
@@ -283,6 +283,47 @@ Please visit `Importing models to Vertex AI`_ for a detailed overview:
283283

284284
.. _Importing models to Vertex AI: https://cloud.google.com/vertex-ai/docs/general/import-model
285285

286+
Model Evaluation
287+
----------------
288+
289+
The Vertex AI SDK for Python currently supports getting model evaluation metrics for all AutoML models.
290+
291+
To list all model evaluations for a model:
292+
293+
.. code-block:: Python
294+
295+
model = aiplatform.Model('projects/my-project/locations/us-central1/models/{MODEL_ID}')
296+
297+
evaluations = model.list_model_evaluations()
298+
299+
300+
To get the model evaluation resource for a given model:
301+
302+
.. code-block:: Python
303+
304+
model = aiplatform.Model('projects/my-project/locations/us-central1/models/{MODEL_ID}')
305+
306+
# Returns the first evaluation when called with no arguments; you can also pass an evaluation ID
307+
evaluation = model.get_model_evaluation()
308+
309+
eval_metrics = evaluation.metrics
310+
311+
312+
You can also create a reference to your model evaluation directly by passing in the resource name of the model evaluation:
313+
314+
.. code-block:: Python
315+
316+
evaluation = aiplatform.ModelEvaluation(
317+
evaluation_name='projects/my-project/locations/us-central1/models/{MODEL_ID}/evaluations/{EVALUATION_ID}')
318+
319+
Alternatively, you can create a reference to your evaluation by passing in the model and evaluation IDs:
320+
321+
.. code-block:: Python
322+
323+
evaluation = aiplatform.ModelEvaluation(
324+
evaluation_name='{EVALUATION_ID}',
325+
model_id='{MODEL_ID}')
326+
286327
287328
Batch Prediction
288329
----------------

google/cloud/aiplatform/__init__.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# -*- coding: utf-8 -*-
22

3-
# Copyright 2020 Google LLC
3+
# Copyright 2022 Google LLC
44
#
55
# Licensed under the Apache License, Version 2.0 (the "License");
66
# you may not use this file except in compliance with the License.
@@ -41,6 +41,7 @@
4141
from google.cloud.aiplatform.metadata import metadata
4242
from google.cloud.aiplatform.models import Endpoint
4343
from google.cloud.aiplatform.models import Model
44+
from google.cloud.aiplatform.model_evaluation import ModelEvaluation
4445
from google.cloud.aiplatform.jobs import (
4546
BatchPredictionJob,
4647
CustomJob,
@@ -107,6 +108,7 @@
107108
"ImageDataset",
108109
"HyperparameterTuningJob",
109110
"Model",
111+
"ModelEvaluation",
110112
"PipelineJob",
111113
"TabularDataset",
112114
"Tensorboard",
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
# -*- coding: utf-8 -*-
2+
3+
# Copyright 2022 Google LLC
4+
#
5+
# Licensed under the Apache License, Version 2.0 (the "License");
6+
# you may not use this file except in compliance with the License.
7+
# You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
#
17+
18+
from google.cloud.aiplatform.model_evaluation.model_evaluation import ModelEvaluation
19+
20+
__all__ = ("ModelEvaluation",)
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
# -*- coding: utf-8 -*-
2+
3+
# Copyright 2022 Google LLC
4+
#
5+
# Licensed under the Apache License, Version 2.0 (the "License");
6+
# you may not use this file except in compliance with the License.
7+
# You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
#
17+
18+
from google.auth import credentials as auth_credentials
19+
20+
from google.cloud.aiplatform import base
21+
from google.cloud.aiplatform import utils
22+
from google.cloud.aiplatform import models
23+
from google.protobuf import struct_pb2
24+
25+
from typing import Optional
26+
27+
28+
class ModelEvaluation(base.VertexAiResourceNounWithFutureManager):
    """Represents a Vertex AI ModelEvaluation resource attached to a model."""

    client_class = utils.ModelClientWithOverride
    _resource_noun = "evaluations"
    _delete_method = None
    _getter_method = "get_model_evaluation"
    _list_method = "list_model_evaluations"
    _parse_resource_name_method = "parse_model_evaluation_path"
    _format_resource_name_method = "model_evaluation_path"

    @property
    def metrics(self) -> Optional[struct_pb2.Value]:
        """The evaluation metrics stored on this Model Evaluation.

        Returns:
            The metrics from the backing Model Evaluation resource, or
            None if the metrics for this evaluation are empty.
        """
        return self._gca_resource.metrics

    def __init__(
        self,
        evaluation_name: str,
        model_id: Optional[str] = None,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
    ):
        """Fetches an existing ModelEvaluation resource and wraps it.

        Args:
            evaluation_name (str):
                Required. Either a fully-qualified model evaluation resource
                name (e.g.
                "projects/123/locations/us-central1/models/456/evaluations/789")
                or a bare evaluation ID (e.g. "789"). When only the ID is
                given, model_id must also be provided.
            model_id (str):
                Optional. ID of the model that owns this evaluation. Required
                when evaluation_name is only an evaluation ID.
            project (str):
                Optional. Project to retrieve the model evaluation from.
                Falls back to the project set in aiplatform.init.
            location (str):
                Optional. Location to retrieve the model evaluation from.
                Falls back to the location set in aiplatform.init.
            credentials: Optional[auth_credentials.Credentials]=None,
                Custom credentials used to retrieve this model evaluation.
                Falls back to the credentials set in aiplatform.init.
        """

        super().__init__(
            project=project,
            location=location,
            credentials=credentials,
            resource_name=evaluation_name,
        )

        # When only an evaluation ID was supplied, the parent model ID is
        # needed to construct the full resource name for the GAPIC call.
        if model_id:
            parent_fields = {models.Model._resource_noun: model_id}
        else:
            parent_fields = model_id

        self._gca_resource = self._get_gca_resource(
            resource_name=evaluation_name,
            parent_resource_name_fields=parent_fields,
        )

    def delete(self):
        """Unsupported: the service does not expose evaluation deletion."""
        raise NotImplementedError(
            "Deleting a model evaluation has not been implemented yet."
        )

google/cloud/aiplatform/models.py

+77
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
from google.cloud.aiplatform import models
3434
from google.cloud.aiplatform import utils
3535
from google.cloud.aiplatform.utils import gcs_utils
36+
from google.cloud.aiplatform import model_evaluation
3637

3738
from google.cloud.aiplatform.compat.services import endpoint_service_client
3839

@@ -3210,3 +3211,79 @@ def upload_tensorflow_saved_model(
32103211
sync=sync,
32113212
upload_request_timeout=upload_request_timeout,
32123213
)
3214+
3215+
def list_model_evaluations(
    self,
) -> List["model_evaluation.ModelEvaluation"]:
    """Lists every Model Evaluation resource attached to this model.

    Example Usage:

        my_model = Model(
            model_name="projects/123/locations/us-central1/models/456"
        )

        my_evaluations = my_model.list_model_evaluations()

    Returns:
        List[model_evaluation.ModelEvaluation]: All ModelEvaluation
            resources belonging to this model.
    """

    # Block until the backing model resource exists before listing.
    self.wait()

    model_resource_name = self.resource_name
    return model_evaluation.ModelEvaluation._list(
        parent=model_resource_name,
        credentials=self.credentials,
    )
3239+
3240+
def get_model_evaluation(
    self,
    evaluation_id: Optional[str] = None,
) -> Optional[model_evaluation.ModelEvaluation]:
    """Returns a ModelEvaluation resource and instantiates its representation.
    If no evaluation_id is passed, it will return the first evaluation associated
    with this model.

    Example usage:

        my_model = Model(
            model_name="projects/123/locations/us-central1/models/456"
        )

        my_evaluation = my_model.get_model_evaluation(
            evaluation_id="789"
        )

        # If no arguments are passed, this returns the first evaluation for the model
        my_evaluation = my_model.get_model_evaluation()

    Args:
        evaluation_id (str):
            Optional. The ID of the model evaluation to retrieve.
    Returns:
        model_evaluation.ModelEvaluation: Instantiated representation of the
            ModelEvaluation resource, or None if no evaluation_id was given
            and the model has no evaluations.
    """

    evaluations = self.list_model_evaluations()

    if not evaluation_id:
        if len(evaluations) > 1:
            _LOGGER.warning(
                f"Your model has more than one model evaluation, this is returning only one evaluation resource: {evaluations[0].resource_name}"
            )
        # Fix: return None (not the empty list) when there are no
        # evaluations, matching the Optional[...] return annotation.
        return evaluations[0] if evaluations else None
    else:
        # Build the evaluation's full resource name from this model's
        # parsed name plus the caller-supplied evaluation ID.
        resource_uri_parts = self._parse_resource_name(self.resource_name)
        evaluation_resource_name = (
            model_evaluation.ModelEvaluation._format_resource_name(
                **resource_uri_parts,
                evaluation=evaluation_id,
            )
        )

        return model_evaluation.ModelEvaluation(
            evaluation_name=evaluation_resource_name,
            credentials=self.credentials,
        )

0 commit comments

Comments
 (0)