Skip to content

Commit 298958f

Browse files
authored
feat: Add support for start_execution in MLMD SDK. (#1465)
* feat: Add support for start_execution in MLMD SDK. * update comment to remove reference to resource_id * Update python docs
1 parent 50bafe8 commit 298958f

File tree

2 files changed

+100
-0
lines changed

2 files changed

+100
-0
lines changed

google/cloud/aiplatform/metadata/schema/base_execution.py

+73
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from google.cloud.aiplatform.compat.types import execution as gca_execution
2525
from google.cloud.aiplatform.metadata import constants
2626
from google.cloud.aiplatform.metadata import execution
27+
from google.cloud.aiplatform.metadata import metadata
2728

2829

2930
class BaseExecutionSchema(metaclass=abc.ABCMeta):
@@ -112,3 +113,75 @@ def create(
112113
credentials=credentials,
113114
)
114115
return self.execution
116+
117+
def start_execution(
    self,
    *,
    metadata_store_id: Optional[str] = "default",
    resume: bool = False,
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
) -> "execution.Execution":
    """Creates and starts a new Metadata Execution, or resumes a previously created Execution.

    This method is similar to create_execution with additional support for Experiments.
    If an Experiment is set prior to running this command, the Experiment will be
    associated with the created execution, otherwise this method behaves the same
    as create_execution.

    To start a new execution:
    ```
    instance_of_execution_schema = execution_schema.ContainerExecution(...)
    with instance_of_execution_schema.start_execution() as exc:
        exc.assign_input_artifacts([my_artifact])
        model = aiplatform.Artifact.create(uri='gs://my-uri', schema_title='system.Model')
        exc.assign_output_artifacts([model])
    ```

    To continue a previously created execution:
    ```
    with execution_schema.ContainerExecution(resource_id='my-exc', resume=True) as exc:
        ...
    ```
    Args:
        metadata_store_id (str):
            Optional. The <metadata_store_id> portion of the resource name with
            the format:
            projects/123/locations/us-central1/metadataStores/<metadata_store_id>/executions/<executions_id>
            If not provided, the MetadataStore's ID will be set to "default". Currently only the 'default'
            MetadataStore ID is supported.
        resume (bool):
            Resume an existing execution.
        project (str):
            Optional. Project used to create this Execution. Overrides project set in
            aiplatform.init.
        location (str):
            Optional. Location used to create this Execution. Overrides location set in
            aiplatform.init.
        credentials (auth_credentials.Credentials):
            Optional. Custom credentials used to create this Execution. Overrides
            credentials set in aiplatform.init.
    Returns:
        Execution: Instantiated representation of the managed Metadata Execution.
    Raises:
        ValueError: If metadata_store_id other than 'default' is provided.
    """
    # Fail fast: the experiment tracker only supports the default
    # MetadataStore today (see TODO below).
    if metadata_store_id != "default":
        raise ValueError(
            f"metadata_store_id {metadata_store_id} is not supported. Only the default MetadataStore ID is supported."
        )

    # Delegate to the experiment tracker so the execution is associated
    # with the currently set Experiment (if any).
    return metadata._ExperimentTracker().start_execution(
        schema_title=self.schema_title,
        display_name=self.display_name,
        resource_id=self.execution_id,
        metadata=self.metadata,
        schema_version=self.schema_version,
        description=self.description,
        # TODO: Add support for metadata_store_id once it is supported in experiment.
        resume=resume,
        project=project,
        location=location,
        credentials=credentials,
    )

tests/unit/aiplatform/test_metadata_schema.py

+27
Original file line numberDiff line numberDiff line change
@@ -561,3 +561,30 @@ def test_container_spec_to_dict_method_returns_correct_schema(self):
561561
}
562562

563563
assert json.dumps(container_spec.to_dict()) == json.dumps(expected_results)
564+
565+
@pytest.mark.usefixtures("create_execution_mock")
def test_start_execution_method_calls_gapic_library_with_correct_parameters(
    self, create_execution_mock
):
    """Verifies start_execution forwards the schema's fields to the GAPIC create_execution call."""
    aiplatform.init(project=_TEST_PROJECT)

    # Minimal concrete subclass so the abstract base schema can be instantiated.
    class TestExecution(base_execution.BaseExecutionSchema):
        schema_title = _TEST_SCHEMA_TITLE

    exec_schema = TestExecution(
        state=_TEST_EXECUTION_STATE,
        display_name=_TEST_DISPLAY_NAME,
        description=_TEST_DESCRIPTION,
        metadata=_TEST_UPDATED_METADATA,
    )
    exec_schema.start_execution()

    # Exactly one create_execution call, targeting the default MetadataStore.
    create_execution_mock.assert_called_once_with(
        parent=f"{_TEST_PARENT}/metadataStores/default",
        execution=mock.ANY,
        execution_id=None,
    )

    # The Execution proto passed through must carry the schema's fields.
    _, _, call_kwargs = create_execution_mock.mock_calls[0]
    sent_execution = call_kwargs["execution"]
    for attribute, expected in (
        ("schema_title", _TEST_SCHEMA_TITLE),
        ("display_name", _TEST_DISPLAY_NAME),
        ("description", _TEST_DESCRIPTION),
        ("metadata", _TEST_UPDATED_METADATA),
    ):
        assert getattr(sent_execution, attribute) == expected

0 commit comments

Comments
 (0)