Skip to content

Commit 375095e

Browse files
jsondai authored and copybara-github committed
feat: Add metadata to evaluation result.
PiperOrigin-RevId: 660589553
1 parent 8090f31 commit 375095e

File tree

2 files changed

+9
-4
lines changed

2 files changed

+9
-4
lines changed

vertexai/preview/evaluation/_base.py

+5-4
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ class EvaluationRunConfig:
3737
3838
Attributes:
3939
dataset: The dataset to evaluate.
40-
metrics: The list of metric names, or metric bundle names, or Metric instances to evaluate.
40+
metrics: The list of metrics, or Metric instances to evaluate.
4141
column_map: The dictionary of column name overrides in the dataset.
4242
client: The evaluation service client.
4343
evaluation_service_qps: The custom QPS limit for the evaluation service.
@@ -73,10 +73,11 @@ class EvalResult:
7373
"""Evaluation result.
7474
7575
Attributes:
76-
summary_metrics: The summary evaluation metrics for an evaluation run.
77-
metrics_table: A table containing eval inputs, ground truth, and metrics per
78-
row.
76+
summary_metrics: The summary evaluation metrics for the evaluation run.
77+
metrics_table: A table containing evaluation dataset, and metric results.
78+
metadata: The metadata for the evaluation run.
7979
"""
8080

8181
summary_metrics: Dict[str, float]
8282
metrics_table: Optional["pd.DataFrame"] = None
83+
metadata: Optional[Dict[str, str]] = None

vertexai/preview/evaluation/_eval_tasks.py

+4
Original file line numberDiff line numberDiff line change
@@ -298,6 +298,10 @@ def _evaluate_with_experiment(
298298
k: ("NaN" if isinstance(v, float) and np.isnan(v) else v)
299299
for k, v in eval_result.summary_metrics.items()
300300
}
301+
eval_result.metadata = {
302+
"experiment": self.experiment,
303+
"experiment_run": experiment_run_name,
304+
}
301305
try:
302306
vertexai.preview.log_metrics(eval_result.summary_metrics)
303307
except (TypeError, exceptions.InvalidArgument) as e:

0 commit comments

Comments (0)