Skip to content

Commit 98f9b35

Browse files
jsondai authored and copybara-github committed
fix: Fix the default value of response_column_name in EvalTask.evaluate()
PiperOrigin-RevId: 633978835
1 parent 0feac9f commit 98f9b35

File tree

1 file changed

+7
-7
lines changed

1 file changed

+7
-7
lines changed

vertexai/preview/evaluation/_eval_tasks.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,7 @@ def _evaluate_with_experiment(
249249
model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
250250
prompt_template: Optional[str] = None,
251251
experiment_run_name: Optional[str] = None,
252-
response_column_name: str = "response",
252+
response_column_name: Optional[str] = None,
253253
) -> EvalResult:
254254
"""Runs an evaluation for the EvalTask with an experiment.
255255
@@ -264,7 +264,7 @@ def _evaluate_with_experiment(
264264
to if an experiment is set for this EvalTask. If not provided, a random
265265
unique experiment run name is used.
266266
response_column_name: The column name of model response in the dataset. If
267-
not set, default to `response`.
267+
provided, this will override the `response_column_name` of the `EvalTask`.
268268
269269
Returns:
270270
The evaluation result.
@@ -279,7 +279,7 @@ def _evaluate_with_experiment(
279279
prompt_template=prompt_template,
280280
content_column_name=self.content_column_name,
281281
reference_column_name=self.reference_column_name,
282-
response_column_name=response_column_name or self.response_column_name,
282+
response_column_name=response_column_name,
283283
)
284284
try:
285285
vertexai.preview.log_metrics(eval_result.summary_metrics)
@@ -293,7 +293,7 @@ def evaluate(
293293
model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
294294
prompt_template: Optional[str] = None,
295295
experiment_run_name: Optional[str] = None,
296-
response_column_name: str = "response",
296+
response_column_name: Optional[str] = None,
297297
) -> EvalResult:
298298
"""Runs an evaluation for the EvalTask.
299299
@@ -308,7 +308,7 @@ def evaluate(
308308
to if an experiment is set for this EvalTask. If not provided, a random
309309
unique experiment run name is used.
310310
response_column_name: The column name of model response in the dataset. If
311-
not set, default to `response`.
311+
provided, this will override the `response_column_name` of the `EvalTask`.
312312
313313
Returns:
314314
The evaluation result.
@@ -321,7 +321,7 @@ def evaluate(
321321
"`vertexai.init(experiment='experiment_name')`for logging this"
322322
" evaluation run."
323323
)
324-
324+
response_column_name = response_column_name or self.response_column_name
325325
experiment_run_name = experiment_run_name or f"{uuid.uuid4()}"
326326

327327
if self.experiment and global_experiment_name:
@@ -354,7 +354,7 @@ def evaluate(
354354
prompt_template=prompt_template,
355355
content_column_name=self.content_column_name,
356356
reference_column_name=self.reference_column_name,
357-
response_column_name=response_column_name or self.response_column_name,
357+
response_column_name=response_column_name,
358358
)
359359
return eval_result
360360

0 commit comments

Comments (0)