@@ -249,7 +249,7 @@ def _evaluate_with_experiment(
        model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
        prompt_template: Optional[str] = None,
        experiment_run_name: Optional[str] = None,
-       response_column_name: str = "response",
+       response_column_name: Optional[str] = None,
    ) -> EvalResult:
        """Runs an evaluation for the EvalTask with an experiment.

@@ -264,7 +264,7 @@ def _evaluate_with_experiment(
              to if an experiment is set for this EvalTask. If not provided, a random
              unique experiment run name is used.
            response_column_name: The column name of model response in the dataset. If
-             not set, default to `response`.
+             provided, this will override the `response_column_name` of the `EvalTask`.

        Returns:
            The evaluation result.
@@ -279,7 +279,7 @@ def _evaluate_with_experiment(
            prompt_template=prompt_template,
            content_column_name=self.content_column_name,
            reference_column_name=self.reference_column_name,
-           response_column_name=response_column_name or self.response_column_name,
+           response_column_name=response_column_name,
        )
        try:
            vertexai.preview.log_metrics(eval_result.summary_metrics)
@@ -293,7 +293,7 @@ def evaluate(
        model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
        prompt_template: Optional[str] = None,
        experiment_run_name: Optional[str] = None,
-       response_column_name: str = "response",
+       response_column_name: Optional[str] = None,
    ) -> EvalResult:
        """Runs an evaluation for the EvalTask.

@@ -308,7 +308,7 @@ def evaluate(
              to if an experiment is set for this EvalTask. If not provided, a random
              unique experiment run name is used.
            response_column_name: The column name of model response in the dataset. If
-             not set, default to `response`.
+             provided, this will override the `response_column_name` of the `EvalTask`.

        Returns:
            The evaluation result.
@@ -321,7 +321,7 @@ def evaluate(
                "`vertexai.init(experiment='experiment_name')`for logging this"
                " evaluation run."
            )
-
+       response_column_name = response_column_name or self.response_column_name
        experiment_run_name = experiment_run_name or f"{uuid.uuid4()}"

        if self.experiment and global_experiment_name:
@@ -354,7 +354,7 @@ def evaluate(
                prompt_template=prompt_template,
                content_column_name=self.content_column_name,
                reference_column_name=self.reference_column_name,
-               response_column_name=response_column_name or self.response_column_name,
+               response_column_name=response_column_name,
            )
        return eval_result

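The change above makes the per-call `response_column_name` optional and resolves it against the value stored on the `EvalTask` (`response_column_name or self.response_column_name`) before the evaluation runs. Below is a minimal usage sketch of that override behavior; the constructor arguments (`dataset`, `metrics`, the task-level `response_column_name`) and the metric names are assumptions about the surrounding `EvalTask` API made for illustration, not part of this diff.

```python
# Sketch only: assumes vertexai.init(project=..., location=...) has already
# been called, and that EvalTask accepts a pandas DataFrame, a list of metric
# names, and a task-level response_column_name.
import pandas as pd
from vertexai.preview.evaluation import EvalTask

eval_dataset = pd.DataFrame(
    {
        "content": ["Summarize the article.", "Translate 'hello' to French."],
        "model_a_response": ["The article says ...", "bonjour"],
        "model_b_response": ["In short, ...", "salut"],
    }
)

# Task-level default: evaluate model A's responses.
eval_task = EvalTask(
    dataset=eval_dataset,
    metrics=["coherence", "fluency"],
    response_column_name="model_a_response",
)

# No per-call value: falls back to the task-level "model_a_response".
result_a = eval_task.evaluate()

# Per-call override: with this change, a response_column_name passed to
# evaluate() takes precedence over the one stored on the EvalTask.
result_b = eval_task.evaluate(response_column_name="model_b_response")
```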