@@ -388,13 +388,25 @@ def __init__(
388
388
def _prediction_client(self) -> prediction_service.PredictionServiceClient:
    """Lazily creates and caches the synchronous PredictionServiceClient.

    If an API key is configured in the global config and no project is
    set, the client is created using API-key auth (no location override);
    otherwise it is created for this model's location. The client is
    cached on the instance after first creation.

    Returns:
        The cached ``prediction_service.PredictionServiceClient``.
    """
    # Switch to @functools.cached_property once it's available.
    if not getattr(self, "_prediction_client_value", None):
        config = aiplatform_initializer.global_config
        if config.api_key and not config.project:
            # API-key auth: no project is configured, so no location
            # override is passed.
            self._prediction_client_value = config.create_client(
                client_class=prediction_service.PredictionServiceClient,
                api_key=config.api_key,
                prediction_client=True,
            )
        else:
            self._prediction_client_value = config.create_client(
                client_class=prediction_service.PredictionServiceClient,
                location_override=self._location,
                prediction_client=True,
            )
    return self._prediction_client_value
@property
def _prediction_async_client(
    self,
) -> prediction_service.PredictionServiceAsyncClient:
    """Lazily creates and caches the async PredictionServiceAsyncClient.

    Returns:
        The cached ``prediction_service.PredictionServiceAsyncClient``.

    Raises:
        RuntimeError: If an API key is configured without a project —
            API-key auth is not yet supported for async clients.
    """
    # Switch to @functools.cached_property once it's available.
    if not getattr(self, "_prediction_async_client_value", None):
        config = aiplatform_initializer.global_config
        # Guard clause: API-key auth has no async-client support yet.
        if config.api_key and not config.project:
            raise RuntimeError(
                "Using an api key is not supported yet for async clients."
            )
        self._prediction_async_client_value = config.create_client(
            client_class=prediction_service.PredictionServiceAsyncClient,
            location_override=self._location,
            prediction_client=True,
        )
    return self._prediction_async_client_value
@property
def _llm_utility_client(self) -> llm_utility_service.LlmUtilityServiceClient:
    """Lazily creates and caches the synchronous LlmUtilityServiceClient.

    If an API key is configured in the global config and no project is
    set, the client is created using API-key auth (no location override);
    otherwise it is created for this model's location. The client is
    cached on the instance after first creation.

    Returns:
        The cached ``llm_utility_service.LlmUtilityServiceClient``.
    """
    # Switch to @functools.cached_property once it's available.
    if not getattr(self, "_llm_utility_client_value", None):
        config = aiplatform_initializer.global_config
        if config.api_key and not config.project:
            # API-key auth: no project is configured, so no location
            # override is passed.
            self._llm_utility_client_value = config.create_client(
                client_class=llm_utility_service.LlmUtilityServiceClient,
                api_key=config.api_key,
                prediction_client=True,
            )
        else:
            self._llm_utility_client_value = config.create_client(
                client_class=llm_utility_service.LlmUtilityServiceClient,
                location_override=self._location,
                prediction_client=True,
            )
    return self._llm_utility_client_value
@property
def _llm_utility_async_client(
    self,
) -> llm_utility_service.LlmUtilityServiceAsyncClient:
    """Lazily creates and caches the async LlmUtilityServiceAsyncClient.

    Returns:
        The cached ``llm_utility_service.LlmUtilityServiceAsyncClient``.

    Raises:
        RuntimeError: If an API key is configured without a project —
            API-key auth is not yet supported for async clients.
    """
    # Switch to @functools.cached_property once it's available.
    if not getattr(self, "_llm_utility_async_client_value", None):
        config = aiplatform_initializer.global_config
        # Guard clause: API-key auth has no async-client support yet.
        if config.api_key and not config.project:
            raise RuntimeError(
                "Using an api key is not supported yet for async clients."
            )
        self._llm_utility_async_client_value = config.create_client(
            client_class=llm_utility_service.LlmUtilityServiceAsyncClient,
            location_override=self._location,
            prediction_client=True,
        )
    return self._llm_utility_async_client_value
def _prepare_request (
0 commit comments