Skip to content

Commit ba9a314

Browse files
gcf-owl-bot[bot] and copybara-github
authored and committed
Copybara import of the project:
-- cb0e5fe by release-please[bot] <55107282+release-please[bot]@users.noreply.github.com>: chore(main): release 1.83.0 (#4990) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> -- a1535eb by Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>: feat: Add multihost_gpu_node_count to Vertex SDK for multihost GPU support PiperOrigin-RevId: 733768886 Source-Link: googleapis/googleapis@4129cd8 Source-Link: googleapis/googleapis-gen@fc4062c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmM0MDYyY2NjMjk3M2QzZGNkYzVlYTIyNGU3OTgxMzA3YjUzYzIxNyJ9 feat: allowing users to specify the version id of the Model Garden model feat: allowing users to choose whether to use the hf model cache PiperOrigin-RevId: 733750388 Source-Link: googleapis/googleapis@7e795c4 Source-Link: googleapis/googleapis-gen@7c5ddcd Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiN2M1ZGRjZGRkOWRhYmRkMThmMTA2MzA4Y2E4OThkM2RhZGE2MGMwYSJ9 feat: allowing users to specify the version id of the Model Garden model feat: allowing users to choose whether to use the hf model cache PiperOrigin-RevId: 733567956 Source-Link: googleapis/googleapis@7dfaf27 Source-Link: googleapis/googleapis-gen@65e6556 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjVlNjU1NjNkMjVhMjI4ZDNlNWU1NzkxZDMyMTdjOTc2ZmQyMzA1MCJ9 feat: add Layout Parser to RAG v1 API PiperOrigin-RevId: 733531494 Source-Link: googleapis/googleapis@f4f8efb Source-Link: googleapis/googleapis-gen@6574f78 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjU3NGY3ODZjM2NjYjcxNjE5YzBiNzJjOWY3OWI0ODk5MDRkODRhNiJ9 fix!: remove VertexAISearch.engine option PiperOrigin-RevId: 733027838 Source-Link: googleapis/googleapis@73bb1ed Source-Link: googleapis/googleapis-gen@b395057 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjM5NTA1NzhmNDVkMjVlYzQxNDQ2YWQzM2NlZTMwMjQyYjIxMjY3OSJ9 -- 154fe16 by Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>: 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md COPYBARA_INTEGRATE_REVIEW=#4998 from googleapis:owl-bot-copy ace9aad PiperOrigin-RevId: 733971689
1 parent cb0e5fe commit ba9a314

24 files changed

+217
-86
lines changed

google/cloud/aiplatform_v1/__init__.py

+2
Original file line numberDiff line numberDiff line change
@@ -973,6 +973,7 @@
973973
from .types.vertex_rag_data import RagEmbeddingModelConfig
974974
from .types.vertex_rag_data import RagFile
975975
from .types.vertex_rag_data import RagFileChunkingConfig
976+
from .types.vertex_rag_data import RagFileParsingConfig
976977
from .types.vertex_rag_data import RagFileTransformationConfig
977978
from .types.vertex_rag_data import RagVectorDbConfig
978979
from .types.vertex_rag_data import UploadRagFileConfig
@@ -1742,6 +1743,7 @@
17421743
"RagEmbeddingModelConfig",
17431744
"RagFile",
17441745
"RagFileChunkingConfig",
1746+
"RagFileParsingConfig",
17451747
"RagFileTransformationConfig",
17461748
"RagQuery",
17471749
"RagRetrievalConfig",

google/cloud/aiplatform_v1/types/__init__.py

+2
Original file line numberDiff line numberDiff line change
@@ -1102,6 +1102,7 @@
11021102
RagEmbeddingModelConfig,
11031103
RagFile,
11041104
RagFileChunkingConfig,
1105+
RagFileParsingConfig,
11051106
RagFileTransformationConfig,
11061107
RagVectorDbConfig,
11071108
UploadRagFileConfig,
@@ -2021,6 +2022,7 @@
20212022
"RagEmbeddingModelConfig",
20222023
"RagFile",
20232024
"RagFileChunkingConfig",
2025+
"RagFileParsingConfig",
20242026
"RagFileTransformationConfig",
20252027
"RagVectorDbConfig",
20262028
"UploadRagFileConfig",

google/cloud/aiplatform_v1/types/model.py

+14
Original file line numberDiff line numberDiff line change
@@ -709,12 +709,26 @@ class ModelGardenSource(proto.Message):
709709
public_model_name (str):
710710
Required. The model garden source model
711711
resource name.
712+
version_id (str):
713+
Optional. The model garden source model
714+
version ID.
715+
skip_hf_model_cache (bool):
716+
Optional. Whether to avoid pulling the model
717+
from the HF cache.
712718
"""
713719

714720
public_model_name: str = proto.Field(
715721
proto.STRING,
716722
number=1,
717723
)
724+
version_id: str = proto.Field(
725+
proto.STRING,
726+
number=3,
727+
)
728+
skip_hf_model_cache: bool = proto.Field(
729+
proto.BOOL,
730+
number=4,
731+
)
718732

719733

720734
class GenieSource(proto.Message):

google/cloud/aiplatform_v1/types/tool.py

+3-12
Original file line numberDiff line numberDiff line change
@@ -470,29 +470,20 @@ class RagResource(proto.Message):
470470

471471

472472
class VertexAISearch(proto.Message):
473-
r"""Retrieve from Vertex AI Search datastore or engine for
474-
grounding. datastore and engine are mutually exclusive. See
475-
https://cloud.google.com/products/agent-builder
473+
r"""Retrieve from Vertex AI Search datastore for grounding.
474+
See https://cloud.google.com/products/agent-builder
476475
477476
Attributes:
478477
datastore (str):
479-
Optional. Fully-qualified Vertex AI Search data store
478+
Required. Fully-qualified Vertex AI Search data store
480479
resource ID. Format:
481480
``projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}``
482-
engine (str):
483-
Optional. Fully-qualified Vertex AI Search engine resource
484-
ID. Format:
485-
``projects/{project}/locations/{location}/collections/{collection}/engines/{engine}``
486481
"""
487482

488483
datastore: str = proto.Field(
489484
proto.STRING,
490485
number=1,
491486
)
492-
engine: str = proto.Field(
493-
proto.STRING,
494-
number=2,
495-
)
496487

497488

498489
class GoogleSearchRetrieval(proto.Message):

google/cloud/aiplatform_v1/types/vertex_rag_data.py

+62
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
"RagFile",
3636
"RagFileChunkingConfig",
3737
"RagFileTransformationConfig",
38+
"RagFileParsingConfig",
3839
"UploadRagFileConfig",
3940
"ImportRagFilesConfig",
4041
},
@@ -526,6 +527,58 @@ class RagFileTransformationConfig(proto.Message):
526527
)
527528

528529

530+
class RagFileParsingConfig(proto.Message):
531+
r"""Specifies the parsing config for RagFiles.
532+
533+
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
534+
535+
Attributes:
536+
layout_parser (google.cloud.aiplatform_v1.types.RagFileParsingConfig.LayoutParser):
537+
The Layout Parser to use for RagFiles.
538+
539+
This field is a member of `oneof`_ ``parser``.
540+
"""
541+
542+
class LayoutParser(proto.Message):
543+
r"""Document AI Layout Parser config.
544+
545+
Attributes:
546+
processor_name (str):
547+
The full resource name of a Document AI processor or
548+
processor version. The processor must have type
549+
``LAYOUT_PARSER_PROCESSOR``. If specified, the
550+
``additional_config.parse_as_scanned_pdf`` field must be
551+
false. Format:
552+
553+
- ``projects/{project_id}/locations/{location}/processors/{processor_id}``
554+
- ``projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}``
555+
max_parsing_requests_per_min (int):
556+
The maximum number of requests the job is
557+
allowed to make to the Document AI processor per
558+
minute. Consult
559+
https://cloud.google.com/document-ai/quotas and
560+
the Quota page for your project to set an
561+
appropriate value here. If unspecified, a
562+
default value of 120 QPM would be used.
563+
"""
564+
565+
processor_name: str = proto.Field(
566+
proto.STRING,
567+
number=1,
568+
)
569+
max_parsing_requests_per_min: int = proto.Field(
570+
proto.INT32,
571+
number=2,
572+
)
573+
574+
layout_parser: LayoutParser = proto.Field(
575+
proto.MESSAGE,
576+
number=4,
577+
oneof="parser",
578+
message=LayoutParser,
579+
)
580+
581+
529582
class UploadRagFileConfig(proto.Message):
530583
r"""Config for uploading RagFile.
531584
@@ -600,6 +653,10 @@ class ImportRagFilesConfig(proto.Message):
600653
rag_file_transformation_config (google.cloud.aiplatform_v1.types.RagFileTransformationConfig):
601654
Specifies the transformation config for
602655
RagFiles.
656+
rag_file_parsing_config (google.cloud.aiplatform_v1.types.RagFileParsingConfig):
657+
Optional. Specifies the parsing config for
658+
RagFiles. RAG will use the default parser if
659+
this field is not set.
603660
max_embedding_requests_per_min (int):
604661
Optional. The max number of queries per
605662
minute that this job is allowed to make to the
@@ -658,6 +715,11 @@ class ImportRagFilesConfig(proto.Message):
658715
number=16,
659716
message="RagFileTransformationConfig",
660717
)
718+
rag_file_parsing_config: "RagFileParsingConfig" = proto.Field(
719+
proto.MESSAGE,
720+
number=8,
721+
message="RagFileParsingConfig",
722+
)
661723
max_embedding_requests_per_min: int = proto.Field(
662724
proto.INT32,
663725
number=5,

google/cloud/aiplatform_v1beta1/services/migration_service/client.py

+9-9
Original file line numberDiff line numberDiff line change
@@ -242,40 +242,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
242242
@staticmethod
243243
def dataset_path(
244244
project: str,
245-
location: str,
246245
dataset: str,
247246
) -> str:
248247
"""Returns a fully-qualified dataset string."""
249-
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
248+
return "projects/{project}/datasets/{dataset}".format(
250249
project=project,
251-
location=location,
252250
dataset=dataset,
253251
)
254252

255253
@staticmethod
256254
def parse_dataset_path(path: str) -> Dict[str, str]:
257255
"""Parses a dataset path into its component segments."""
258-
m = re.match(
259-
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
260-
path,
261-
)
256+
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
262257
return m.groupdict() if m else {}
263258

264259
@staticmethod
265260
def dataset_path(
266261
project: str,
262+
location: str,
267263
dataset: str,
268264
) -> str:
269265
"""Returns a fully-qualified dataset string."""
270-
return "projects/{project}/datasets/{dataset}".format(
266+
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
271267
project=project,
268+
location=location,
272269
dataset=dataset,
273270
)
274271

275272
@staticmethod
276273
def parse_dataset_path(path: str) -> Dict[str, str]:
277274
"""Parses a dataset path into its component segments."""
278-
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
275+
m = re.match(
276+
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
277+
path,
278+
)
279279
return m.groupdict() if m else {}
280280

281281
@staticmethod

google/cloud/aiplatform_v1beta1/types/machine_resources.py

+7
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,9 @@ class MachineSpec(proto.Message):
7676
Immutable. The topology of the TPUs. Corresponds to the TPU
7777
topologies available from GKE. (Example: tpu_topology:
7878
"2x2x1").
79+
multihost_gpu_node_count (int):
80+
Optional. Immutable. The number of nodes per
81+
replica for multihost GPU deployments.
7982
reservation_affinity (google.cloud.aiplatform_v1beta1.types.ReservationAffinity):
8083
Optional. Immutable. Configuration
8184
controlling how this resource pool consumes
@@ -99,6 +102,10 @@ class MachineSpec(proto.Message):
99102
proto.STRING,
100103
number=4,
101104
)
105+
multihost_gpu_node_count: int = proto.Field(
106+
proto.INT32,
107+
number=6,
108+
)
102109
reservation_affinity: gca_reservation_affinity.ReservationAffinity = proto.Field(
103110
proto.MESSAGE,
104111
number=5,

google/cloud/aiplatform_v1beta1/types/model.py

+14
Original file line numberDiff line numberDiff line change
@@ -636,12 +636,26 @@ class ModelGardenSource(proto.Message):
636636
public_model_name (str):
637637
Required. The model garden source model
638638
resource name.
639+
version_id (str):
640+
Optional. The model garden source model
641+
version ID.
642+
skip_hf_model_cache (bool):
643+
Optional. Whether to avoid pulling the model
644+
from the HF cache.
639645
"""
640646

641647
public_model_name: str = proto.Field(
642648
proto.STRING,
643649
number=1,
644650
)
651+
version_id: str = proto.Field(
652+
proto.STRING,
653+
number=3,
654+
)
655+
skip_hf_model_cache: bool = proto.Field(
656+
proto.BOOL,
657+
number=4,
658+
)
645659

646660

647661
class GenieSource(proto.Message):

google/cloud/aiplatform_v1beta1/types/tool.py

+3-12
Original file line numberDiff line numberDiff line change
@@ -565,29 +565,20 @@ class RagResource(proto.Message):
565565

566566

567567
class VertexAISearch(proto.Message):
568-
r"""Retrieve from Vertex AI Search datastore or engine for
569-
grounding. datastore and engine are mutually exclusive. See
570-
https://cloud.google.com/products/agent-builder
568+
r"""Retrieve from Vertex AI Search datastore for grounding.
569+
See https://cloud.google.com/products/agent-builder
571570
572571
Attributes:
573572
datastore (str):
574-
Optional. Fully-qualified Vertex AI Search data store
573+
Required. Fully-qualified Vertex AI Search data store
575574
resource ID. Format:
576575
``projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}``
577-
engine (str):
578-
Optional. Fully-qualified Vertex AI Search engine resource
579-
ID. Format:
580-
``projects/{project}/locations/{location}/collections/{collection}/engines/{engine}``
581576
"""
582577

583578
datastore: str = proto.Field(
584579
proto.STRING,
585580
number=1,
586581
)
587-
engine: str = proto.Field(
588-
proto.STRING,
589-
number=2,
590-
)
591582

592583

593584
class GoogleSearchRetrieval(proto.Message):

tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py

+4-16
Original file line numberDiff line numberDiff line change
@@ -4670,10 +4670,7 @@ def test_create_cached_content_rest_call_success(request_type):
46704670
}
46714671
],
46724672
"retrieval": {
4673-
"vertex_ai_search": {
4674-
"datastore": "datastore_value",
4675-
"engine": "engine_value",
4676-
},
4673+
"vertex_ai_search": {"datastore": "datastore_value"},
46774674
"vertex_rag_store": {
46784675
"rag_resources": [
46794676
{
@@ -5145,10 +5142,7 @@ def test_update_cached_content_rest_call_success(request_type):
51455142
}
51465143
],
51475144
"retrieval": {
5148-
"vertex_ai_search": {
5149-
"datastore": "datastore_value",
5150-
"engine": "engine_value",
5151-
},
5145+
"vertex_ai_search": {"datastore": "datastore_value"},
51525146
"vertex_rag_store": {
51535147
"rag_resources": [
51545148
{
@@ -6482,10 +6476,7 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type):
64826476
}
64836477
],
64846478
"retrieval": {
6485-
"vertex_ai_search": {
6486-
"datastore": "datastore_value",
6487-
"engine": "engine_value",
6488-
},
6479+
"vertex_ai_search": {"datastore": "datastore_value"},
64896480
"vertex_rag_store": {
64906481
"rag_resources": [
64916482
{
@@ -6989,10 +6980,7 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type):
69896980
}
69906981
],
69916982
"retrieval": {
6992-
"vertex_ai_search": {
6993-
"datastore": "datastore_value",
6994-
"engine": "engine_value",
6995-
},
6983+
"vertex_ai_search": {"datastore": "datastore_value"},
69966984
"vertex_rag_store": {
69976985
"rag_resources": [
69986986
{

tests/unit/gapic/aiplatform_v1/test_model_service.py

+10-2
Original file line numberDiff line numberDiff line change
@@ -14980,7 +14980,11 @@ def test_update_model_rest_call_success(request_type):
1498014980
"original_model_info": {"model": "model_value"},
1498114981
"metadata_artifact": "metadata_artifact_value",
1498214982
"base_model_source": {
14983-
"model_garden_source": {"public_model_name": "public_model_name_value"},
14983+
"model_garden_source": {
14984+
"public_model_name": "public_model_name_value",
14985+
"version_id": "version_id_value",
14986+
"skip_hf_model_cache": True,
14987+
},
1498414988
"genie_source": {"base_model_uri": "base_model_uri_value"},
1498514989
},
1498614990
"satisfies_pzs": True,
@@ -18957,7 +18961,11 @@ async def test_update_model_rest_asyncio_call_success(request_type):
1895718961
"original_model_info": {"model": "model_value"},
1895818962
"metadata_artifact": "metadata_artifact_value",
1895918963
"base_model_source": {
18960-
"model_garden_source": {"public_model_name": "public_model_name_value"},
18964+
"model_garden_source": {
18965+
"public_model_name": "public_model_name_value",
18966+
"version_id": "version_id_value",
18967+
"skip_hf_model_cache": True,
18968+
},
1896118969
"genie_source": {"base_model_uri": "base_model_uri_value"},
1896218970
},
1896318971
"satisfies_pzs": True,

0 commit comments

Comments (0)