Commit af0ee70

Merge branch 'main' into owl-bot-copy

2 parents 8fd79b6 + c71c3dd

File tree: 10 files changed, +96 -19 lines

.kokoro/release.sh

+11-3
@@ -22,8 +22,16 @@ python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /
 # Disable buffering, so that the logs stream through.
 export PYTHONUNBUFFERED=1
 
-# Move into the package, build the distribution and upload.
-TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1")
+# Move into the `google-cloud-aiplatform` package, build the distribution and upload.
+GCA_TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1")
 cd github/python-aiplatform
 python3 setup.py sdist bdist_wheel
-twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
+twine upload --username __token__ --password "${GCA_TWINE_PASSWORD}" dist/*
+
+# Move into the `vertexai` package, build the distribution and upload.
+VERTEXAI_TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_vertexai-pypi-token-1")
+cd github/python-aiplatform/pypi/_vertex_ai_placeholder
+python3 -m build
+twine upload --username __token__ --password "${VERTEXAI_TWINE_PASSWORD}" dist/*
+
+
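The new release step relies on `python3 -m build` to produce the placeholder sdist and wheel (the `build` dependency is added in `.kokoro/requirements.in` below). A minimal sketch of exercising the same build locally through the `build` package's Python API and validating the artifacts with `twine check` instead of uploading; the relative path and the dry-run check are illustrative assumptions, not part of release.sh:

    # Sketch only: build the placeholder package the way `python3 -m build` does,
    # then validate the artifacts without uploading them.
    import subprocess
    from build import ProjectBuilder

    builder = ProjectBuilder("pypi/_vertex_ai_placeholder")  # assumed working directory: repo root
    sdist_path = builder.build("sdist", "dist/")
    wheel_path = builder.build("wheel", "dist/")

    # twine must be installed; this checks metadata instead of publishing.
    subprocess.run(["twine", "check", sdist_path, wheel_path], check=True)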

.kokoro/release/common.cfg

+11-1
@@ -23,7 +23,7 @@ env_vars: {
   value: "github/python-aiplatform/.kokoro/release.sh"
 }
 
-# Fetch PyPI password
+# Fetch google-cloud-aiplatform PyPI password
 before_action {
   fetch_keystore {
     keystore_resource {
@@ -33,6 +33,16 @@ before_action {
   }
 }
 
+# Fetch vertexai PyPI password
+before_action {
+  fetch_keystore {
+    keystore_resource {
+      keystore_config_id: 73713
+      keyname: "vertexai-pypi-token-1"
+    }
+  }
+}
+
 # Tokens needed to report release status back to GitHub
 env_vars: {
   key: "SECRET_MANAGER_KEYS"

.kokoro/requirements.in

+1
@@ -8,3 +8,4 @@ setuptools
 nox>=2022.11.21 # required to remove dependency on py
 charset-normalizer<3
 click<8.1.0
+build

.kokoro/requirements.txt

+10-1
@@ -1,5 +1,5 @@
 #
-# This file is autogenerated by pip-compile with Python 3.9
+# This file is autogenerated by pip-compile with Python 3.11
 # by the following command:
 #
 #    pip-compile --allow-unsafe --generate-hashes requirements.in
@@ -12,6 +12,10 @@ attrs==23.1.0 \
     --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \
     --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015
     # via gcp-releasetool
+build==1.2.1 \
+    --hash=sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d \
+    --hash=sha256:75e10f767a433d9a86e50d83f418e83efc18ede923ee5ff7df93b6cb0306c5d4
+    # via -r requirements.in
 cachetools==5.3.2 \
     --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \
     --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1
@@ -373,6 +377,7 @@ packaging==23.2 \
     --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
     --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7
     # via
+    #   build
     #   gcp-releasetool
     #   nox
 pkginfo==1.9.6 \
@@ -438,6 +443,10 @@ pyjwt==2.8.0 \
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
     # via gcp-releasetool
+pyproject-hooks==1.0.0 \
+    --hash=sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8 \
+    --hash=sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5
+    # via build
 python-dateutil==2.8.2 \
     --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
     --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9

owlbot.py

+1
@@ -112,6 +112,7 @@
         ".kokoro/continuous/prerelease-deps.cfg",
         ".kokoro/presubmit/prerelease-deps.cfg",
         ".kokoro/docs/docs-presubmit.cfg",
+        ".kokoro/release.sh",
         # exclude sample configs so periodic samples are tested against main
         # instead of pypi
         ".kokoro/samples/python3.7/common.cfg",

samples/model-builder/conftest.py

+9-1
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock
+from unittest.mock import patch
 
 from google.cloud import aiplatform
+import vertexai
 from vertexai.resources import preview as preview_resources
 import pytest
 
@@ -25,6 +27,12 @@ def mock_sdk_init():
         yield mock
 
 
+@pytest.fixture
+def mock_vertexai_init():
+    with patch.object(vertexai, "init") as mock:
+        yield mock
+
+
 """
 ----------------------------------------------------------------------------
 Dataset Fixtures
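The new `mock_vertexai_init` fixture mirrors `mock_sdk_init` but patches `vertexai.init`. A minimal sketch of how a sample test could consume it (hypothetical test name and argument values; the real usage appears in `init_sample_test.py` below):

    import vertexai

    # pytest injects the fixture from conftest.py, so any call to vertexai.init()
    # made while the test runs is received by the MagicMock yielded above.
    def test_calls_vertexai_init(mock_vertexai_init):
        vertexai.init(project="my-project", location="us-central1")  # stand-in for code under test
        mock_vertexai_init.assert_called_once_with(project="my-project", location="us-central1")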

samples/model-builder/init_sample.py

+2-2
@@ -28,9 +28,9 @@ def init_sample(
     service_account: Optional[str] = None,
 ):
 
-    from google.cloud import aiplatform
+    import vertexai
 
-    aiplatform.init(
+    vertexai.init(
         project=project,
         location=location,
         experiment=experiment,
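With this change the sample initializes the SDK through the top-level `vertexai` namespace instead of `google.cloud.aiplatform`. A minimal sketch of invoking the sample directly, using only the parameters visible in this diff; the project, region, experiment, and service-account values are placeholders:

    import init_sample  # the sample module shown above

    init_sample.init_sample(
        project="my-project",        # placeholder project ID
        location="us-central1",      # placeholder region
        experiment="my-experiment",  # placeholder experiment name
        service_account="sa@my-project.iam.gserviceaccount.com",  # placeholder
    )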

samples/model-builder/init_sample_test.py

+2-2
@@ -17,7 +17,7 @@
 import test_constants as constants
 
 
-def test_init_sample(mock_sdk_init):
+def test_init_sample(mock_vertexai_init):
 
     init_sample.init_sample(
         project=constants.PROJECT,
@@ -29,7 +29,7 @@ def test_init_sample(mock_sdk_init):
         service_account=constants.SERVICE_ACCOUNT,
     )
 
-    mock_sdk_init.assert_called_once_with(
+    mock_vertexai_init.assert_called_once_with(
         project=constants.PROJECT,
         location=constants.LOCATION_EUROPE,
         experiment=constants.EXPERIMENT_NAME,

tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py

+19-5
@@ -38,6 +38,7 @@
 _TEST_LOCATION = "us-central1"
 _TEST_PROJECT = "test-project"
 _TEST_MODEL = "gemini-1.0-pro"
+_TEST_SYSTEM_INSTRUCTION = "You are a helpful bot."
 
 
 def place_tool_query(
@@ -173,6 +174,7 @@ def test_initialization_with_tools(self, mock_chatvertexai):
         ]
         agent = reasoning_engines.LangchainAgent(
             model=_TEST_MODEL,
+            system_instruction=_TEST_SYSTEM_INSTRUCTION,
             tools=tools,
         )
         for tool, agent_tool in zip(tools, agent._tools):
@@ -255,11 +257,6 @@ def test_enable_tracing_warning(self, caplog, langchain_instrumentor_none_mock):
         assert "enable_tracing=True but proceeding with tracing disabled" in caplog.text
 
 
-class TestConvertToolsOrRaise:
-    def test_convert_tools_or_raise(self, vertexai_init_mock):
-        pass
-
-
 def _return_input_no_typing(input_):
     """Returns input back to user."""
     return input_
@@ -272,3 +269,20 @@ def test_raise_untyped_input_args(self, vertexai_init_mock):
                 model=_TEST_MODEL,
                 tools=[_return_input_no_typing],
             )
+
+
+class TestSystemInstructionAndPromptRaisesErrors:
+    def test_raise_both_system_instruction_and_prompt_error(self, vertexai_init_mock):
+        with pytest.raises(
+            ValueError,
+            match=r"Only one of `prompt` or `system_instruction` should be specified.",
+        ):
+            reasoning_engines.LangchainAgent(
+                model=_TEST_MODEL,
+                system_instruction=_TEST_SYSTEM_INSTRUCTION,
+                prompt=prompts.ChatPromptTemplate.from_messages(
+                    [
+                        ("user", "{input}"),
+                    ]
+                ),
+            )

vertexai/preview/reasoning_engines/templates/langchain.py

+30-4
@@ -114,6 +114,7 @@ def _default_model_builder(
 def _default_runnable_builder(
     model: "BaseLanguageModel",
     *,
+    system_instruction: Optional[str] = None,
     tools: Optional[Sequence["_ToolLike"]] = None,
     prompt: Optional["RunnableSerializable"] = None,
     output_parser: Optional["RunnableSerializable"] = None,
@@ -131,7 +132,10 @@ def _default_runnable_builder(
     # user would reflect that is by setting chat_history (which defaults to
     # None).
     has_history: bool = chat_history is not None
-    prompt = prompt or _default_prompt(has_history)
+    prompt = prompt or _default_prompt(
+        has_history=has_history,
+        system_instruction=system_instruction,
+    )
     output_parser = output_parser or _default_output_parser()
     model_tool_kwargs = model_tool_kwargs or {}
     agent_executor_kwargs = agent_executor_kwargs or {}
@@ -162,7 +166,10 @@ def _default_runnable_builder(
     return agent_executor
 
 
-def _default_prompt(has_history: bool) -> "RunnableSerializable":
+def _default_prompt(
+    has_history: bool,
+    system_instruction: Optional[str] = None,
+) -> "RunnableSerializable":
     from langchain_core import prompts
 
     try:
@@ -173,6 +180,10 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
             format_to_openai_tool_messages as format_to_tool_messages,
         )
 
+    system_instructions = []
+    if system_instruction:
+        system_instructions = [("system", system_instruction)]
+
     if has_history:
         return {
             "history": lambda x: x["history"],
@@ -181,7 +192,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
                 lambda x: format_to_tool_messages(x["intermediate_steps"])
             ),
         } | prompts.ChatPromptTemplate.from_messages(
-            [
+            system_instructions
+            + [
                 prompts.MessagesPlaceholder(variable_name="history"),
                 ("user", "{input}"),
                 prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
@@ -194,7 +206,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
                 lambda x: format_to_tool_messages(x["intermediate_steps"])
             ),
         } | prompts.ChatPromptTemplate.from_messages(
-            [
+            system_instructions
+            + [
                 ("user", "{input}"),
                 prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
@@ -265,6 +278,7 @@ def __init__(
         self,
         model: str,
         *,
+        system_instruction: Optional[str] = None,
         prompt: Optional["RunnableSerializable"] = None,
         tools: Optional[Sequence["_ToolLike"]] = None,
        output_parser: Optional["RunnableSerializable"] = None,
@@ -319,6 +333,9 @@ def __init__(
         Args:
             model (str):
                 Optional. The name of the model (e.g. "gemini-1.0-pro").
+            system_instruction (str):
+                Optional. The system instruction to use for the agent. This
+                argument should not be specified if `prompt` is specified.
             prompt (langchain_core.runnables.RunnableSerializable):
                 Optional. The prompt template for the model. Defaults to a
                 ChatPromptTemplate.
@@ -394,6 +411,7 @@ def __init__(
                 False.
 
         Raises:
+            ValueError: If both `prompt` and `system_instruction` are specified.
            TypeError: If there is an invalid tool (e.g. function with an input
                that did not specify its type).
        """
@@ -407,7 +425,14 @@ def __init__(
         # they are deployed.
         _validate_tools(tools)
         self._tools = tools
+        if prompt and system_instruction:
+            raise ValueError(
+                "Only one of `prompt` or `system_instruction` should be specified. "
+                "Consider incorporating the system instruction into the prompt "
+                "rather than passing it separately as an argument."
+            )
         self._model_name = model
+        self._system_instruction = system_instruction
         self._prompt = prompt
         self._output_parser = output_parser
         self._chat_history = chat_history
@@ -528,6 +553,7 @@ def set_up(self):
             prompt=self._prompt,
             model=self._model,
             tools=self._tools,
+            system_instruction=self._system_instruction,
             output_parser=self._output_parser,
             chat_history=self._chat_history,
             model_tool_kwargs=self._model_tool_kwargs,
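Taken together, these hunks thread an optional system message into the default prompt and reject the ambiguous case where both `prompt` and `system_instruction` are passed. A minimal usage sketch of the new keyword; the project, location, instruction text, and the `get_exchange_rate` tool are illustrative placeholders, not taken from this commit:

    import vertexai
    from vertexai.preview import reasoning_engines

    vertexai.init(project="my-project", location="us-central1")  # placeholder values

    def get_exchange_rate(currency_from: str, currency_to: str) -> str:
        """Illustrative typed tool: returns a canned exchange-rate string."""
        return f"1 {currency_from} = 0.9 {currency_to}"

    agent = reasoning_engines.LangchainAgent(
        model="gemini-1.0-pro",
        system_instruction="You are a helpful currency assistant.",  # becomes a ("system", ...) message
        tools=[get_exchange_rate],
    )
    # Passing both `system_instruction` and `prompt` would raise the ValueError added above.
    response = agent.query(input="What is 1 USD in EUR?")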
