@@ -114,6 +114,7 @@ def _default_model_builder(
 def _default_runnable_builder(
     model: "BaseLanguageModel",
     *,
+    system_instruction: Optional[str] = None,
     tools: Optional[Sequence["_ToolLike"]] = None,
     prompt: Optional["RunnableSerializable"] = None,
     output_parser: Optional["RunnableSerializable"] = None,
@@ -131,7 +132,10 @@ def _default_runnable_builder(
     # user would reflect that is by setting chat_history (which defaults to
     # None).
     has_history: bool = chat_history is not None
-    prompt = prompt or _default_prompt(has_history)
+    prompt = prompt or _default_prompt(
+        has_history=has_history,
+        system_instruction=system_instruction,
+    )
     output_parser = output_parser or _default_output_parser()
     model_tool_kwargs = model_tool_kwargs or {}
     agent_executor_kwargs = agent_executor_kwargs or {}
@@ -162,7 +166,10 @@ def _default_runnable_builder(
     return agent_executor


-def _default_prompt(has_history: bool) -> "RunnableSerializable":
+def _default_prompt(
+    has_history: bool,
+    system_instruction: Optional[str] = None,
+) -> "RunnableSerializable":
     from langchain_core import prompts

     try:
@@ -173,6 +180,10 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
173
180
format_to_openai_tool_messages as format_to_tool_messages ,
174
181
)
175
182
183
+ system_instructions = []
184
+ if system_instruction :
185
+ system_instructions = [("system" , system_instruction )]
186
+
176
187
if has_history :
177
188
return {
178
189
"history" : lambda x : x ["history" ],
@@ -181,7 +192,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
181
192
lambda x : format_to_tool_messages (x ["intermediate_steps" ])
182
193
),
183
194
} | prompts .ChatPromptTemplate .from_messages (
184
- [
195
+ system_instructions
196
+ + [
185
197
prompts .MessagesPlaceholder (variable_name = "history" ),
186
198
("user" , "{input}" ),
187
199
prompts .MessagesPlaceholder (variable_name = "agent_scratchpad" ),
@@ -194,7 +206,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
194
206
lambda x : format_to_tool_messages (x ["intermediate_steps" ])
195
207
),
196
208
} | prompts .ChatPromptTemplate .from_messages (
197
- [
209
+ system_instructions
210
+ + [
198
211
("user" , "{input}" ),
199
212
prompts .MessagesPlaceholder (variable_name = "agent_scratchpad" ),
200
213
]
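After these two hunks, the default prompt prepends an optional ("system", ...) message in both the history and no-history branches. Below is a minimal sketch of the resulting assembly for the no-history branch, assuming only that langchain_core is installed; the instruction text is illustrative and the dict-pipe input mapping is omitted for brevity.

from langchain_core import prompts

# Illustrative value; in the template this comes from the new argument.
system_instruction = "Answer in one short sentence."
system_instructions = [("system", system_instruction)] if system_instruction else []

prompt = prompts.ChatPromptTemplate.from_messages(
    system_instructions
    + [
        ("user", "{input}"),
        prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

# The system message comes first, followed by the templated user turn.
print(prompt.format_messages(input="What is 2 + 2?", agent_scratchpad=[]))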
@@ -265,6 +278,7 @@ def __init__(
         self,
         model: str,
         *,
+        system_instruction: Optional[str] = None,
         prompt: Optional["RunnableSerializable"] = None,
         tools: Optional[Sequence["_ToolLike"]] = None,
         output_parser: Optional["RunnableSerializable"] = None,
@@ -319,6 +333,9 @@ def __init__(
         Args:
             model (str):
                 Optional. The name of the model (e.g. "gemini-1.0-pro").
+            system_instruction (str):
+                Optional. The system instruction to use for the agent. This
+                argument should not be specified if `prompt` is specified.
             prompt (langchain_core.runnables.RunnableSerializable):
                 Optional. The prompt template for the model. Defaults to a
                 ChatPromptTemplate.
@@ -394,6 +411,7 @@ def __init__(
                 False.

         Raises:
+            ValueError: If both `prompt` and `system_instruction` are specified.
             TypeError: If there is an invalid tool (e.g. function with an input
                 that did not specify its type).
         """
@@ -407,7 +425,14 @@ def __init__(
         # they are deployed.
         _validate_tools(tools)
         self._tools = tools
+        if prompt and system_instruction:
+            raise ValueError(
+                "Only one of `prompt` or `system_instruction` should be specified. "
+                "Consider incorporating the system instruction into the prompt "
+                "rather than passing it separately as an argument."
+            )
         self._model_name = model
+        self._system_instruction = system_instruction
         self._prompt = prompt
         self._output_parser = output_parser
         self._chat_history = chat_history
@@ -528,6 +553,7 @@ def set_up(self):
             prompt=self._prompt,
             model=self._model,
             tools=self._tools,
+            system_instruction=self._system_instruction,
             output_parser=self._output_parser,
             chat_history=self._chat_history,
             model_tool_kwargs=self._model_tool_kwargs,
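For context, a hedged usage sketch of the new argument: the class name and import path below are assumptions not shown in this diff, while the mutual-exclusion behaviour follows the ValueError added in __init__.

from vertexai.preview import reasoning_engines  # assumed import path

agent = reasoning_engines.LangchainAgent(  # assumed class name
    model="gemini-1.0-pro",
    system_instruction="You are a concise, polite assistant.",
)

# Supplying both `prompt` and `system_instruction` raises the new ValueError,
# so a custom prompt should carry its own system message instead.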