Skip to content

Commit f7feeca

Browse files
Ark-kun and copybara-github
authored and committed
feat: LLM - CodeChat - Added support for context
PiperOrigin-RevId: 563934372
1 parent d76bceb commit f7feeca

File tree

2 files changed

+9
-0
lines changed

2 files changed

+9
-0
lines changed

tests/unit/aiplatform/test_language_models.py

+1
Original file line numberDiff line numberDiff line change
@@ -2038,6 +2038,7 @@ def test_code_chat(self):
20382038
)
20392039

20402040
code_chat = model.start_chat(
2041+
context="We're working on large-scale production system.",
20412042
max_output_tokens=128,
20422043
temperature=0.2,
20432044
stop_sequences=["\n"],

vertexai/language_models/_language_models.py

+8
Original file line numberDiff line numberDiff line change
@@ -1287,6 +1287,7 @@ class CodeChatModel(_ChatModelBase):
12871287
code_chat_model = CodeChatModel.from_pretrained("codechat-bison@001")
12881288
12891289
code_chat = code_chat_model.start_chat(
1290+
context="I'm writing a large-scale enterprise application.",
12901291
max_output_tokens=128,
12911292
temperature=0.2,
12921293
)
@@ -1301,6 +1302,7 @@ class CodeChatModel(_ChatModelBase):
13011302
def start_chat(
13021303
self,
13031304
*,
1305+
context: Optional[str] = None,
13041306
max_output_tokens: Optional[int] = None,
13051307
temperature: Optional[float] = None,
13061308
message_history: Optional[List[ChatMessage]] = None,
@@ -1309,6 +1311,9 @@ def start_chat(
13091311
"""Starts a chat session with the code chat model.
13101312
13111313
Args:
1314+
context: Context shapes how the model responds throughout the conversation.
1315+
For example, you can use context to specify words the model can or
1316+
cannot use, topics to focus on or avoid, or the response format or style.
13121317
max_output_tokens: Max length of the output text in tokens. Range: [1, 1000].
13131318
temperature: Controls the randomness of predictions. Range: [0, 1].
13141319
stop_sequences: Customized stop sequences to stop the decoding process.
@@ -1318,6 +1323,7 @@ def start_chat(
13181323
"""
13191324
return CodeChatSession(
13201325
model=self,
1326+
context=context,
13211327
max_output_tokens=max_output_tokens,
13221328
temperature=temperature,
13231329
message_history=message_history,
@@ -1653,13 +1659,15 @@ class CodeChatSession(_ChatSessionBase):
16531659
def __init__(
16541660
self,
16551661
model: CodeChatModel,
1662+
context: Optional[str] = None,
16561663
max_output_tokens: Optional[int] = None,
16571664
temperature: Optional[float] = None,
16581665
message_history: Optional[List[ChatMessage]] = None,
16591666
stop_sequences: Optional[List[str]] = None,
16601667
):
16611668
super().__init__(
16621669
model=model,
1670+
context=context,
16631671
max_output_tokens=max_output_tokens,
16641672
temperature=temperature,
16651673
message_history=message_history,

0 commit comments

Comments
 (0)