Skip to content

Commit 4990eb6

Browse files
Ark-kun authored and copybara-github committed
feat: GenAI - Added support for system instructions
Usage: ``` model = generative_models.GenerativeModel( "gemini-1.0-pro", system_instruction=[ "Talk like a pirate.", "Don't use rude words.", ], ) ``` PiperOrigin-RevId: 621703355
1 parent d0585e8 commit 4990eb6

File tree

4 files changed

+41
-2
lines changed

4 files changed

+41
-2
lines changed

tests/system/vertexai/test_generative_models.py

+7-1
Original file line number | Diff line number | Diff line change
@@ -121,7 +121,13 @@ async def test_generate_content_streaming_async(self):
121121
assert chunk.text
122122

123123
def test_generate_content_with_parameters(self):
124-
model = generative_models.GenerativeModel("gemini-pro")
124+
model = generative_models.GenerativeModel(
125+
"gemini-pro",
126+
system_instruction=[
127+
"Talk like a pirate.",
128+
"Don't use rude words.",
129+
],
130+
)
125131
response = model.generate_content(
126132
contents="Why is sky blue?",
127133
generation_config=generative_models.GenerationConfig(

tests/unit/vertexai/test_generative_models.py

+8-1
Original file line number | Diff line number | Diff line change
@@ -297,7 +297,14 @@ def test_generate_content(self, generative_models: generative_models):
297297
response = model.generate_content("Why is sky blue?")
298298
assert response.text
299299

300-
response2 = model.generate_content(
300+
model2 = generative_models.GenerativeModel(
301+
"gemini-pro",
302+
system_instruction=[
303+
"Talk like a pirate.",
304+
"Don't use rude words.",
305+
],
306+
)
307+
response2 = model2.generate_content(
301308
"Why is sky blue?",
302309
generation_config=generative_models.GenerationConfig(
303310
temperature=0.2,

vertexai/generative_models/README.md

+13
Original file line number | Diff line number | Diff line change
@@ -63,6 +63,19 @@ print(vision_chat.send_message(["I like this image.", image]))
6363
print(vision_chat.send_message("What things do I like?."))
6464
```
6565

66+
#### System instructions
67+
```
68+
from vertexai.generative_models import GenerativeModel
69+
model = GenerativeModel(
70+
"gemini-1.0-pro",
71+
system_instruction=[
72+
"Talk like a pirate.",
73+
"Don't use rude words.",
74+
],
75+
)
76+
print(model.generate_content("Why is sky blue?"))
77+
```
78+
6679
#### Function calling
6780

6881
```

vertexai/generative_models/_generative_models.py

+13
Original file line number | Diff line number | Diff line change
@@ -133,6 +133,7 @@ def __init__(
133133
generation_config: Optional[GenerationConfigType] = None,
134134
safety_settings: Optional[SafetySettingsType] = None,
135135
tools: Optional[List["Tool"]] = None,
136+
system_instruction: Optional[PartsType] = None,
136137
):
137138
r"""Initializes GenerativeModel.
138139
@@ -147,6 +148,9 @@ def __init__(
147148
generation_config: Default generation config to use in generate_content.
148149
safety_settings: Default safety settings to use in generate_content.
149150
tools: Default tools to use in generate_content.
151+
system_instruction: Default system instruction to use in generate_content.
152+
Note: Only text should be used in parts.
153+
Content of each part will become a separate paragraph.
150154
"""
151155
if "/" not in model_name:
152156
model_name = "publishers/google/models/" + model_name
@@ -163,13 +167,15 @@ def __init__(
163167
self._generation_config = generation_config
164168
self._safety_settings = safety_settings
165169
self._tools = tools
170+
self._system_instruction = system_instruction
166171

167172
# Validating the parameters
168173
self._prepare_request(
169174
contents="test",
170175
generation_config=generation_config,
171176
safety_settings=safety_settings,
172177
tools=tools,
178+
system_instruction=system_instruction,
173179
)
174180

175181
@property
@@ -205,6 +211,7 @@ def _prepare_request(
205211
generation_config: Optional[GenerationConfigType] = None,
206212
safety_settings: Optional[SafetySettingsType] = None,
207213
tools: Optional[List["Tool"]] = None,
214+
system_instruction: Optional[PartsType] = None,
208215
) -> gapic_prediction_service_types.GenerateContentRequest:
209216
"""Prepares a GAPIC GenerateContentRequest."""
210217
if not contents:
@@ -213,6 +220,7 @@ def _prepare_request(
213220
generation_config = generation_config or self._generation_config
214221
safety_settings = safety_settings or self._safety_settings
215222
tools = tools or self._tools
223+
system_instruction = system_instruction or self._system_instruction
216224

217225
# contents can either be a list of Content objects (most generic case)
218226
if isinstance(contents, Sequence) and any(
@@ -244,6 +252,10 @@ def _prepare_request(
244252
else:
245253
contents = [_to_content(contents)]
246254

255+
gapic_system_instruction: Optional[gapic_content_types.Content] = None
256+
if system_instruction:
257+
gapic_system_instruction = _to_content(system_instruction)
258+
247259
gapic_generation_config: Optional[gapic_content_types.GenerationConfig] = None
248260
if generation_config:
249261
if isinstance(generation_config, gapic_content_types.GenerationConfig):
@@ -307,6 +319,7 @@ def _prepare_request(
307319
generation_config=gapic_generation_config,
308320
safety_settings=gapic_safety_settings,
309321
tools=gapic_tools,
322+
system_instruction=gapic_system_instruction,
310323
)
311324

312325
def _parse_response(

0 commit comments

Comments (0)