
Commit 6e23d68

meken authored and Ark-kun committed

fix: missing request parameters

1 parent c21b7eb · commit 6e23d68

File tree

2 files changed: +68 −0 lines changed


tests/unit/aiplatform/test_language_models.py (+4)

@@ -1775,6 +1775,8 @@ def test_text_generation_ga(self):
             presence_penalty=1.0,
             frequency_penalty=1.0,
             logit_bias={1: 100.0, 2: -100.0},
+            seed=42,
+            echo=True,
         )

         expected_errors = (100,)
@@ -1788,6 +1790,8 @@ def test_text_generation_ga(self):
         assert prediction_parameters["presencePenalty"] == 1.0
         assert prediction_parameters["frequencyPenalty"] == 1.0
         assert prediction_parameters["logitBias"] == {1: 100.0, 2: -100.0}
+        assert prediction_parameters["seed"] == 42
+        assert prediction_parameters["echo"] is True
         assert response.text == _TEST_TEXT_GENERATION_PREDICTION["content"]
         assert response.errors == expected_errors
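The test above exercises the two new parameters end to end. For context, a minimal usage sketch of the public API; the project, location, and model name are illustrative assumptions, not part of this commit:

import vertexai
from vertexai.language_models import TextGenerationModel

vertexai.init(project="my-project", location="us-central1")  # assumed setup
model = TextGenerationModel.from_pretrained("text-bison@002")  # illustrative model name

# seed makes the sampling noise reproducible across calls;
# echo=True includes the prompt in the generated text.
response = model.predict(
    "Write a haiku about the ocean.",
    temperature=0.8,
    seed=42,
    echo=True,
)
print(response.text)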

vertexai/language_models/_language_models.py (+64)

@@ -1289,6 +1289,8 @@ def predict(
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         logit_bias: Optional[Dict[int, float]] = None,
+        seed: Optional[int] = None,
+        echo: Optional[bool] = None,
     ) -> "MultiCandidateTextGenerationResponse":
         """Gets model response for a single prompt.

@@ -1321,6 +1323,14 @@ def predict(
                 Larger positive bias increases the probability of choosing the token.
                 Smaller negative bias decreases the probability of choosing the token.
                 Range: [-100.0, 100.0]
+            seed:
+                Decoder generates random noise with a pseudo random number generator, temperature * noise is added to
+                logits before sampling. The pseudo random number generator (prng) takes a seed as input, it generates
+                the same output with the same seed. If seed is not set, the seed used in decoder will not be
+                deterministic, thus the generated random noise will not be deterministic. If seed is set, the
+                generated random noise will be deterministic.
+            echo:
+                If true, the prompt is echoed in the generated text.

         Returns:
             A `MultiCandidateTextGenerationResponse` object that contains the text produced by the model.
@@ -1338,6 +1348,8 @@ def predict(
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             logit_bias=logit_bias,
+            seed=seed,
+            echo=echo,
         )

         prediction_response = self._endpoint.predict(
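The seed docstring describes seeded noise added to the logits before sampling. A toy sketch of that idea follows; Gumbel noise is an assumption (the docstring does not name the noise distribution), and this is not the service implementation:

from typing import Optional

import numpy as np

def sample_token(logits: np.ndarray, temperature: float, seed: Optional[int] = None) -> int:
    rng = np.random.default_rng(seed)      # the same seed yields the same noise sequence
    noise = rng.gumbel(size=logits.shape)  # assumed noise distribution
    return int(np.argmax(logits + temperature * noise))

logits = np.array([2.0, 1.5, 0.3])
# With a fixed seed the draw is deterministic; with seed=None it may vary per call.
assert sample_token(logits, 0.8, seed=42) == sample_token(logits, 0.8, seed=42)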
@@ -1370,6 +1382,8 @@ async def predict_async(
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         logit_bias: Optional[Dict[int, float]] = None,
+        seed: Optional[int] = None,
+        echo: Optional[bool] = None,
     ) -> "MultiCandidateTextGenerationResponse":
         """Asynchronously gets model response for a single prompt.

@@ -1402,6 +1416,14 @@ async def predict_async(
                 Larger positive bias increases the probability of choosing the token.
                 Smaller negative bias decreases the probability of choosing the token.
                 Range: [-100.0, 100.0]
+            seed:
+                Decoder generates random noise with a pseudo random number generator, temperature * noise is added to
+                logits before sampling. The pseudo random number generator (prng) takes a seed as input, it generates
+                the same output with the same seed. If seed is not set, the seed used in decoder will not be
+                deterministic, thus the generated random noise will not be deterministic. If seed is set, the
+                generated random noise will be deterministic.
+            echo:
+                If true, the prompt is echoed in the generated text.

         Returns:
             A `MultiCandidateTextGenerationResponse` object that contains the text produced by the model.
@@ -1419,6 +1441,8 @@ async def predict_async(
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             logit_bias=logit_bias,
+            seed=seed,
+            echo=echo,
         )

         prediction_response = await self._endpoint.predict_async(
@@ -1443,6 +1467,8 @@ def predict_streaming(
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         logit_bias: Optional[Dict[int, float]] = None,
+        seed: Optional[int] = None,
+        echo: Optional[bool] = None,
     ) -> Iterator[TextGenerationResponse]:
         """Gets a streaming model response for a single prompt.

@@ -1475,6 +1501,14 @@ def predict_streaming(
                 Larger positive bias increases the probability of choosing the token.
                 Smaller negative bias decreases the probability of choosing the token.
                 Range: [-100.0, 100.0]
+            seed:
+                Decoder generates random noise with a pseudo random number generator, temperature * noise is added to
+                logits before sampling. The pseudo random number generator (prng) takes a seed as input, it generates
+                the same output with the same seed. If seed is not set, the seed used in decoder will not be
+                deterministic, thus the generated random noise will not be deterministic. If seed is set, the
+                generated random noise will be deterministic.
+            echo:
+                If true, the prompt is echoed in the generated text.

         Yields:
             A stream of `TextGenerationResponse` objects that contain partial
@@ -1491,6 +1525,8 @@ def predict_streaming(
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             logit_bias=logit_bias,
+            seed=seed,
+            echo=echo,
         )

         prediction_service_client = self._endpoint._prediction_client
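The streaming variants thread the parameters through identically. A minimal sketch of a streaming call, with model name and prompt as illustrative assumptions:

from vertexai.language_models import TextGenerationModel

model = TextGenerationModel.from_pretrained("text-bison@002")  # illustrative
# seed fixes the sampling noise; echo=False keeps the prompt out of the output.
for chunk in model.predict_streaming(
    "List three uses for a paperclip.",
    seed=7,
    echo=False,
):
    print(chunk.text, end="")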
@@ -1521,6 +1557,8 @@ async def predict_streaming_async(
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         logit_bias: Optional[Dict[int, float]] = None,
+        seed: Optional[int] = None,
+        echo: Optional[bool] = None,
     ) -> AsyncIterator[TextGenerationResponse]:
         """Asynchronously gets a streaming model response for a single prompt.

@@ -1553,6 +1591,14 @@ async def predict_streaming_async(
                 Larger positive bias increases the probability of choosing the token.
                 Smaller negative bias decreases the probability of choosing the token.
                 Range: [-100.0, 100.0]
+            seed:
+                Decoder generates random noise with a pseudo random number generator, temperature * noise is added to
+                logits before sampling. The pseudo random number generator (prng) takes a seed as input, it generates
+                the same output with the same seed. If seed is not set, the seed used in decoder will not be
+                deterministic, thus the generated random noise will not be deterministic. If seed is set, the
+                generated random noise will be deterministic.
+            echo:
+                If true, the prompt is echoed in the generated text.

         Yields:
             A stream of `TextGenerationResponse` objects that contain partial
@@ -1569,6 +1615,8 @@ async def predict_streaming_async(
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             logit_bias=logit_bias,
+            seed=seed,
+            echo=echo,
        )

         prediction_service_async_client = self._endpoint._prediction_async_client
@@ -1605,6 +1653,8 @@ def _create_text_generation_prediction_request(
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         logit_bias: Optional[Dict[int, int]] = None,
+        seed: Optional[int] = None,
+        echo: Optional[bool] = None,
     ) -> "_PredictionRequest":
         """Prepares the text generation request for a single prompt.

@@ -1637,6 +1687,14 @@ def _create_text_generation_prediction_request(
                 Larger positive bias increases the probability of choosing the token.
                 Smaller negative bias decreases the probability of choosing the token.
                 Range: [-100.0, 100.0]
+            seed:
+                Decoder generates random noise with a pseudo random number generator, temperature * noise is added to
+                logits before sampling. The pseudo random number generator (prng) takes a seed as input, it generates
+                the same output with the same seed. If seed is not set, the seed used in decoder will not be
+                deterministic, thus the generated random noise will not be deterministic. If seed is set, the
+                generated random noise will be deterministic.
+            echo:
+                If true, the prompt is echoed in the generated text.

         Returns:
             A `_PredictionRequest` object that contains prediction instance and parameters.
@@ -1683,6 +1741,12 @@ def _create_text_generation_prediction_request(
         if logit_bias is not None:
             prediction_parameters["logitBias"] = logit_bias

+        if seed is not None:
+            prediction_parameters["seed"] = seed
+
+        if echo is not None:
+            prediction_parameters["echo"] = echo
+
         return _PredictionRequest(
             instance=instance,
             parameters=prediction_parameters,
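Because both new fields are added only when not None, existing callers see an unchanged request payload. A small sketch of the parameters dict the helper would build, under assumed inputs mirroring the unit test above (seed=42, echo=True, logit_bias={1: 100.0, 2: -100.0}):

# Unset parameters are simply absent from the request, so defaults
# remain server-side; only explicitly passed values are serialized.
prediction_parameters = {
    "logitBias": {1: 100.0, 2: -100.0},
    "seed": 42,
    "echo": True,
}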
