@@ -1289,6 +1289,8 @@ def predict(
1289
1289
presence_penalty : Optional [float ] = None ,
1290
1290
frequency_penalty : Optional [float ] = None ,
1291
1291
logit_bias : Optional [Dict [int , float ]] = None ,
1292
+ seed : Optional [int ] = None ,
1293
+ echo : Optional [bool ] = None ,
1292
1294
) -> "MultiCandidateTextGenerationResponse" :
1293
1295
"""Gets model response for a single prompt.
1294
1296
@@ -1321,6 +1323,14 @@ def predict(
1321
1323
Larger positive bias increases the probability of choosing the token.
1322
1324
Smaller negative bias decreases the probability of choosing the token.
1323
1325
Range: [-100.0, 100.0]
1326
+ seed:
1327
+ The decoder generates random noise with a pseudo-random number generator; temperature * noise is added to
1328
+ the logits before sampling. The pseudo-random number generator (PRNG) takes a seed as input and generates
1329
+ the same output for the same seed. If seed is not set, the seed used in the decoder will not be
1330
+ deterministic, so the generated random noise will not be deterministic. If seed is set, the
1331
+ generated random noise will be deterministic.
1332
+ echo:
1333
+ If true, the prompt is echoed in the generated text.
1324
1334
1325
1335
Returns:
1326
1336
A `MultiCandidateTextGenerationResponse` object that contains the text produced by the model.
@@ -1338,6 +1348,8 @@ def predict(
1338
1348
presence_penalty = presence_penalty ,
1339
1349
frequency_penalty = frequency_penalty ,
1340
1350
logit_bias = logit_bias ,
1351
+ seed = seed ,
1352
+ echo = echo ,
1341
1353
)
1342
1354
1343
1355
prediction_response = self ._endpoint .predict (
@@ -1370,6 +1382,8 @@ async def predict_async(
1370
1382
presence_penalty : Optional [float ] = None ,
1371
1383
frequency_penalty : Optional [float ] = None ,
1372
1384
logit_bias : Optional [Dict [int , float ]] = None ,
1385
+ seed : Optional [int ] = None ,
1386
+ echo : Optional [bool ] = None ,
1373
1387
) -> "MultiCandidateTextGenerationResponse" :
1374
1388
"""Asynchronously gets model response for a single prompt.
1375
1389
@@ -1402,6 +1416,14 @@ async def predict_async(
1402
1416
Larger positive bias increases the probability of choosing the token.
1403
1417
Smaller negative bias decreases the probability of choosing the token.
1404
1418
Range: [-100.0, 100.0]
1419
+ seed:
1420
+ The decoder generates random noise with a pseudo-random number generator; temperature * noise is added to
1421
+ the logits before sampling. The pseudo-random number generator (PRNG) takes a seed as input and generates
1422
+ the same output for the same seed. If seed is not set, the seed used in the decoder will not be
1423
+ deterministic, so the generated random noise will not be deterministic. If seed is set, the
1424
+ generated random noise will be deterministic.
1425
+ echo:
1426
+ If true, the prompt is echoed in the generated text.
1405
1427
1406
1428
Returns:
1407
1429
A `MultiCandidateTextGenerationResponse` object that contains the text produced by the model.
@@ -1419,6 +1441,8 @@ async def predict_async(
1419
1441
presence_penalty = presence_penalty ,
1420
1442
frequency_penalty = frequency_penalty ,
1421
1443
logit_bias = logit_bias ,
1444
+ seed = seed ,
1445
+ echo = echo ,
1422
1446
)
1423
1447
1424
1448
prediction_response = await self ._endpoint .predict_async (
@@ -1443,6 +1467,8 @@ def predict_streaming(
1443
1467
presence_penalty : Optional [float ] = None ,
1444
1468
frequency_penalty : Optional [float ] = None ,
1445
1469
logit_bias : Optional [Dict [int , float ]] = None ,
1470
+ seed : Optional [int ] = None ,
1471
+ echo : Optional [bool ] = None ,
1446
1472
) -> Iterator [TextGenerationResponse ]:
1447
1473
"""Gets a streaming model response for a single prompt.
1448
1474
@@ -1475,6 +1501,14 @@ def predict_streaming(
1475
1501
Larger positive bias increases the probability of choosing the token.
1476
1502
Smaller negative bias decreases the probability of choosing the token.
1477
1503
Range: [-100.0, 100.0]
1504
+ seed:
1505
+ The decoder generates random noise with a pseudo-random number generator; temperature * noise is added to
1506
+ the logits before sampling. The pseudo-random number generator (PRNG) takes a seed as input and generates
1507
+ the same output for the same seed. If seed is not set, the seed used in the decoder will not be
1508
+ deterministic, so the generated random noise will not be deterministic. If seed is set, the
1509
+ generated random noise will be deterministic.
1510
+ echo:
1511
+ If true, the prompt is echoed in the generated text.
1478
1512
1479
1513
Yields:
1480
1514
A stream of `TextGenerationResponse` objects that contain partial
@@ -1491,6 +1525,8 @@ def predict_streaming(
1491
1525
presence_penalty = presence_penalty ,
1492
1526
frequency_penalty = frequency_penalty ,
1493
1527
logit_bias = logit_bias ,
1528
+ seed = seed ,
1529
+ echo = echo ,
1494
1530
)
1495
1531
1496
1532
prediction_service_client = self ._endpoint ._prediction_client
@@ -1521,6 +1557,8 @@ async def predict_streaming_async(
1521
1557
presence_penalty : Optional [float ] = None ,
1522
1558
frequency_penalty : Optional [float ] = None ,
1523
1559
logit_bias : Optional [Dict [int , float ]] = None ,
1560
+ seed : Optional [int ] = None ,
1561
+ echo : Optional [bool ] = None ,
1524
1562
) -> AsyncIterator [TextGenerationResponse ]:
1525
1563
"""Asynchronously gets a streaming model response for a single prompt.
1526
1564
@@ -1553,6 +1591,14 @@ async def predict_streaming_async(
1553
1591
Larger positive bias increases the probability of choosing the token.
1554
1592
Smaller negative bias decreases the probability of choosing the token.
1555
1593
Range: [-100.0, 100.0]
1594
+ seed:
1595
+ The decoder generates random noise with a pseudo-random number generator; temperature * noise is added to
1596
+ the logits before sampling. The pseudo-random number generator (PRNG) takes a seed as input and generates
1597
+ the same output for the same seed. If seed is not set, the seed used in the decoder will not be
1598
+ deterministic, so the generated random noise will not be deterministic. If seed is set, the
1599
+ generated random noise will be deterministic.
1600
+ echo:
1601
+ If true, the prompt is echoed in the generated text.
1556
1602
1557
1603
Yields:
1558
1604
A stream of `TextGenerationResponse` objects that contain partial
@@ -1569,6 +1615,8 @@ async def predict_streaming_async(
1569
1615
presence_penalty = presence_penalty ,
1570
1616
frequency_penalty = frequency_penalty ,
1571
1617
logit_bias = logit_bias ,
1618
+ seed = seed ,
1619
+ echo = echo ,
1572
1620
)
1573
1621
1574
1622
prediction_service_async_client = self ._endpoint ._prediction_async_client
@@ -1605,6 +1653,8 @@ def _create_text_generation_prediction_request(
1605
1653
presence_penalty : Optional [float ] = None ,
1606
1654
frequency_penalty : Optional [float ] = None ,
1607
1655
logit_bias : Optional [Dict [int , int ]] = None ,
1656
+ seed : Optional [int ] = None ,
1657
+ echo : Optional [bool ] = None ,
1608
1658
) -> "_PredictionRequest" :
1609
1659
"""Prepares the text generation request for a single prompt.
1610
1660
@@ -1637,6 +1687,14 @@ def _create_text_generation_prediction_request(
1637
1687
Larger positive bias increases the probability of choosing the token.
1638
1688
Smaller negative bias decreases the probability of choosing the token.
1639
1689
Range: [-100.0, 100.0]
1690
+ seed:
1691
+ The decoder generates random noise with a pseudo-random number generator; temperature * noise is added to
1692
+ the logits before sampling. The pseudo-random number generator (PRNG) takes a seed as input and generates
1693
+ the same output for the same seed. If seed is not set, the seed used in the decoder will not be
1694
+ deterministic, so the generated random noise will not be deterministic. If seed is set, the
1695
+ generated random noise will be deterministic.
1696
+ echo:
1697
+ If true, the prompt is echoed in the generated text.
1640
1698
1641
1699
Returns:
1642
1700
A `_PredictionRequest` object that contains prediction instance and parameters.
@@ -1683,6 +1741,12 @@ def _create_text_generation_prediction_request(
1683
1741
if logit_bias is not None :
1684
1742
prediction_parameters ["logitBias" ] = logit_bias
1685
1743
1744
+ if seed is not None :
1745
+ prediction_parameters ["seed" ] = seed
1746
+
1747
+ if echo is not None :
1748
+ prediction_parameters ["echo" ] = echo
1749
+
1686
1750
return _PredictionRequest (
1687
1751
instance = instance ,
1688
1752
parameters = prediction_parameters ,
0 commit comments