@@ -291,6 +291,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -340,6 +341,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -438,6 +443,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -490,6 +496,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -584,6 +594,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -636,6 +647,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -729,6 +744,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -756,6 +772,7 @@ def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
@@ -1284,6 +1301,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1333,6 +1351,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1431,6 +1453,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1483,6 +1506,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1577,6 +1604,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1629,6 +1657,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1722,6 +1754,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1749,6 +1782,7 @@ async def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,