Skip to content

Commit 1d142bb

Browse files
stainless-bot and megamanics
authored and committed
feat(api): updates (openai#1474)
1 parent 0e64fd4 commit 1d142bb

File tree

11 files changed

+143
-1
lines changed

11 files changed

+143
-1
lines changed

.stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 64
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml

src/openai/resources/beta/threads/runs/runs.py

+34
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@ def create(
109109
None,
110110
]
111111
| NotGiven = NOT_GIVEN,
112+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
112113
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
113114
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
114115
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -163,6 +164,10 @@ def create(
163164
model associated with the assistant. If not, the model associated with the
164165
assistant will be used.
165166
167+
parallel_tool_calls: Whether to enable
168+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
169+
during tool use.
170+
166171
response_format: Specifies the format that the model must output. Compatible with
167172
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
168173
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -257,6 +262,7 @@ def create(
257262
None,
258263
]
259264
| NotGiven = NOT_GIVEN,
265+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
260266
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
261267
temperature: Optional[float] | NotGiven = NOT_GIVEN,
262268
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -314,6 +320,10 @@ def create(
314320
model associated with the assistant. If not, the model associated with the
315321
assistant will be used.
316322
323+
parallel_tool_calls: Whether to enable
324+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
325+
during tool use.
326+
317327
response_format: Specifies the format that the model must output. Compatible with
318328
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
319329
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -404,6 +414,7 @@ def create(
404414
None,
405415
]
406416
| NotGiven = NOT_GIVEN,
417+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
407418
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
408419
temperature: Optional[float] | NotGiven = NOT_GIVEN,
409420
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -461,6 +472,10 @@ def create(
461472
model associated with the assistant. If not, the model associated with the
462473
assistant will be used.
463474
475+
parallel_tool_calls: Whether to enable
476+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
477+
during tool use.
478+
464479
response_format: Specifies the format that the model must output. Compatible with
465480
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
466481
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -550,6 +565,7 @@ def create(
550565
None,
551566
]
552567
| NotGiven = NOT_GIVEN,
568+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
553569
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
554570
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
555571
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -579,6 +595,7 @@ def create(
579595
"max_prompt_tokens": max_prompt_tokens,
580596
"metadata": metadata,
581597
"model": model,
598+
"parallel_tool_calls": parallel_tool_calls,
582599
"response_format": response_format,
583600
"stream": stream,
584601
"temperature": temperature,
@@ -1666,6 +1683,7 @@ async def create(
16661683
None,
16671684
]
16681685
| NotGiven = NOT_GIVEN,
1686+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
16691687
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
16701688
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
16711689
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1720,6 +1738,10 @@ async def create(
17201738
model associated with the assistant. If not, the model associated with the
17211739
assistant will be used.
17221740
1741+
parallel_tool_calls: Whether to enable
1742+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1743+
during tool use.
1744+
17231745
response_format: Specifies the format that the model must output. Compatible with
17241746
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
17251747
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1814,6 +1836,7 @@ async def create(
18141836
None,
18151837
]
18161838
| NotGiven = NOT_GIVEN,
1839+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
18171840
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
18181841
temperature: Optional[float] | NotGiven = NOT_GIVEN,
18191842
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1871,6 +1894,10 @@ async def create(
18711894
model associated with the assistant. If not, the model associated with the
18721895
assistant will be used.
18731896
1897+
parallel_tool_calls: Whether to enable
1898+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1899+
during tool use.
1900+
18741901
response_format: Specifies the format that the model must output. Compatible with
18751902
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
18761903
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1961,6 +1988,7 @@ async def create(
19611988
None,
19621989
]
19631990
| NotGiven = NOT_GIVEN,
1991+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
19641992
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
19651993
temperature: Optional[float] | NotGiven = NOT_GIVEN,
19661994
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2018,6 +2046,10 @@ async def create(
20182046
model associated with the assistant. If not, the model associated with the
20192047
assistant will be used.
20202048
2049+
parallel_tool_calls: Whether to enable
2050+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
2051+
during tool use.
2052+
20212053
response_format: Specifies the format that the model must output. Compatible with
20222054
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
20232055
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -2107,6 +2139,7 @@ async def create(
21072139
None,
21082140
]
21092141
| NotGiven = NOT_GIVEN,
2142+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
21102143
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
21112144
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
21122145
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -2136,6 +2169,7 @@ async def create(
21362169
"max_prompt_tokens": max_prompt_tokens,
21372170
"metadata": metadata,
21382171
"model": model,
2172+
"parallel_tool_calls": parallel_tool_calls,
21392173
"response_format": response_format,
21402174
"stream": stream,
21412175
"temperature": temperature,

src/openai/resources/beta/threads/threads.py

+34
Original file line numberDiff line numberDiff line change
@@ -291,6 +291,7 @@ def create_and_run(
291291
None,
292292
]
293293
| NotGiven = NOT_GIVEN,
294+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
294295
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
295296
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
296297
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -340,6 +341,10 @@ def create_and_run(
340341
model associated with the assistant. If not, the model associated with the
341342
assistant will be used.
342343
344+
parallel_tool_calls: Whether to enable
345+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
346+
during tool use.
347+
343348
response_format: Specifies the format that the model must output. Compatible with
344349
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
345350
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -438,6 +443,7 @@ def create_and_run(
438443
None,
439444
]
440445
| NotGiven = NOT_GIVEN,
446+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
441447
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
442448
temperature: Optional[float] | NotGiven = NOT_GIVEN,
443449
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -490,6 +496,10 @@ def create_and_run(
490496
model associated with the assistant. If not, the model associated with the
491497
assistant will be used.
492498
499+
parallel_tool_calls: Whether to enable
500+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
501+
during tool use.
502+
493503
response_format: Specifies the format that the model must output. Compatible with
494504
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
495505
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -584,6 +594,7 @@ def create_and_run(
584594
None,
585595
]
586596
| NotGiven = NOT_GIVEN,
597+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
587598
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
588599
temperature: Optional[float] | NotGiven = NOT_GIVEN,
589600
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -636,6 +647,10 @@ def create_and_run(
636647
model associated with the assistant. If not, the model associated with the
637648
assistant will be used.
638649
650+
parallel_tool_calls: Whether to enable
651+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
652+
during tool use.
653+
639654
response_format: Specifies the format that the model must output. Compatible with
640655
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
641656
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -729,6 +744,7 @@ def create_and_run(
729744
None,
730745
]
731746
| NotGiven = NOT_GIVEN,
747+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
732748
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
733749
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
734750
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -756,6 +772,7 @@ def create_and_run(
756772
"max_prompt_tokens": max_prompt_tokens,
757773
"metadata": metadata,
758774
"model": model,
775+
"parallel_tool_calls": parallel_tool_calls,
759776
"response_format": response_format,
760777
"stream": stream,
761778
"temperature": temperature,
@@ -1284,6 +1301,7 @@ async def create_and_run(
12841301
None,
12851302
]
12861303
| NotGiven = NOT_GIVEN,
1304+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
12871305
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
12881306
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
12891307
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1333,6 +1351,10 @@ async def create_and_run(
13331351
model associated with the assistant. If not, the model associated with the
13341352
assistant will be used.
13351353
1354+
parallel_tool_calls: Whether to enable
1355+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1356+
during tool use.
1357+
13361358
response_format: Specifies the format that the model must output. Compatible with
13371359
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
13381360
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1431,6 +1453,7 @@ async def create_and_run(
14311453
None,
14321454
]
14331455
| NotGiven = NOT_GIVEN,
1456+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
14341457
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
14351458
temperature: Optional[float] | NotGiven = NOT_GIVEN,
14361459
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1483,6 +1506,10 @@ async def create_and_run(
14831506
model associated with the assistant. If not, the model associated with the
14841507
assistant will be used.
14851508
1509+
parallel_tool_calls: Whether to enable
1510+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1511+
during tool use.
1512+
14861513
response_format: Specifies the format that the model must output. Compatible with
14871514
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
14881515
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1577,6 +1604,7 @@ async def create_and_run(
15771604
None,
15781605
]
15791606
| NotGiven = NOT_GIVEN,
1607+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
15801608
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
15811609
temperature: Optional[float] | NotGiven = NOT_GIVEN,
15821610
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1629,6 +1657,10 @@ async def create_and_run(
16291657
model associated with the assistant. If not, the model associated with the
16301658
assistant will be used.
16311659
1660+
parallel_tool_calls: Whether to enable
1661+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1662+
during tool use.
1663+
16321664
response_format: Specifies the format that the model must output. Compatible with
16331665
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
16341666
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1722,6 +1754,7 @@ async def create_and_run(
17221754
None,
17231755
]
17241756
| NotGiven = NOT_GIVEN,
1757+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
17251758
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
17261759
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
17271760
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1749,6 +1782,7 @@ async def create_and_run(
17491782
"max_prompt_tokens": max_prompt_tokens,
17501783
"metadata": metadata,
17511784
"model": model,
1785+
"parallel_tool_calls": parallel_tool_calls,
17521786
"response_format": response_format,
17531787
"stream": stream,
17541788
"temperature": temperature,

0 commit comments

Comments
 (0)