Skip to content

Commit 8b3da05

Browse files
committed
feat(client): add ._request_id property to object responses (#1707)
1 parent 192b8f2 commit 8b3da05

File tree

6 files changed

+129
-6
lines changed

6 files changed

+129
-6
lines changed

README.md

+18
Original file line numberDiff line numberDiff line change
@@ -417,6 +417,24 @@ Error codes are as follows:
417417
| >=500 | `InternalServerError` |
418418
| N/A | `APIConnectionError` |
419419

420+
## Request IDs
421+
422+
> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)
423+
424+
All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.
425+
426+
```python
427+
completion = await client.chat.completions.create(
428+
messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
429+
)
430+
print(completion._request_id) # req_123
431+
```
432+
433+
Note that unlike other properties that use an `_` prefix, the `_request_id` property
434+
*is* public. Unless documented otherwise, *all* other `_` prefix properties,
435+
methods and modules are *private*.
436+
437+
420438
### Retries
421439

422440
Certain errors are automatically retried 2 times by default, with a short exponential backoff.

src/openai/_legacy_response.py

+5-2
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525

2626
from ._types import NoneType
2727
from ._utils import is_given, extract_type_arg, is_annotated_type
28-
from ._models import BaseModel, is_basemodel
28+
from ._models import BaseModel, is_basemodel, add_request_id
2929
from ._constants import RAW_RESPONSE_HEADER
3030
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
3131
from ._exceptions import APIResponseValidationError
@@ -138,8 +138,11 @@ class MyModel(BaseModel):
138138
if is_given(self._options.post_parser):
139139
parsed = self._options.post_parser(parsed)
140140

141+
if isinstance(parsed, BaseModel):
142+
add_request_id(parsed, self.request_id)
143+
141144
self._parsed_by_type[cache_key] = parsed
142-
return parsed
145+
return cast(R, parsed)
143146

144147
@property
145148
def headers(self) -> httpx.Headers:

src/openai/_models.py

+33-1
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
import os
44
import inspect
5-
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
5+
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
66
from datetime import date, datetime
77
from typing_extensions import (
88
Unpack,
@@ -94,6 +94,23 @@ def model_fields_set(self) -> set[str]:
9494
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
9595
extra: Any = pydantic.Extra.allow # type: ignore
9696

97+
if TYPE_CHECKING:
98+
_request_id: Optional[str] = None
99+
"""The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.
100+
101+
This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:
102+
103+
```py
104+
completion = await client.chat.completions.create(...)
105+
completion._request_id # req_id_xxx
106+
completion.usage._request_id # raises `AttributeError`
107+
```
108+
109+
Note: unlike other properties that use an `_` prefix, this property
110+
*is* public. Unless documented otherwise, all other `_` prefix properties,
111+
methods and modules are *private*.
112+
"""
113+
97114
def to_dict(
98115
self,
99116
*,
@@ -662,6 +679,21 @@ def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
662679
setattr(typ, "__pydantic_config__", config) # noqa: B010
663680

664681

682+
def add_request_id(obj: BaseModel, request_id: str | None) -> None:
683+
obj._request_id = request_id
684+
685+
# in Pydantic v1, using setattr like we do above causes the attribute
686+
# to be included when serializing the model which we don't want in this
687+
# case so we need to explicitly exclude it
688+
if not PYDANTIC_V2:
689+
try:
690+
exclude_fields = obj.__exclude_fields__ # type: ignore
691+
except AttributeError:
692+
cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
693+
else:
694+
cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}
695+
696+
665697
# our use of subclasssing here causes weirdness for type checkers,
666698
# so we just pretend that we don't subclass
667699
if TYPE_CHECKING:

src/openai/_response.py

+9-3
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626

2727
from ._types import NoneType
2828
from ._utils import is_given, extract_type_arg, is_annotated_type, extract_type_var_from_base
29-
from ._models import BaseModel, is_basemodel
29+
from ._models import BaseModel, is_basemodel, add_request_id
3030
from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
3131
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
3232
from ._exceptions import OpenAIError, APIResponseValidationError
@@ -315,8 +315,11 @@ class MyModel(BaseModel):
315315
if is_given(self._options.post_parser):
316316
parsed = self._options.post_parser(parsed)
317317

318+
if isinstance(parsed, BaseModel):
319+
add_request_id(parsed, self.request_id)
320+
318321
self._parsed_by_type[cache_key] = parsed
319-
return parsed
322+
return cast(R, parsed)
320323

321324
def read(self) -> bytes:
322325
"""Read and return the binary response content."""
@@ -419,8 +422,11 @@ class MyModel(BaseModel):
419422
if is_given(self._options.post_parser):
420423
parsed = self._options.post_parser(parsed)
421424

425+
if isinstance(parsed, BaseModel):
426+
add_request_id(parsed, self.request_id)
427+
422428
self._parsed_by_type[cache_key] = parsed
423-
return parsed
429+
return cast(R, parsed)
424430

425431
async def read(self) -> bytes:
426432
"""Read and return the binary response content."""

tests/test_legacy_response.py

+21
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,27 @@ def test_response_parse_custom_model(client: OpenAI) -> None:
6666
assert obj.bar == 2
6767

6868

69+
def test_response_basemodel_request_id(client: OpenAI) -> None:
70+
response = LegacyAPIResponse(
71+
raw=httpx.Response(
72+
200,
73+
headers={"x-request-id": "my-req-id"},
74+
content=json.dumps({"foo": "hello!", "bar": 2}),
75+
),
76+
client=client,
77+
stream=False,
78+
stream_cls=None,
79+
cast_to=str,
80+
options=FinalRequestOptions.construct(method="get", url="/foo"),
81+
)
82+
83+
obj = response.parse(to=CustomModel)
84+
assert obj._request_id == "my-req-id"
85+
assert obj.foo == "hello!"
86+
assert obj.bar == 2
87+
assert obj.to_dict() == {"foo": "hello!", "bar": 2}
88+
89+
6990
def test_response_parse_annotated_type(client: OpenAI) -> None:
7091
response = LegacyAPIResponse(
7192
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),

tests/test_response.py

+43
Original file line numberDiff line numberDiff line change
@@ -156,6 +156,49 @@ async def test_async_response_parse_custom_model(async_client: AsyncOpenAI) -> N
156156
assert obj.bar == 2
157157

158158

159+
def test_response_basemodel_request_id(client: OpenAI) -> None:
160+
response = APIResponse(
161+
raw=httpx.Response(
162+
200,
163+
headers={"x-request-id": "my-req-id"},
164+
content=json.dumps({"foo": "hello!", "bar": 2}),
165+
),
166+
client=client,
167+
stream=False,
168+
stream_cls=None,
169+
cast_to=str,
170+
options=FinalRequestOptions.construct(method="get", url="/foo"),
171+
)
172+
173+
obj = response.parse(to=CustomModel)
174+
assert obj._request_id == "my-req-id"
175+
assert obj.foo == "hello!"
176+
assert obj.bar == 2
177+
assert obj.to_dict() == {"foo": "hello!", "bar": 2}
178+
179+
180+
@pytest.mark.asyncio
181+
async def test_async_response_basemodel_request_id(client: OpenAI) -> None:
182+
response = AsyncAPIResponse(
183+
raw=httpx.Response(
184+
200,
185+
headers={"x-request-id": "my-req-id"},
186+
content=json.dumps({"foo": "hello!", "bar": 2}),
187+
),
188+
client=client,
189+
stream=False,
190+
stream_cls=None,
191+
cast_to=str,
192+
options=FinalRequestOptions.construct(method="get", url="/foo"),
193+
)
194+
195+
obj = await response.parse(to=CustomModel)
196+
assert obj._request_id == "my-req-id"
197+
assert obj.foo == "hello!"
198+
assert obj.bar == 2
199+
assert obj.to_dict() == {"foo": "hello!", "bar": 2}
200+
201+
159202
def test_response_parse_annotated_type(client: OpenAI) -> None:
160203
response = APIResponse(
161204
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),

0 commit comments

Comments
 (0)