
Commit 3e19a87

Stainless Bot committed
chore(internal): bump ruff version (#1604)
1 parent a5f5c8e commit 3e19a87

16 files changed (+76, -142 lines)

pyproject.toml (+7, -5)

@@ -83,8 +83,8 @@ format = { chain = [
   "check:ruff",
   "typecheck",
 ]}
-"check:ruff" = "ruff ."
-"fix:ruff" = "ruff --fix ."
+"check:ruff" = "ruff check ."
+"fix:ruff" = "ruff check --fix ."
 
 typecheck = { chain = [
   "typecheck:pyright",
@@ -168,6 +168,11 @@ reportPrivateUsage = false
 line-length = 120
 output-format = "grouped"
 target-version = "py37"
+
+[tool.ruff.format]
+docstring-code-format = true
+
+[tool.ruff.lint]
 select = [
   # isort
   "I",
@@ -198,9 +203,6 @@ unfixable = [
 ]
 ignore-init-module-imports = true
 
-[tool.ruff.format]
-docstring-code-format = true
-
 [tool.ruff.lint.flake8-tidy-imports.banned-api]
 "functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead"
 
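Context for this hunk: newer ruff releases drop the bare `ruff .` invocation in favor of `ruff check .`, lint settings move from the top-level `[tool.ruff]` table into `[tool.ruff.lint]`, and the new `[tool.ruff.format]` table turns on `docstring-code-format`, which also reformats code examples embedded in docstrings. Below is a minimal sketch of the kind of docstring that option affects; `retry_delay` is a hypothetical function for illustration only, not code from this repository.

# Illustrative sketch: with `docstring-code-format = true`, ruff's formatter
# also normalizes the fenced example inside this docstring, not just the
# function body. `retry_delay` is hypothetical, not part of openai-python.


def retry_delay(attempt: int) -> float:
    """Return an exponential backoff delay in seconds.

    Example:
        ```python
        delay = retry_delay(attempt=3)
        assert delay == 4.0
        ```
    """
    return float(2 ** (attempt - 1))


if __name__ == "__main__":
    # Quick sanity check of the hypothetical helper.
    assert retry_delay(attempt=3) == 4.0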

requirements-dev.lock (+1, -1)

@@ -139,7 +139,7 @@ requests==2.31.0
 respx==0.20.2
 rich==13.7.1
     # via inline-snapshot
-ruff==0.1.9
+ruff==0.5.6
 setuptools==68.2.2
     # via nodeenv
 six==1.16.0

src/openai/_base_client.py (+21, -42)

@@ -125,16 +125,14 @@ def __init__(
         self,
         *,
         url: URL,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(
         self,
         *,
         params: Query,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     def __init__(
         self,
@@ -167,8 +165,7 @@ def has_next_page(self) -> bool:
             return False
         return self.next_page_info() is not None
 
-    def next_page_info(self) -> Optional[PageInfo]:
-        ...
+    def next_page_info(self) -> Optional[PageInfo]: ...
 
     def _get_page_items(self) -> Iterable[_T]:  # type: ignore[empty-body]
         ...
@@ -904,8 +901,7 @@ def request(
         *,
         stream: Literal[True],
         stream_cls: Type[_StreamT],
-    ) -> _StreamT:
-        ...
+    ) -> _StreamT: ...
 
     @overload
     def request(
@@ -915,8 +911,7 @@ def request(
         remaining_retries: Optional[int] = None,
         *,
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     def request(
@@ -927,8 +922,7 @@ def request(
         *,
         stream: bool = False,
         stream_cls: Type[_StreamT] | None = None,
-    ) -> ResponseT | _StreamT:
-        ...
+    ) -> ResponseT | _StreamT: ...
 
     def request(
         self,
@@ -1172,8 +1166,7 @@ def get(
         cast_to: Type[ResponseT],
         options: RequestOptions = {},
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     def get(
@@ -1184,8 +1177,7 @@ def get(
         options: RequestOptions = {},
         stream: Literal[True],
         stream_cls: type[_StreamT],
-    ) -> _StreamT:
-        ...
+    ) -> _StreamT: ...
 
     @overload
     def get(
@@ -1196,8 +1188,7 @@ def get(
         options: RequestOptions = {},
         stream: bool,
         stream_cls: type[_StreamT] | None = None,
-    ) -> ResponseT | _StreamT:
-        ...
+    ) -> ResponseT | _StreamT: ...
 
     def get(
         self,
@@ -1223,8 +1214,7 @@ def post(
         options: RequestOptions = {},
         files: RequestFiles | None = None,
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     def post(
@@ -1237,8 +1227,7 @@ def post(
         files: RequestFiles | None = None,
         stream: Literal[True],
         stream_cls: type[_StreamT],
-    ) -> _StreamT:
-        ...
+    ) -> _StreamT: ...
 
     @overload
     def post(
@@ -1251,8 +1240,7 @@ def post(
         files: RequestFiles | None = None,
         stream: bool,
         stream_cls: type[_StreamT] | None = None,
-    ) -> ResponseT | _StreamT:
-        ...
+    ) -> ResponseT | _StreamT: ...
 
     def post(
         self,
@@ -1485,8 +1473,7 @@ async def request(
         *,
         stream: Literal[False] = False,
         remaining_retries: Optional[int] = None,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     async def request(
@@ -1497,8 +1484,7 @@ async def request(
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
         remaining_retries: Optional[int] = None,
-    ) -> _AsyncStreamT:
-        ...
+    ) -> _AsyncStreamT: ...
 
     @overload
     async def request(
@@ -1509,8 +1495,7 @@ async def request(
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
         remaining_retries: Optional[int] = None,
-    ) -> ResponseT | _AsyncStreamT:
-        ...
+    ) -> ResponseT | _AsyncStreamT: ...
 
     async def request(
         self,
@@ -1739,8 +1724,7 @@ async def get(
         cast_to: Type[ResponseT],
         options: RequestOptions = {},
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     async def get(
@@ -1751,8 +1735,7 @@ async def get(
         options: RequestOptions = {},
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
-    ) -> _AsyncStreamT:
-        ...
+    ) -> _AsyncStreamT: ...
 
     @overload
     async def get(
@@ -1763,8 +1746,7 @@ async def get(
         options: RequestOptions = {},
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
-    ) -> ResponseT | _AsyncStreamT:
-        ...
+    ) -> ResponseT | _AsyncStreamT: ...
 
     async def get(
         self,
@@ -1788,8 +1770,7 @@ async def post(
         files: RequestFiles | None = None,
         options: RequestOptions = {},
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     async def post(
@@ -1802,8 +1783,7 @@ async def post(
         options: RequestOptions = {},
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
-    ) -> _AsyncStreamT:
-        ...
+    ) -> _AsyncStreamT: ...
 
     @overload
     async def post(
@@ -1816,8 +1796,7 @@ async def post(
         options: RequestOptions = {},
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
-    ) -> ResponseT | _AsyncStreamT:
-        ...
+    ) -> ResponseT | _AsyncStreamT: ...
 
     async def post(
         self,
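Every hunk in this file (and in the files that follow) is the same mechanical change: the newer ruff formatter collapses a body consisting only of `...` onto the signature line of `@overload` and other type-checking stubs. A minimal sketch of the resulting style is below; `first` is a hypothetical helper used only for illustration, not anything from this client.

# Illustrative sketch of the "ellipsis on the signature line" stub style; the
# `first` helper is hypothetical and not part of openai-python.
from __future__ import annotations

from typing import overload


@overload
def first(value: str) -> str: ...
@overload
def first(value: list[str]) -> str: ...
def first(value: str | list[str]) -> str:
    # The real implementation; the two stubs above exist only for type checkers.
    return value[0] if isinstance(value, list) else value


if __name__ == "__main__":
    assert first(["a", "b"]) == "a"
    assert first("abc") == "abc"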

src/openai/_compat.py (+8, -16)

@@ -159,22 +159,19 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
 # generic models
 if TYPE_CHECKING:
 
-    class GenericModel(pydantic.BaseModel):
-        ...
+    class GenericModel(pydantic.BaseModel): ...
 
 else:
     if PYDANTIC_V2:
         # there no longer needs to be a distinction in v2 but
         # we still have to create our own subclass to avoid
         # inconsistent MRO ordering errors
-        class GenericModel(pydantic.BaseModel):
-            ...
+        class GenericModel(pydantic.BaseModel): ...
 
     else:
         import pydantic.generics
 
-        class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel):
-            ...
+        class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
 
 
 # cached properties
@@ -193,26 +190,21 @@ class typed_cached_property(Generic[_T]):
         func: Callable[[Any], _T]
         attrname: str | None
 
-        def __init__(self, func: Callable[[Any], _T]) -> None:
-            ...
+        def __init__(self, func: Callable[[Any], _T]) -> None: ...
 
         @overload
-        def __get__(self, instance: None, owner: type[Any] | None = None) -> Self:
-            ...
+        def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
 
         @overload
-        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T:
-            ...
+        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
 
         def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
             raise NotImplementedError()
 
-        def __set_name__(self, owner: type[Any], name: str) -> None:
-            ...
+        def __set_name__(self, owner: type[Any], name: str) -> None: ...
 
         # __set__ is not defined at runtime, but @cached_property is designed to be settable
-        def __set__(self, instance: object, value: _T) -> None:
-            ...
+        def __set__(self, instance: object, value: _T) -> None: ...
 else:
     try:
         from functools import cached_property as cached_property

src/openai/_files.py (+4, -8)

@@ -39,13 +39,11 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
 
 
 @overload
-def to_httpx_files(files: None) -> None:
-    ...
+def to_httpx_files(files: None) -> None: ...
 
 
 @overload
-def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
-    ...
+def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
 
 
 def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
@@ -83,13 +81,11 @@ def _read_file_content(file: FileContent) -> HttpxFileContent:
 
 
 @overload
-async def async_to_httpx_files(files: None) -> None:
-    ...
+async def async_to_httpx_files(files: None) -> None: ...
 
 
 @overload
-async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
-    ...
+async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
 
 
 async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:

src/openai/_legacy_response.py (+2, -4)

@@ -92,12 +92,10 @@ def request_id(self) -> str | None:
         return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
 
     @overload
-    def parse(self, *, to: type[_T]) -> _T:
-        ...
+    def parse(self, *, to: type[_T]) -> _T: ...
 
     @overload
-    def parse(self) -> R:
-        ...
+    def parse(self) -> R: ...
 
     def parse(self, *, to: type[_T] | None = None) -> R | _T:
         """Returns the rich python representation of this response's data.

src/openai/_response.py (+4, -8)

@@ -268,12 +268,10 @@ def request_id(self) -> str | None:
         return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
 
     @overload
-    def parse(self, *, to: type[_T]) -> _T:
-        ...
+    def parse(self, *, to: type[_T]) -> _T: ...
 
     @overload
-    def parse(self) -> R:
-        ...
+    def parse(self) -> R: ...
 
     def parse(self, *, to: type[_T] | None = None) -> R | _T:
         """Returns the rich python representation of this response's data.
@@ -376,12 +374,10 @@ def request_id(self) -> str | None:
         return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
 
     @overload
-    async def parse(self, *, to: type[_T]) -> _T:
-        ...
+    async def parse(self, *, to: type[_T]) -> _T: ...
 
     @overload
-    async def parse(self) -> R:
-        ...
+    async def parse(self) -> R: ...
 
     async def parse(self, *, to: type[_T] | None = None) -> R | _T:
         """Returns the rich python representation of this response's data.
