Skip to content

Commit b7f2298

Browse files
committed
chore(internal): minor refactor of tests (#1471)
1 parent 2fcc0e4 commit b7f2298

File tree

2 files changed: +24 −24 lines changed

tests/api_resources/audio/test_speech.py

+8-8
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
2727
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
2828
speech = client.audio.speech.create(
2929
input="string",
30-
model="tts-1",
30+
model="string",
3131
voice="alloy",
3232
)
3333
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
3939
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
4040
speech = client.audio.speech.create(
4141
input="string",
42-
model="tts-1",
42+
model="string",
4343
voice="alloy",
4444
response_format="mp3",
4545
speed=0.25,
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No
5454

5555
response = client.audio.speech.with_raw_response.create(
5656
input="string",
57-
model="tts-1",
57+
model="string",
5858
voice="alloy",
5959
)
6060

@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter)
6969
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
7070
with client.audio.speech.with_streaming_response.create(
7171
input="string",
72-
model="tts-1",
72+
model="string",
7373
voice="alloy",
7474
) as response:
7575
assert not response.is_closed
@@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo
9090
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
9191
speech = await async_client.audio.speech.create(
9292
input="string",
93-
model="tts-1",
93+
model="string",
9494
voice="alloy",
9595
)
9696
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re
102102
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
103103
speech = await async_client.audio.speech.create(
104104
input="string",
105-
model="tts-1",
105+
model="string",
106106
voice="alloy",
107107
response_format="mp3",
108108
speed=0.25,
@@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock:
117117

118118
response = await async_client.audio.speech.with_raw_response.create(
119119
input="string",
120-
model="tts-1",
120+
model="string",
121121
voice="alloy",
122122
)
123123

@@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_
132132
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
133133
async with async_client.audio.speech.with_streaming_response.create(
134134
input="string",
135-
model="tts-1",
135+
model="string",
136136
voice="alloy",
137137
) as response:
138138
assert not response.is_closed

tests/api_resources/test_completions.py

+16-16
Original file line numberDiff line numberDiff line change
@@ -20,15 +20,15 @@ class TestCompletions:
2020
@parametrize
2121
def test_method_create_overload_1(self, client: OpenAI) -> None:
2222
completion = client.completions.create(
23-
model="gpt-3.5-turbo-instruct",
23+
model="string",
2424
prompt="This is a test.",
2525
)
2626
assert_matches_type(Completion, completion, path=["response"])
2727

2828
@parametrize
2929
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
3030
completion = client.completions.create(
31-
model="gpt-3.5-turbo-instruct",
31+
model="string",
3232
prompt="This is a test.",
3333
best_of=0,
3434
echo=True,
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
5252
@parametrize
5353
def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
5454
response = client.completions.with_raw_response.create(
55-
model="gpt-3.5-turbo-instruct",
55+
model="string",
5656
prompt="This is a test.",
5757
)
5858

@@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
6464
@parametrize
6565
def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
6666
with client.completions.with_streaming_response.create(
67-
model="gpt-3.5-turbo-instruct",
67+
model="string",
6868
prompt="This is a test.",
6969
) as response:
7070
assert not response.is_closed
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
7878
@parametrize
7979
def test_method_create_overload_2(self, client: OpenAI) -> None:
8080
completion_stream = client.completions.create(
81-
model="gpt-3.5-turbo-instruct",
81+
model="string",
8282
prompt="This is a test.",
8383
stream=True,
8484
)
@@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
8787
@parametrize
8888
def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
8989
completion_stream = client.completions.create(
90-
model="gpt-3.5-turbo-instruct",
90+
model="string",
9191
prompt="This is a test.",
9292
stream=True,
9393
best_of=0,
@@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
111111
@parametrize
112112
def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
113113
response = client.completions.with_raw_response.create(
114-
model="gpt-3.5-turbo-instruct",
114+
model="string",
115115
prompt="This is a test.",
116116
stream=True,
117117
)
@@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
123123
@parametrize
124124
def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
125125
with client.completions.with_streaming_response.create(
126-
model="gpt-3.5-turbo-instruct",
126+
model="string",
127127
prompt="This is a test.",
128128
stream=True,
129129
) as response:
@@ -142,15 +142,15 @@ class TestAsyncCompletions:
142142
@parametrize
143143
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
144144
completion = await async_client.completions.create(
145-
model="gpt-3.5-turbo-instruct",
145+
model="string",
146146
prompt="This is a test.",
147147
)
148148
assert_matches_type(Completion, completion, path=["response"])
149149

150150
@parametrize
151151
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
152152
completion = await async_client.completions.create(
153-
model="gpt-3.5-turbo-instruct",
153+
model="string",
154154
prompt="This is a test.",
155155
best_of=0,
156156
echo=True,
@@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
174174
@parametrize
175175
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
176176
response = await async_client.completions.with_raw_response.create(
177-
model="gpt-3.5-turbo-instruct",
177+
model="string",
178178
prompt="This is a test.",
179179
)
180180

@@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
186186
@parametrize
187187
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
188188
async with async_client.completions.with_streaming_response.create(
189-
model="gpt-3.5-turbo-instruct",
189+
model="string",
190190
prompt="This is a test.",
191191
) as response:
192192
assert not response.is_closed
@@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
200200
@parametrize
201201
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
202202
completion_stream = await async_client.completions.create(
203-
model="gpt-3.5-turbo-instruct",
203+
model="string",
204204
prompt="This is a test.",
205205
stream=True,
206206
)
@@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
209209
@parametrize
210210
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
211211
completion_stream = await async_client.completions.create(
212-
model="gpt-3.5-turbo-instruct",
212+
model="string",
213213
prompt="This is a test.",
214214
stream=True,
215215
best_of=0,
@@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
233233
@parametrize
234234
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
235235
response = await async_client.completions.with_raw_response.create(
236-
model="gpt-3.5-turbo-instruct",
236+
model="string",
237237
prompt="This is a test.",
238238
stream=True,
239239
)
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
245245
@parametrize
246246
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
247247
async with async_client.completions.with_streaming_response.create(
248-
model="gpt-3.5-turbo-instruct",
248+
model="string",
249249
prompt="This is a test.",
250250
stream=True,
251251
) as response:

0 commit comments

Comments (0)