
Commit 2e10617

removed test added by mistake
1 parent 1cfb5b1 commit 2e10617

File tree

2 files changed: 1 addition & 28 deletions


llama_stack/providers/utils/inference/openai_compat.py

Lines changed: 1 addition & 0 deletions
@@ -1287,6 +1287,7 @@ async def openai_completion(
         user: str | None = None,
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
+        suffix: str | None = None,
     ) -> OpenAICompletion:
         if stream:
             raise ValueError(f"{self.__class__.__name__} doesn't support streaming openai completions")
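
For context, suffix is the OpenAI completions-API field for text that should follow the generated completion (fill-in-the-middle style), which is why it is threaded through openai_completion rather than the chat-completion path. Below is a minimal sketch of how a caller might exercise the new parameter through an OpenAI-compatible client; the base URL, API key, and model id are placeholder assumptions, not values taken from this commit.

# Sketch only, assuming an OpenAI-compatible server is running locally.
# The base URL, API key, and model id are hypothetical placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

response = client.completions.create(
    model="my-text-model",                      # placeholder model id
    prompt="def add(a: int, b: int) -> int:",   # text before the insertion point
    suffix="\n\nprint(add(1, 2))",              # text the completion should lead into
    max_tokens=64,
    stream=False,  # the patched method raises ValueError when stream=True
)
print(response.choices[0].text)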

tests/integration/inference/test_openai_completion.py

Lines changed: 0 additions & 28 deletions
@@ -237,34 +237,6 @@ def test_openai_chat_completion_non_streaming(compat_client, client_with_models,
     assert expected.lower() in message_content
 
 
-@pytest.mark.parametrize(
-    "test_case",
-    [
-        "inference:chat_completion:non_streaming_suffix_01",
-        "inference:chat_completion:non_streaming_suffix_02",
-    ],
-)
-def test_openai_chat_completion_non_streaming_suffix(compat_client, client_with_models, text_model_id, test_case):
-    skip_if_model_doesnt_support_openai_chat_completion(client_with_models, text_model_id)
-    tc = TestCase(test_case)
-    question = tc["question"]
-    expected = tc["expected"]
-
-    response = compat_client.chat.completions.create(
-        model=text_model_id,
-        messages=[
-            {
-                "role": "user",
-                "content": question,
-            }
-        ],
-        stream=False,
-    )
-    message_content = response.choices[0].message.content.lower().strip()
-    assert len(message_content) > 0
-    assert expected.lower() in message_content
-
-
 @pytest.mark.parametrize(
     "test_case",
     [
