Commit ad35f02

Merge pull request continuedev#3915 from continuedev/dallin/o1-timeout
LLM Test timeout
2 parents 0121119 + 30f4d63
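The PR threads an optional `timeout` through the `testLLM` test helper and forwards it as the third argument of Jest's `test(name, fn, timeout)`, overriding Jest's default 5000 ms limit for slow models such as o1-preview. A minimal sketch of the same pattern, assuming a plain Jest setup (the `testWithTimeout` helper name is hypothetical, not part of the PR):

```ts
// Jest's test() accepts an optional per-test timeout in milliseconds as its
// third argument. Forwarding `undefined` keeps Jest's default (5000 ms), so
// an optional setting can be passed straight through.
function testWithTimeout(
  name: string,
  fn: () => Promise<void>,
  { timeout }: { timeout?: number } = {},
) {
  test(name, fn, timeout); // undefined → Jest's default timeout applies
}

// Hypothetical usage: give a slow reasoning model 20 s instead of 5 s.
testWithTimeout(
  "streams a chat response",
  async () => {
    // ... call the model and assert on the streamed output ...
  },
  { timeout: 20000 },
);
```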

File tree

1 file changed (+116 −87 lines)


core/llm/llm.test.ts

Lines changed: 116 additions & 87 deletions
```diff
@@ -26,7 +26,13 @@ function testLLM(
   skip,
   testFim,
   testToolCall,
-}: { skip?: boolean; testFim?: boolean; testToolCall?: boolean },
+  timeout,
+}: {
+  skip?: boolean;
+  testFim?: boolean;
+  testToolCall?: boolean;
+  timeout?: number;
+},
 ) {
   if (skip) {
     return;
@@ -37,113 +43,136 @@ function testLLM(
   });

   describe(llm.providerName + "/" + llm.model, () => {
-    test("Stream Chat works", async () => {
-      let total = "";
-      for await (const chunk of llm.streamChat(
-        [{ role: "user", content: "Hi" }],
-        new AbortController().signal,
-      )) {
-        total += chunk.content;
-      }
-
-      expect(total.length).toBeGreaterThan(0);
-      return;
-    });
-
-    test("Stream Complete works", async () => {
-      let total = "";
-      for await (const chunk of llm.streamComplete(
-        "Hi",
-        new AbortController().signal,
-      )) {
-        total += chunk;
-      }
-
-      expect(total.length).toBeGreaterThan(0);
-      return;
-    });
-
-    test("Complete works", async () => {
-      const completion = await llm.complete("Hi", new AbortController().signal);
-
-      expect(completion.length).toBeGreaterThan(0);
-      return;
-    });
+    test(
+      "Stream Chat works",
+      async () => {
+        let total = "";
+        for await (const chunk of llm.streamChat(
+          [{ role: "user", content: "Hi" }],
+          new AbortController().signal,
+        )) {
+          total += chunk.content;
+        }

-    if (testFim) {
-      test("FIM works", async () => {
+        expect(total.length).toBeGreaterThan(0);
+        return;
+      },
+      timeout,
+    );
+
+    test(
+      "Stream Complete works",
+      async () => {
         let total = "";
-      for await (const chunk of llm.streamFim(
+        for await (const chunk of llm.streamComplete(
           "Hi",
-        "name is ChatGPT.",
           new AbortController().signal,
         )) {
           total += chunk;
         }

         expect(total.length).toBeGreaterThan(0);
         return;
-      });
+      },
+      timeout,
+    );
+
+    test(
+      "Complete works",
+      async () => {
+        const completion = await llm.complete(
+          "Hi",
+          new AbortController().signal,
+        );
+
+        expect(completion.length).toBeGreaterThan(0);
+        return;
+      },
+      timeout,
+    );
+
+    if (testFim) {
+      test(
+        "FIM works",
+        async () => {
+          let total = "";
+          for await (const chunk of llm.streamFim(
+            "Hi",
+            "name is ChatGPT.",
+            new AbortController().signal,
+          )) {
+            total += chunk;
+          }
+
+          expect(total.length).toBeGreaterThan(0);
+          return;
+        },
+        timeout,
+      );
     }

     if (testToolCall) {
-      test("Tool Call works", async () => {
-        let args = "";
-        let isFirstChunk = true;
-        for await (const chunk of llm.streamChat(
-          [{ role: "user", content: "Hi, my name is Nate." }],
-          new AbortController().signal,
-          {
-            tools: [
-              {
-                displayTitle: "Say Hello",
-                function: {
-                  name: "say_hello",
-                  description: "Say Hello",
-                  parameters: {
-                    type: "object",
-                    properties: {
-                      name: {
-                        type: "string",
-                        description: "The name of the person to greet",
+      test(
+        "Tool Call works",
+        async () => {
+          let args = "";
+          let isFirstChunk = true;
+          for await (const chunk of llm.streamChat(
+            [{ role: "user", content: "Hi, my name is Nate." }],
+            new AbortController().signal,
+            {
+              tools: [
+                {
+                  displayTitle: "Say Hello",
+                  function: {
+                    name: "say_hello",
+                    description: "Say Hello",
+                    parameters: {
+                      type: "object",
+                      properties: {
+                        name: {
+                          type: "string",
+                          description: "The name of the person to greet",
+                        },
                       },
                     },
                   },
+                  type: "function",
+                  wouldLikeTo: "Say hello",
+                  readonly: true,
                 },
+              ],
+              toolChoice: {
                 type: "function",
-                wouldLikeTo: "Say hello",
-                readonly: true,
-              },
-            ],
-            toolChoice: {
-              type: "function",
-              function: {
-                name: "say_hello",
+                function: {
+                  name: "say_hello",
+                },
               },
             },
-          },
-        )) {
-          const typedChunk = chunk as AssistantChatMessage;
-          if (!typedChunk.toolCalls) {
-            continue;
+          )) {
+            const typedChunk = chunk as AssistantChatMessage;
+            if (!typedChunk.toolCalls) {
+              continue;
+            }
+            const toolCall = typedChunk.toolCalls[0];
+            args += toolCall.function?.arguments ?? "";
+
+            expect(chunk.role).toBe("assistant");
+            expect(chunk.content).toBe("");
+            expect(typedChunk.toolCalls).toHaveLength(1);
+
+            if (isFirstChunk) {
+              isFirstChunk = false;
+              expect(toolCall.id).toBeDefined();
+              expect(toolCall.function!.name).toBe("say_hello");
+            }
           }
-        const toolCall = typedChunk.toolCalls[0];
-        args += toolCall.function?.arguments ?? "";

-        expect(chunk.role).toBe("assistant");
-        expect(chunk.content).toBe("");
-        expect(typedChunk.toolCalls).toHaveLength(1);
-
-        if (isFirstChunk) {
-          isFirstChunk = false;
-          expect(toolCall.id).toBeDefined();
-          expect(toolCall.function!.name).toBe("say_hello");
-        }
-      }
-
-      const parsedArgs = JSON.parse(args);
-      expect(parsedArgs.name).toBe("Nate");
-    });
+          const parsedArgs = JSON.parse(args);
+          expect(parsedArgs.name).toBe("Nate");
+        },
+        timeout,
+      );
     }
   });
 }
@@ -165,7 +194,7 @@ describe("LLM", () => {
   });
   testLLM(
     new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o1-preview" }),
-    { skip: false },
+    { skip: false, timeout: 20000 },
   );
   testLLM(new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o1" }), {
     skip: false,
```
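With the change applied, a call such as `testLLM(..., { skip: false, timeout: 20000 })` gives every test in that model's suite 20 seconds; suites that omit `timeout` behave exactly as before, since `test(name, fn, undefined)` leaves Jest's default in place. For comparison, a file-wide override also exists in Jest (a sketch of the alternative, not what this PR does):

```ts
// Alternative: raise the limit for every test in the file at once,
// rather than per test as the PR's third-argument approach does.
jest.setTimeout(20000);
```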
