@@ -26,7 +26,13 @@ function testLLM(
    skip,
    testFim,
    testToolCall,
-  }: { skip?: boolean; testFim?: boolean; testToolCall?: boolean },
+    timeout,
+  }: {
+    skip?: boolean;
+    testFim?: boolean;
+    testToolCall?: boolean;
+    timeout?: number;
+  },
 ) {
   if (skip) {
     return;
@@ -37,113 +43,136 @@ function testLLM(
   });
 
   describe(llm.providerName + "/" + llm.model, () => {
-    test("Stream Chat works", async () => {
-      let total = "";
-      for await (const chunk of llm.streamChat(
-        [{ role: "user", content: "Hi" }],
-        new AbortController().signal,
-      )) {
-        total += chunk.content;
-      }
-
-      expect(total.length).toBeGreaterThan(0);
-      return;
-    });
-
-    test("Stream Complete works", async () => {
-      let total = "";
-      for await (const chunk of llm.streamComplete(
-        "Hi",
-        new AbortController().signal,
-      )) {
-        total += chunk;
-      }
-
-      expect(total.length).toBeGreaterThan(0);
-      return;
-    });
-
-    test("Complete works", async () => {
-      const completion = await llm.complete("Hi", new AbortController().signal);
-
-      expect(completion.length).toBeGreaterThan(0);
-      return;
-    });
+    test(
+      "Stream Chat works",
+      async () => {
+        let total = "";
+        for await (const chunk of llm.streamChat(
+          [{ role: "user", content: "Hi" }],
+          new AbortController().signal,
+        )) {
+          total += chunk.content;
+        }
 
-    if (testFim) {
-      test("FIM works", async () => {
+        expect(total.length).toBeGreaterThan(0);
+        return;
+      },
+      timeout,
+    );
+
+    test(
+      "Stream Complete works",
+      async () => {
         let total = "";
-        for await (const chunk of llm.streamFim(
+        for await (const chunk of llm.streamComplete(
          "Hi",
-          "name is ChatGPT.",
          new AbortController().signal,
        )) {
          total += chunk;
        }
 
        expect(total.length).toBeGreaterThan(0);
        return;
-      });
+      },
+      timeout,
+    );
+
+    test(
+      "Complete works",
+      async () => {
+        const completion = await llm.complete(
+          "Hi",
+          new AbortController().signal,
+        );
+
+        expect(completion.length).toBeGreaterThan(0);
+        return;
+      },
+      timeout,
+    );
+
+    if (testFim) {
+      test(
+        "FIM works",
+        async () => {
+          let total = "";
+          for await (const chunk of llm.streamFim(
+            "Hi",
+            "name is ChatGPT.",
+            new AbortController().signal,
+          )) {
+            total += chunk;
+          }
+
+          expect(total.length).toBeGreaterThan(0);
+          return;
+        },
+        timeout,
+      );
     }
 
     if (testToolCall) {
-      test("Tool Call works", async () => {
-        let args = "";
-        let isFirstChunk = true;
-        for await (const chunk of llm.streamChat(
-          [{ role: "user", content: "Hi, my name is Nate." }],
-          new AbortController().signal,
-          {
-            tools: [
-              {
-                displayTitle: "Say Hello",
-                function: {
-                  name: "say_hello",
-                  description: "Say Hello",
-                  parameters: {
-                    type: "object",
-                    properties: {
-                      name: {
-                        type: "string",
-                        description: "The name of the person to greet",
+      test(
+        "Tool Call works",
+        async () => {
+          let args = "";
+          let isFirstChunk = true;
+          for await (const chunk of llm.streamChat(
+            [{ role: "user", content: "Hi, my name is Nate." }],
+            new AbortController().signal,
+            {
+              tools: [
+                {
+                  displayTitle: "Say Hello",
+                  function: {
+                    name: "say_hello",
+                    description: "Say Hello",
+                    parameters: {
+                      type: "object",
+                      properties: {
+                        name: {
+                          type: "string",
+                          description: "The name of the person to greet",
+                        },
                       },
                     },
                   },
+                  type: "function",
+                  wouldLikeTo: "Say hello",
+                  readonly: true,
                 },
+              ],
+              toolChoice: {
                type: "function",
-                wouldLikeTo: "Say hello",
-                readonly: true,
-              },
-            ],
-            toolChoice: {
-              type: "function",
-              function: {
-                name: "say_hello",
+                function: {
+                  name: "say_hello",
+                },
              },
            },
-          },
-        )) {
-          const typedChunk = chunk as AssistantChatMessage;
-          if (!typedChunk.toolCalls) {
-            continue;
+          )) {
+            const typedChunk = chunk as AssistantChatMessage;
+            if (!typedChunk.toolCalls) {
+              continue;
+            }
+            const toolCall = typedChunk.toolCalls[0];
+            args += toolCall.function?.arguments ?? "";
+
+            expect(chunk.role).toBe("assistant");
+            expect(chunk.content).toBe("");
+            expect(typedChunk.toolCalls).toHaveLength(1);
+
+            if (isFirstChunk) {
+              isFirstChunk = false;
+              expect(toolCall.id).toBeDefined();
+              expect(toolCall.function!.name).toBe("say_hello");
+            }
          }
-        const toolCall = typedChunk.toolCalls[0];
-        args += toolCall.function?.arguments ?? "";
 
-        expect(chunk.role).toBe("assistant");
-        expect(chunk.content).toBe("");
-        expect(typedChunk.toolCalls).toHaveLength(1);
-
-        if (isFirstChunk) {
-          isFirstChunk = false;
-          expect(toolCall.id).toBeDefined();
-          expect(toolCall.function!.name).toBe("say_hello");
-        }
-      }
-
-      const parsedArgs = JSON.parse(args);
-      expect(parsedArgs.name).toBe("Nate");
-    });
+          const parsedArgs = JSON.parse(args);
+          expect(parsedArgs.name).toBe("Nate");
+        },
+        timeout,
+      );
     }
   });
 }
@@ -165,7 +194,7 @@ describe("LLM", () => {
   });
   testLLM(
     new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o1-preview" }),
-    { skip: false },
+    { skip: false, timeout: 20000 },
   );
   testLLM(new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o1" }), {
     skip: false,
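
Note: the new `timeout` argument is forwarded as the optional third parameter of Jest's test(name, fn, timeout), which caps that single test's runtime in milliseconds; when it is left undefined, Jest should fall back to its default testTimeout (5000 ms unless configured). A minimal sketch of the pattern, assuming Jest and an illustrative test name:

    // Jest: the third argument to test() overrides the default per-test timeout.
    test(
      "slow model responds",
      async () => {
        const completion = await llm.complete("Hi", new AbortController().signal);
        expect(completion.length).toBeGreaterThan(0);
      },
      20000, // fail this test if it runs longer than 20 seconds
    );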