@@ -143,7 +143,6 @@ export declare interface StartChatSessionRequest extends StartChatParams {
  * Session for a multiturn chat with the model
  */
 export class ChatSession {
-  // Substitute apiKey for these in Labs
   private project: string;
   private location: string;
 
@@ -157,7 +156,7 @@ export class ChatSession {
   get history(): Content[] {
     return this.historyInternal;
   }
-
+
   constructor(request: StartChatSessionRequest) {
     this.project = request._vertex_instance.project;
     this.location = request._vertex_instance.location;
@@ -166,48 +165,62 @@ export class ChatSession {
     this._vertex_instance = request._vertex_instance;
   }
 
-  // TODO: add streamSendMessage that calls streamGenerateContent
   async sendMessage(request: string |
       Array<string | Part>): Promise<GenerateContentResult> {
-    let newParts: Part[] = [];
-
-    if (typeof request === 'string') {
-      newParts = [{text: request}];
-    } else if (Array.isArray(request)) {
-      for (const item of request) {
-        if (typeof item === 'string') {
-          newParts.push({text: item});
-        } else {
-          newParts.push(item);
-        }
-      }
-    };
-
-    const newContent: Content = {role: 'user', parts: newParts};
-
+    const newContent: Content = formulateNewContent(request);
     let generateContentrequest: GenerateContentRequest = {
       contents: this.historyInternal.concat([newContent]),
       safety_settings: this.safety_settings,
       generation_config: this.generation_config,
     };
 
-    const generateContentResponse =
+    const generateContentResult =
        await this._model_instance.generateContent(generateContentrequest);
-
+    const generateContentResponse = await generateContentResult.response;
     // Only push the latest message to history if the response returned a result
-    if (generateContentResponse.response.candidates.length !== 0) {
+    if (generateContentResponse.candidates.length !== 0) {
       this.historyInternal.push(newContent);
       this.historyInternal.push(
-          generateContentResponse.response.candidates[0].content);
+          generateContentResponse.candidates[0].content);
     } else {
       // TODO: handle promptFeedback in the response
-      throw new Error('Did not get a response from the model');
+      throw new Error('Did not get a candidate from the model');
     }
 
-    return generateContentResponse;
+    return Promise.resolve({response: generateContentResponse});
   }
-}
 
+  async streamSendMessage(request: string |
+      Array<string | Part>): Promise<StreamGenerateContentResult> {
+    const newContent: Content = formulateNewContent(request);
+    let generateContentrequest: GenerateContentRequest = {
+      contents: this.historyInternal.concat([newContent]),
+      safety_settings: this.safety_settings,
+      generation_config: this.generation_config,
+    };
+
+    const streamGenerateContentResult =
+        await this._model_instance.streamGenerateContent(generateContentrequest);
+    const streamGenerateContentResponse =
+        await streamGenerateContentResult.response;
+    // Only push the latest message to history if the response returned a result
+    if (streamGenerateContentResponse.candidates.length !== 0) {
+      this.historyInternal.push(newContent);
+      this.historyInternal.push(
+          streamGenerateContentResponse.candidates[0].content);
+    } else {
+      // TODO: handle promptFeedback in the response
+      throw new Error('Did not get a candidate from the model');
+    }
+
+    return Promise.resolve(
+        {
+          response: Promise.resolve(streamGenerateContentResponse),
+          stream: streamGenerateContentResult.stream,
+        }
+    );
+  }
+}
 
 /**
  * Base class for generative models.
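
For reviewers, a minimal usage sketch of the reworked chat surface (illustrative only, not part of this diff; it assumes a GenerativeModel instance is available and that streamed chunks carry candidates like the aggregated response, neither of which is shown in these hunks):

// Hypothetical usage sketch. Only startChat, history, sendMessage, and
// streamSendMessage come from the hunks in this PR; everything else is assumed.
async function demo(model: GenerativeModel): Promise<void> {
  const chat: ChatSession = model.startChat({history: []});

  // sendMessage now resolves to {response}, mirroring generateContent's shape.
  const result = await chat.sendMessage('Hello');
  console.log(result.response.candidates[0].content);
  console.log(chat.history.length);  // 2: the user turn plus the model reply

  // streamSendMessage exposes the chunk stream plus the aggregated response.
  const streamResult = await chat.streamSendMessage('Tell me more');
  for await (const chunk of streamResult.stream) {
    console.log(chunk.candidates[0].content);  // assumed chunk shape
  }
  const aggregated = await streamResult.response;
  console.log(aggregated.candidates[0].content);
}
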
@@ -345,7 +358,6 @@ export class GenerativeModel {
     }
   }
 
-
   startChat(request: StartChatParams): ChatSession {
     const startChatRequest = {
       history: request.history,
@@ -358,3 +370,23 @@ export class GenerativeModel {
     return new ChatSession(startChatRequest);
   }
 }
+
+function formulateNewContent(request: string | Array<string | Part>): Content {
+
+  let newParts: Part[] = [];
+
+  if (typeof request === 'string') {
+    newParts = [{text: request}];
+  } else if (Array.isArray(request)) {
+    for (const item of request) {
+      if (typeof item === 'string') {
+        newParts.push({text: item});
+      } else {
+        newParts.push(item);
+      }
+    }
+  };
+
+  const newContent: Content = {role: 'user', parts: newParts};
+  return newContent;
+}
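
The extracted helper gives sendMessage and streamSendMessage a single normalization path. Roughly, the expected mapping (illustrative expectations derived from the branches above, not test cases from this PR):

formulateNewContent('hi');
// -> {role: 'user', parts: [{text: 'hi'}]}

formulateNewContent(['hi', {text: 'there'}]);  // strings and Parts may be mixed
// -> {role: 'user', parts: [{text: 'hi'}, {text: 'there'}]}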