
Commit 6c0c31c

yyyu-google authored and copybara-github committed
fix: remove defaulting value of candidates in unary api. remove unused variables and imports. remove throwing GoogleAIError when candidates undefined or empty.
PiperOrigin-RevId: 618243333
1 parent 673bcb3 commit 6c0c31c

File tree

6 files changed (+1, -150 lines)

src/functions/post_fetch_processing.ts (+1, -1)

@@ -357,7 +357,7 @@ export async function processUnary(
   }

   return Promise.resolve({
-    response: {candidates: []},
+    response: {} as GenerateContentResponse,
   });
 }

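Note on the change above: the unary path previously synthesized {candidates: []} when the backend omitted candidates; it now passes the raw response through, so candidates can be undefined. A minimal caller-side sketch of the new contract, assuming the package's exported GenerateContentResult type (the helper name is illustrative, not part of the SDK):

import {GenerateContentResult} from '@google-cloud/vertexai';

// Hypothetical helper: after this commit, `candidates` may be undefined
// instead of defaulting to [], so guard before indexing into it.
function firstCandidateText(result: GenerateContentResult): string | undefined {
  const candidates = result.response.candidates;
  if (!candidates || candidates.length === 0) {
    return undefined; // no candidates; promptFeedback, if set, may say why
  }
  return candidates[0].content.parts[0]?.text;
}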
src/functions/pre_fetch_processing.ts (-2)

@@ -16,12 +16,10 @@
  */

 import {
-  Content,
   GenerateContentRequest,
   GenerationConfig,
   SafetySetting,
 } from '../types/content';
-import {ClientError} from '../types/errors';
 import * as constants from '../util/constants';

 export function formatContentRequest(

src/functions/test/functions_test.ts (-58)

@@ -31,7 +31,6 @@ import {
   StreamGenerateContentResult,
   Tool,
 } from '../../types';
-import {FunctionCall} from '../../types/content';
 import {constants} from '../../util';
 import {countTokens} from '../count_tokens';
 import {generateContent, generateContentStream} from '../generate_content';
@@ -157,25 +156,6 @@ const TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL = {
   candidates: TEST_CANDIDATES_WITH_FUNCTION_CALL,
 };

-const TEST_FUNCTION_RESPONSE_PART = [
-  {
-    functionResponse: {
-      name: 'get_current_weather',
-      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
-    },
-  },
-];
-
-const TEST_CANDIDATES_MISSING_ROLE = [
-  {
-    index: 1,
-    content: {parts: [{text: 'Im doing great! How are you?'}]},
-    finish_reason: 0,
-    finish_message: '',
-    safety_ratings: TEST_SAFETY_RATINGS,
-  },
-];
-
 const TEST_ENDPOINT_BASE_PATH = 'test.googleapis.com';
 const TEST_GCS_FILENAME = 'gs://test_bucket/test_image.jpeg';

@@ -311,8 +291,6 @@ describe('countTokens', () => {
       status: 'INTERNAL_SERVER_ERROR',
     };
     const response = new Response(JSON.stringify(body), fetch500Obj);
-    const expectedErrorMessage =
-      '[VertexAI.GoogleGenerativeAIError]: got status: 500 Internal Server Error. {"code":500,"message":"service is having downtime","status":"INTERNAL_SERVER_ERROR"}';
     spyOn(global, 'fetch').and.resolveTo(response);

     await expectAsync(
@@ -325,17 +303,6 @@ describe('countTokens', () => {
         TEST_API_ENDPOINT
       )
     ).toBeRejected();
-    // TODO: update jasmine version or use flush to uncomment
-    // await countTokens(
-    //   TEST_LOCATION,
-    //   TEST_PROJECT,
-    //   TEST_PUBLISHER_MODEL_ENDPOINT,
-    //   TEST_TOKEN_PROMISE,
-    //   req,
-    //   TEST_API_ENDPOINT
-    // ).catch(e => {
-    //   expect(e.message).toEqual(expectedErrorMessage);
-    // });
   });

   it('throw ClientError when not OK and 4XX', async () => {
@@ -350,8 +317,6 @@ describe('countTokens', () => {
       status: 'INVALID_ARGUMENT',
     };
     const response = new Response(JSON.stringify(body), fetch400Obj);
-    const expectedErrorMessage =
-      '[VertexAI.ClientError]: got status: 400 Bad Request. {"code":400,"message":"request is invalid","status":"INVALID_ARGUMENT"}';
     spyOn(global, 'fetch').and.resolveTo(response);

     await expectAsync(
@@ -364,17 +329,6 @@ describe('countTokens', () => {
         TEST_API_ENDPOINT
       )
     ).toBeRejected();
-    // TODO: update jasmine version or use flush to uncomment
-    // await countTokens(
-    //   TEST_LOCATION,
-    //   TEST_PROJECT,
-    //   TEST_PUBLISHER_MODEL_ENDPOINT,
-    //   TEST_TOKEN_PROMISE,
-    //   req,
-    //   TEST_API_ENDPOINT
-    // ).catch(e => {
-    //   expect(e.message).toEqual(expectedErrorMessage);
-    // });
   });
 });

@@ -503,9 +457,6 @@ describe('generateContent', () => {
     const req: GenerateContentRequest = {
       contents: TEST_USER_CHAT_MESSAGE,
     };
-    const expectedResult: GenerateContentResult = {
-      response: TEST_MODEL_RESPONSE,
-    };
     fetchSpy.and.resolveTo(buildFetchResponse(TEST_MODEL_RESPONSE));
     await generateContent(
       TEST_LOCATION,
@@ -526,9 +477,6 @@ describe('generateContent', () => {
       generationConfig: {topK: 0},
       safetySettings: [],
     };
-    const expectedResult: GenerateContentResult = {
-      response: TEST_MODEL_RESPONSE,
-    };
     fetchSpy.and.resolveTo(buildFetchResponse(TEST_MODEL_RESPONSE));
     await generateContent(
       TEST_LOCATION,
@@ -550,9 +498,6 @@ describe('generateContent', () => {
       generationConfig: {topK: 1},
       safetySettings: [],
     };
-    const expectedResult: GenerateContentResult = {
-      response: TEST_MODEL_RESPONSE,
-    };
     fetchSpy.and.resolveTo(buildFetchResponse(TEST_MODEL_RESPONSE));
     await generateContent(
       TEST_LOCATION,
@@ -572,9 +517,6 @@ describe('generateContent', () => {
     const req: GenerateContentRequest = {
       contents: TEST_USER_CHAT_MESSAGE,
     };
-    const expectedResult: GenerateContentResult = {
-      response: TEST_MODEL_RESPONSE,
-    };
     fetchSpy.and.resolveTo(buildFetchResponse(TEST_MODEL_RESPONSE));
     const resp = await generateContent(
       TEST_LOCATION,

src/models/chat_session.ts (-45)

@@ -155,17 +155,6 @@ export class ChatSession {
       const contentFromAssistant =
         generateContentResponse.candidates[0].content;
       this.historyInternal.push(contentFromAssistant);
-    } else {
-      const promptFeedback = generateContentResponse.promptFeedback;
-      if (promptFeedback) {
-        errorMessage = `Model did not return candidate, but provided prompt feedback: ${JSON.stringify(
-          promptFeedback
-        )}`;
-      } else {
-        errorMessage =
-          'Model did not return candidate, could not find any prompt feedback from model as well';
-      }
-      throw new GoogleGenerativeAIError(errorMessage);
     }

     return Promise.resolve(generateContentResult);
@@ -189,17 +178,6 @@ export class ChatSession {
       const contentFromAssistant =
         streamGenerateContentResponse.candidates[0].content;
       this.historyInternal.push(contentFromAssistant);
-    } else {
-      const promptFeedback = streamGenerateContentResponse.promptFeedback;
-      if (promptFeedback) {
-        errorMessage = `Model did not return candidate, but provided prompt feedback: ${JSON.stringify(
-          promptFeedback
-        )}`;
-      } else {
-        errorMessage =
-          'Model did not return candidate, could not find any prompt feedback from model as well';
-      }
-      throw new GoogleGenerativeAIError(errorMessage);
     }
   }

@@ -325,7 +303,6 @@ export class ChatSessionPreview {
   async sendMessage(
     request: string | Array<string | Part>
   ): Promise<GenerateContentResult> {
-    let errorMessage: string;
    const newContent: Content[] =
      formulateNewContentFromSendMessageRequest(request);
    const generateContentrequest: GenerateContentRequest = {
@@ -359,17 +336,6 @@ export class ChatSessionPreview {
       const contentFromAssistant =
         generateContentResponse.candidates[0].content;
       this.historyInternal.push(contentFromAssistant);
-    } else {
-      const promptFeedback = generateContentResponse.promptFeedback;
-      if (promptFeedback) {
-        errorMessage = `Model did not return candidate, but provided prompt feedback: ${JSON.stringify(
-          promptFeedback
-        )}`;
-      } else {
-        errorMessage =
-          'Model did not return candidate, could not find any prompt feedback from model as well';
-      }
-      throw new GoogleGenerativeAIError(errorMessage);
     }

     return Promise.resolve(generateContentResult);
@@ -379,7 +345,6 @@ export class ChatSessionPreview {
     streamGenerateContentResultPromise: Promise<StreamGenerateContentResult>,
     newContent: Content[]
   ): Promise<void> {
-    let errorMessage: string;
    const streamGenerateContentResult =
      await streamGenerateContentResultPromise;
    const streamGenerateContentResponse =
@@ -393,16 +358,6 @@ export class ChatSessionPreview {
       const contentFromAssistant =
         streamGenerateContentResponse.candidates[0].content;
       this.historyInternal.push(contentFromAssistant);
-    } else {
-      const promptFeedback = streamGenerateContentResponse.promptFeedback;
-      if (promptFeedback) {
-        errorMessage = `Model did not return candidate, but provided prompt feedback: ${JSON.stringify(
-          promptFeedback
-        )}`;
-      } else {
-        errorMessage =
-          'Model did not return candidate, could not find any prompt feedback from model as well';
-      }
     }
   }

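With the blocks above removed, sendMessage and the stream-history helpers no longer reject with GoogleGenerativeAIError on a candidate-less response; they skip the history update and resolve. Callers that relied on that rejection would need to inspect the result themselves. A hedged sketch, assuming chat is an already-started ChatSession (the variable and logging are illustrative):

// Sketch of caller-side handling now that the SDK no longer throws
// when candidates are missing or empty.
const result = await chat.sendMessage('How are you doing today?');
const {candidates, promptFeedback} = result.response;
if (!candidates || candidates.length === 0) {
  // Nothing was returned; promptFeedback (e.g. a block reason) may explain why.
  console.warn('Model returned no candidates:', JSON.stringify(promptFeedback));
} else {
  console.log(candidates[0].content.parts[0]?.text);
}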
src/models/test/models_test.ts (-41)

@@ -216,9 +216,6 @@ const fetchResponseObj = {
   headers: {'Content-Type': 'application/json'},
   url: 'url',
 };
-const TEST_EMPTY_MODEL_RESPONSE = {
-  candidates: [],
-};
 const TEST_REQUEST_OPTIONS = {
   timeout: 0,
 };
@@ -1177,7 +1174,6 @@ describe('GenerativeModelPreview generateContentStream', () => {
 describe('ChatSession', () => {
   let chatSession: ChatSession;
   let chatSessionWithNoArgs: ChatSession;
-  let chatSessionWithEmptyResponse: ChatSession;
   let chatSessionWithFunctionCall: ChatSession;
   let model: GenerativeModel;

@@ -1193,7 +1189,6 @@ describe('ChatSession', () => {
     });
     expect(await chatSession.getHistory()).toEqual(TEST_USER_CHAT_MESSAGE);
     chatSessionWithNoArgs = model.startChat();
-    chatSessionWithEmptyResponse = model.startChat();
     chatSessionWithFunctionCall = model.startChat({
       tools: TEST_TOOLS_WITH_FUNCTION_DECLARATION,
     });
@@ -1252,19 +1247,6 @@ describe('ChatSession', () => {
     expect((await chatSessionWithNoArgs.getHistory()).length).toEqual(2);
   });

-  it('throws an error when the model returns an empty response', async () => {
-    const req = 'How are you doing today?';
-    const expectedResult: GenerateContentResult = {
-      response: TEST_EMPTY_MODEL_RESPONSE,
-    };
-    spyOn(PostFetchFunctions, 'processUnary').and.resolveTo(expectedResult);
-    await expectAsync(
-      chatSessionWithEmptyResponse.sendMessage(req)
-    ).toBeRejected();
-    expect((await chatSessionWithEmptyResponse.getHistory()).length).toEqual(
-      0
-    );
-  });
   it('returns a GenerateContentResponse when passed multi-part content', async () => {
     const req = TEST_MULTIPART_MESSAGE[0]['parts'];
     const expectedResult: GenerateContentResult = {
@@ -1484,7 +1466,6 @@ describe('ChatSession', () => {
 describe('ChatSessionPreview', () => {
   let chatSession: ChatSessionPreview;
   let chatSessionWithNoArgs: ChatSessionPreview;
-  let chatSessionWithEmptyResponse: ChatSessionPreview;
   let chatSessionWithFunctionCall: ChatSessionPreview;
   let model: GenerativeModelPreview;
   let expectedStreamResult: StreamGenerateContentResult;
@@ -1501,7 +1482,6 @@ describe('ChatSessionPreview', () => {
     });
     expect(await chatSession.getHistory()).toEqual(TEST_USER_CHAT_MESSAGE);
     chatSessionWithNoArgs = model.startChat();
-    chatSessionWithEmptyResponse = model.startChat();
     chatSessionWithFunctionCall = model.startChat({
       tools: TEST_TOOLS_WITH_FUNCTION_DECLARATION,
     });
@@ -1563,19 +1543,6 @@ describe('ChatSessionPreview', () => {
     expect((await chatSessionWithNoArgs.getHistory()).length).toEqual(2);
   });

-  it('throws an error when the model returns an empty response', async () => {
-    const req = 'How are you doing today?';
-    const expectedResult: GenerateContentResult = {
-      response: TEST_EMPTY_MODEL_RESPONSE,
-    };
-    spyOn(PostFetchFunctions, 'processUnary').and.resolveTo(expectedResult);
-    await expectAsync(
-      chatSessionWithEmptyResponse.sendMessage(req)
-    ).toBeRejected();
-    expect((await chatSessionWithEmptyResponse.getHistory()).length).toEqual(
-      0
-    );
-  });
   it('returns a GenerateContentResponse when passed multi-part content', async () => {
     const req = TEST_MULTIPART_MESSAGE[0]['parts'];
     const expectedResult: GenerateContentResult = {
@@ -2035,8 +2002,6 @@ describe('GenerativeModelPreview when response is undefined', () => {
 });

 describe('GeneratvieModel when response is 4XX', () => {
-  const expectedErrorMessage =
-    '[VertexAI.ClientError]: got status: 400 Bad Request. {"code":400,"message":"request is invalid","status":"INVALID_ARGUMENT"}';
   const req: GenerateContentRequest = {
     contents: TEST_USER_CHAT_MESSAGE,
   };
@@ -2100,8 +2065,6 @@ describe('GeneratvieModel when response is 4XX', () => {
 });

 describe('GeneratvieModelPreview when response is 4XX', () => {
-  const expectedErrorMessage =
-    '[VertexAI.ClientError]: got status: 400 Bad Request. {"code":400,"message":"request is invalid","status":"INVALID_ARGUMENT"}';
   const req: GenerateContentRequest = {
     contents: TEST_USER_CHAT_MESSAGE,
   };
@@ -2165,8 +2128,6 @@ describe('GeneratvieModelPreview when response is 4XX', () => {
 });

 describe('GenerativeModel when response is not OK and not 4XX', () => {
-  const expectedErrorMessage =
-    '[VertexAI.GoogleGenerativeAIError]: got status: 500 Internal Server Error. {"code":500,"message":"service is having downtime","status":"INTERNAL_SERVER_ERROR"}';
   const req: GenerateContentRequest = {
     contents: TEST_USER_CHAT_MESSAGE,
   };
@@ -2230,8 +2191,6 @@ describe('GenerativeModel when response is not OK and not 4XX', () => {
 });

 describe('GenerativeModelPreview when response is not OK and not 4XX', () => {
-  const expectedErrorMessage =
-    '[VertexAI.GoogleGenerativeAIError]: got status: 500 Internal Server Error. {"code":500,"message":"service is having downtime","status":"INTERNAL_SERVER_ERROR"}';
   const req: GenerateContentRequest = {
     contents: TEST_USER_CHAT_MESSAGE,
   };
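The deleted chatSessionWithEmptyResponse specs asserted a rejection that this commit removes. If coverage of the new behavior is wanted, a replacement spec could assert resolution instead. A hedged Jasmine sketch reusing this file's existing spy pattern (not part of the commit; it assumes history is only updated when candidates are present, as in the surrounding code):

it('resolves without updating history when the model returns no candidates', async () => {
  // Mirrors the new processUnary behavior: an empty response object.
  const expectedResult = {response: {} as GenerateContentResponse};
  spyOn(PostFetchFunctions, 'processUnary').and.resolveTo(expectedResult);
  const chat = model.startChat();
  await expectAsync(chat.sendMessage('How are you doing today?')).toBeResolved();
  expect((await chat.getHistory()).length).toEqual(0);
});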

system_test/end_to_end_sample_test.ts (-3)

@@ -745,9 +745,6 @@ describe('generateContent', () => {
       resp
     )}`
   );
-  const functionCalls = resp.response
-    .candidates![0].content.parts.filter((part: Part) => !!part.functionCall)
-    .map((part: Part) => part.functionCall!);
   expect(
     resp.response.candidates![0].content.parts[0].text?.toLowerCase()
   ).toContain(
