Commit 0767f11

fix(openai): Responses API: fix function call ID handling on stream (#8107)
2 parents da68851 + 272efdb commit 0767f11

2 files changed: +53 -22 lines changed

libs/langchain-openai/src/chat_models.ts

Lines changed: 1 addition & 1 deletion
@@ -831,7 +831,7 @@ function _convertOpenAIResponsesDeltaToBaseMessageChunk(
         type: "tool_call_chunk",
         name: chunk.item.name,
         args: chunk.item.arguments,
-        id: chunk.item.id,
+        id: chunk.item.call_id,
         index: chunk.output_index,
       });
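
For context (not part of the commit): with this one-line change, tool call chunks assembled from a Responses API stream carry the item's call_id, which is the id a ToolMessage must echo back as its tool_call_id on the follow-up request. Below is a minimal round-trip sketch; the useResponsesApi flag and the prompt are illustrative assumptions, not taken from this commit.

// Hypothetical usage sketch, not part of this commit. Assumes `useResponsesApi`
// routes requests through the Responses API; adapt to your own configuration.
import { ChatOpenAI } from "@langchain/openai";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { AIMessageChunk, HumanMessage, ToolMessage } from "@langchain/core/messages";

const multiply = tool((args) => args.x * args.y, {
  name: "multiply",
  description: "Multiply two numbers",
  schema: z.object({ x: z.number(), y: z.number() }),
});

const llm = new ChatOpenAI({
  modelName: "gpt-4o-mini",
  useResponsesApi: true, // assumption: opt in to the Responses API
}).bindTools([multiply]);

// Accumulate the streamed chunks into one AIMessageChunk.
let full: AIMessageChunk | undefined;
for await (const chunk of await llm.stream([new HumanMessage("whats 5 * 4")])) {
  full = full === undefined ? chunk : full.concat(chunk);
}

// With the fix, the streamed tool call's id is the Responses API call_id,
// i.e. the value the ToolMessage must report as tool_call_id.
const toolCall = full?.tool_calls?.[0];
if (toolCall) {
  // Running the tool on the ToolCall yields a ToolMessage keyed by that id.
  const toolMessage: ToolMessage = await multiply.invoke(toolCall);
  console.log(toolMessage.tool_call_id === toolCall.id); // true after this fix
}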

libs/langchain-openai/src/tests/chat_models_responses.int.test.ts

Lines changed: 52 additions & 21 deletions
@@ -6,11 +6,13 @@ import {
   BaseMessage,
   BaseMessageChunk,
   HumanMessage,
+  ToolMessage,
   isAIMessage,
   isAIMessageChunk,
 } from "@langchain/core/messages";
 import { tool } from "@langchain/core/tools";
 import { z } from "zod";
+import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
 import { ChatOpenAI } from "../chat_models.js";
 import { REASONING_OUTPUT_MESSAGES } from "./data/computer-use-inputs.js";
 import { ChatOpenAIReasoningSummary } from "../types.js";
@@ -120,31 +122,60 @@ test("Test with built-in web search", async () => {
   assertResponse(boundResponse);
 });

-test("Test function calling", async () => {
-  const multiply = tool((args) => args.x * args.y, {
-    name: "multiply",
-    description: "Multiply two numbers",
-    schema: z.object({ x: z.number(), y: z.number() }),
-  });
+test.each(["stream", "invoke"])(
+  "Test function calling, %s",
+  async (invocationType: string) => {
+    const multiply = tool((args) => args.x * args.y, {
+      name: "multiply",
+      description: "Multiply two numbers",
+      schema: z.object({ x: z.number(), y: z.number() }),
+    });

-  const llm = new ChatOpenAI({ modelName: "gpt-4o-mini" }).bindTools([
-    multiply,
-    { type: "web_search_preview" },
-  ]);
+    const llm = new ChatOpenAI({ modelName: "gpt-4o-mini" }).bindTools([
+      multiply,
+      { type: "web_search_preview" },
+    ]);
+
+    function invoke(
+      invocationType: string,
+      prompt: BaseLanguageModelInput
+    ): Promise<AIMessage | AIMessageChunk> {
+      if (invocationType === "invoke") {
+        return llm.invoke(prompt);
+      }

-  const msg = (await llm.invoke("whats 5 * 4")) as AIMessage;
-  expect(msg.tool_calls).toMatchObject([
-    { name: "multiply", args: { x: 5, y: 4 } },
-  ]);
+      return concatStream(llm.stream(prompt));
+    }

-  const full = await concatStream(llm.stream("whats 5 * 4"));
-  expect(full?.tool_calls).toMatchObject([
-    { name: "multiply", args: { x: 5, y: 4 } },
-  ]);
+    const messages = [new HumanMessage("whats 5 * 4")];

-  const response = await llm.invoke("whats some good news from today");
-  assertResponse(response);
-});
+    const aiMessage = (await invoke(invocationType, messages)) as AIMessage;
+
+    messages.push(aiMessage);
+
+    expect(aiMessage.tool_calls).toMatchObject([
+      { name: "multiply", args: { x: 5, y: 4 } },
+    ]);
+
+    const toolMessage: ToolMessage = await multiply.invoke(
+      aiMessage.tool_calls![0]
+    );
+    messages.push(toolMessage);
+
+    expect(toolMessage.tool_call_id).toMatch(/^call_[a-zA-Z0-9]+$/);
+    expect(toolMessage.tool_call_id).toEqual(aiMessage.tool_calls![0].id);
+
+    const finalAiMessage = await invoke(invocationType, messages);
+
+    assertResponse(finalAiMessage);
+
+    const noToolCallResponse = await invoke(
+      invocationType,
+      "whats some good news from today"
+    );
+    assertResponse(noToolCallResponse);
+  }
+);

 test("Test structured output", async () => {
   const schema = z.object({ response: z.string() });
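
The updated test also leans on a concatStream helper defined elsewhere in this file, outside the hunks shown above. A plausible sketch of that helper, assuming it simply folds the streamed chunks together with .concat(); the real implementation may differ:

import { AIMessageChunk } from "@langchain/core/messages";

// Assumed shape of the concatStream helper referenced above; the actual
// helper lives outside the shown hunks and may differ in detail.
async function concatStream(
  stream: Promise<AsyncIterable<AIMessageChunk>>
): Promise<AIMessageChunk> {
  let full: AIMessageChunk | undefined;
  for await (const chunk of await stream) {
    full = full === undefined ? chunk : full.concat(chunk);
  }
  if (full === undefined) {
    throw new Error("stream produced no chunks");
  }
  return full;
}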
