Commit 4604a41

welljsjs and jacoblee93 authored

core[patch]: Fix #5873 (LCEL issue when streaming from LLMs) (#5874)

* Fix #5873 (LCEL issue when streaming from LLMs)
* Add test

Co-authored-by: jacoblee93 <[email protected]>

1 parent 16fed47 · commit 4604a41

File tree: 2 files changed, +16 −2 lines

langchain-core/src/language_models/llms.ts (+1 −1)

@@ -156,7 +156,7 @@ export abstract class BaseLLM<
       });
       try {
         for await (const chunk of this._streamResponseChunks(
-          input.toString(),
+          prompt.toString(),
          callOptions,
          runManagers?.[0]
        )) {
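
For context, the one-line change above is what makes streaming work when an LLM sits at the end of a prompt pipe: previously the raw runnable input, rather than the coerced prompt value, was stringified and handed to _streamResponseChunks. A minimal sketch of the affected LCEL pattern, assuming the @langchain/core entrypoints for prompts and the FakeStreamingLLM testing helper (its echo-the-input behavior is an assumption consistent with the test added below):

import { PromptTemplate } from "@langchain/core/prompts";
import { FakeStreamingLLM } from "@langchain/core/utils/testing";

// Pipe a prompt template into a (fake) streaming LLM and stream the result.
// Before this fix, the LLM streamed a stringified version of the wrong value;
// after it, each chunk comes from the formatted prompt as expected.
const prompt = PromptTemplate.fromTemplate("hello there {name}");
const chain = prompt.pipe(new FakeStreamingLLM({}));

const chunks: string[] = [];
for await (const chunk of await chain.stream({ name: "test" })) {
  chunks.push(chunk);
}
// FakeStreamingLLM echoes its input back chunk by chunk, so this
// logs "hello there test".
console.log(chunks.join(""));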

langchain-core/src/language_models/tests/llms.test.ts (+15 −1)

@@ -1,7 +1,8 @@
 /* eslint-disable no-promise-executor-return */

 import { test } from "@jest/globals";
-import { FakeLLM } from "../../utils/testing/index.js";
+import { FakeLLM, FakeStreamingLLM } from "../../utils/testing/index.js";
+import { HumanMessagePromptTemplate } from "../../prompts/chat.js";

 test("Test FakeLLM uses callbacks", async () => {
   const model = new FakeLLM({});
@@ -40,3 +41,16 @@ test("Test FakeLLM uses callbacks with a cache", async () => {
   expect(response).toEqual(response2);
   expect(response2).toEqual(acc);
 });
+
+test("Test FakeStreamingLLM works when streaming through a prompt", async () => {
+  const prompt = HumanMessagePromptTemplate.fromTemplate("hello there {name}");
+  const model = new FakeStreamingLLM({});
+  const chain = prompt.pipe(model);
+  const stream = await chain.stream({ name: "test" });
+  const chunks = [];
+  for await (const chunk of stream) {
+    chunks.push(chunk);
+  }
+  expect(chunks.length).toBeGreaterThan(1);
+  expect(chunks.join("")).toEqual("Human: hello there test");
+});
