@@ -44,23 +44,371 @@ export interface ChatTogetherAIInput
 }
 
 /**
- * Wrapper around TogetherAI API for large language models fine-tuned for chat
+ * TogetherAI chat model integration.
  *
- * TogetherAI API is compatible to the OpenAI API with some limitations. View the
+ * The TogetherAI API is compatible with the OpenAI API, with some limitations. View the
  * full API ref at:
  * @link {https://docs.together.ai/reference/chat-completions}
  *
- * To use, you should have the `TOGETHER_AI_API_KEY` environment variable set.
- * @example
+ * Setup:
+ * Install `@langchain/community` and set an environment variable named `TOGETHER_AI_API_KEY`.
+ *
+ * ```bash
+ * npm install @langchain/community
+ * export TOGETHER_AI_API_KEY="your-api-key"
+ * ```
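+ *
+ * The key can also be passed directly to the constructor via the `apiKey`
+ * field instead of an environment variable, for example:
+ *
+ * ```typescript
+ * const llmWithExplicitKey = new ChatTogetherAI({ apiKey: process.env.TOGETHER_AI_API_KEY });
+ * ```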
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_community.chat_models_togetherai.ChatTogetherAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_community.chat_models_togetherai.ChatTogetherAICallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
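+ *
+ * A sketch of the second-argument form, assuming `llm` and `input` as defined
+ * in the examples below:
+ *
+ * ```typescript
+ * const perCallResult = await llm.invoke(input, { stop: ["\n"] });
+ * ```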
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
  * ```typescript
- * const model = new ChatTogetherAI({
- *   temperature: 0.9,
- *   apiKey: process.env.TOGETHER_AI_API_KEY,
+ * import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
+ *
+ * const llm = new ChatTogetherAI({
+ *   model: "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ *   temperature: 0,
+ *   // other params...
  * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessage {
+ *   "id": "8b23ea7bcc4c924b-MUC",
+ *   "content": "\"J'adore programmer\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "tokenUsage": {
+ *       "completionTokens": 8,
+ *       "promptTokens": 19,
+ *       "totalTokens": 27
+ *     },
+ *     "finish_reason": "eos"
+ *   },
+ *   "tool_calls": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 19,
+ *     "output_tokens": 8,
+ *     "total_tokens": 27
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "J",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "'",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "ad",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "ore",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": " programmer",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": null
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "id": "8b23eb602fb19263-MUC",
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": "eos"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 19,
+ *     "output_tokens": 8,
+ *     "total_tokens": 27
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "id": "8b23ecd42e469236-MUC",
+ *   "content": "\"J'adore programmer\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "prompt": 0,
+ *     "completion": 0,
+ *     "finish_reason": "eos"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 19,
+ *     "output_tokens": 8,
+ *     "total_tokens": 27
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY? Respond with JSON and use tools."
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetWeather',
+ *     args: { location: 'Los Angeles' },
+ *     type: 'tool_call',
+ *     id: 'call_q8i4zx1udqjjnou2bzbrg8ms'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
  *
- * const response = await model.invoke([new HumanMessage("Hello there!")]);
- * console.log(response);
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
  * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'Why did the cat join a band',
+ *   punchline: 'Because it wanted to be the purr-cussionist'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 19, output_tokens: 65, total_tokens: 84 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Response Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForResponseMetadata = await llm.invoke(input);
+ * console.log(aiMsgForResponseMetadata.response_metadata);
+ * ```
+ *
+ * ```txt
+ * {
+ *   tokenUsage: { completionTokens: 91, promptTokens: 19, totalTokens: 110 },
+ *   finish_reason: 'eos'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
  */
 export class ChatTogetherAI extends ChatOpenAI<ChatTogetherAICallOptions> {
   static lc_name() {