|
70 | 70 | "import { z } from \"zod\";\n",
|
71 | 71 | "\n",
|
72 | 72 | "const joke = z.object({\n",
|
73 | | - " setup: z.string().describe(\"The setup of the joke\"),\n", |
74 | | - " punchline: z.string().describe(\"The punchline to the joke\"),\n", |
75 | | - " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", |
| 73 | + " setup: z.string().describe(\"The setup of the joke\"),\n", |
| 74 | + " punchline: z.string().describe(\"The punchline to the joke\"),\n", |
| 75 | + " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", |
76 | 76 | "});\n",
|
77 | 77 | "\n",
|
78 | 78 | "const structuredLlm = model.withStructuredOutput(joke);\n",
|
|
153 | 153 | ],
|
154 | 154 | "source": [
|
155 | 155 | "const structuredLlm = model.withStructuredOutput(\n",
|
156 | | - " {\n", |
157 | | - " \"name\": \"joke\",\n", |
158 | | - " \"description\": \"Joke to tell user.\",\n", |
159 | | - " \"parameters\": {\n", |
160 | | - " \"title\": \"Joke\",\n", |
161 | | - " \"type\": \"object\",\n", |
162 | | - " \"properties\": {\n", |
163 | | - " \"setup\": {\"type\": \"string\", \"description\": \"The setup for the joke\"},\n", |
164 | | - " \"punchline\": {\"type\": \"string\", \"description\": \"The joke's punchline\"},\n", |
165 | | - " },\n", |
166 | | - " \"required\": [\"setup\", \"punchline\"],\n", |
167 | | - " },\n", |
168 | | - " }\n", |
| 156 | + " {\n", |
| 157 | + " \"name\": \"joke\",\n", |
| 158 | + " \"description\": \"Joke to tell user.\",\n", |
| 159 | + " \"parameters\": {\n", |
| 160 | + " \"title\": \"Joke\",\n", |
| 161 | + " \"type\": \"object\",\n", |
| 162 | + " \"properties\": {\n", |
| 163 | + " \"setup\": {\"type\": \"string\", \"description\": \"The setup for the joke\"},\n", |
| 164 | + " \"punchline\": {\"type\": \"string\", \"description\": \"The joke's punchline\"},\n", |
| 165 | + " },\n", |
| 166 | + " \"required\": [\"setup\", \"punchline\"],\n", |
| 167 | + " },\n", |
| 168 | + " }\n", |
169 | 169 | ")\n",
|
170 | 170 | "\n",
|
171 | | - "await structuredLlm.invoke(\"Tell me a joke about cats\")" |
| 171 | + "await structuredLlm.invoke(\"Tell me a joke about cats\", { name: \"joke\" })" |
172 | 172 | ]
|
173 | 173 | },
|
174 | 174 | {
|
|
213 | 213 | ],
|
214 | 214 | "source": [
|
215 | 215 | "const structuredLlm = model.withStructuredOutput(joke, {\n",
|
216 | | - " method: \"json_mode\",\n", |
217 | | - " name: \"joke\",\n", |
| 216 | + " method: \"json_mode\",\n", |
| 217 | + " name: \"joke\",\n", |
218 | 218 | "})\n",
|
219 | 219 | "\n",
|
220 | 220 | "await structuredLlm.invoke(\n",
|
221 | | - " \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n", |
| 221 | + " \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n", |
222 | 222 | ")"
|
223 | 223 | ]
|
224 | 224 | },
|
225 | 225 | {
|
226 | 226 | "cell_type": "markdown",
|
227 | | - "id": "5e92a98a", |
| 227 | + "id": "56278a82", |
228 | 228 | "metadata": {},
|
229 | 229 | "source": [
|
230 | 230 | "In the above example, we use OpenAI's alternate JSON mode capability along with a more specific prompt.\n",
|
231 | 231 | "\n",
|
232 | | - "For specifics about the model you choose, peruse its entry in the [API reference pages](https://v02.api.js.langchain.com/).\n", |
| 232 | + "For specifics about the model you choose, peruse its entry in the [API reference pages](https://api.js.langchain.com/).\n", |
233 | 233 | "\n",
|
| 234 | + "### (Advanced) Raw outputs\n", |
| 235 | + "\n", |
| 236 | + "LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `includeRaw: true`. This changes the output format to contain the raw message output and the `parsed` value (if successful):" |
| 237 | + ] |
| 238 | + }, |
| 239 | + { |
| 240 | + "cell_type": "code", |
| 241 | + "execution_count": 2, |
| 242 | + "id": "46b616a4", |
| 243 | + "metadata": {}, |
| 244 | + "outputs": [ |
| 245 | + { |
| 246 | + "data": { |
| 247 | + "text/plain": [ |
| 248 | + "{\n", |
| 249 | + " raw: AIMessage {\n", |
| 250 | + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", |
| 251 | + " lc_kwargs: {\n", |
| 252 | + " content: \u001b[32m\"\"\u001b[39m,\n", |
| 253 | + " tool_calls: [\n", |
| 254 | + " {\n", |
| 255 | + " name: \u001b[32m\"joke\"\u001b[39m,\n", |
| 256 | + " args: \u001b[36m[Object]\u001b[39m,\n", |
| 257 | + " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n", |
| 258 | + " }\n", |
| 259 | + " ],\n", |
| 260 | + " invalid_tool_calls: [],\n", |
| 261 | + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: [ \u001b[36m[Object]\u001b[39m ] },\n", |
| 262 | + " response_metadata: {}\n", |
| 263 | + " },\n", |
| 264 | + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", |
| 265 | + " content: \u001b[32m\"\"\u001b[39m,\n", |
| 266 | + " name: \u001b[90mundefined\u001b[39m,\n", |
| 267 | + " additional_kwargs: {\n", |
| 268 | + " function_call: \u001b[90mundefined\u001b[39m,\n", |
| 269 | + " tool_calls: [\n", |
| 270 | + " {\n", |
| 271 | + " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m,\n", |
| 272 | + " type: \u001b[32m\"function\"\u001b[39m,\n", |
| 273 | + " function: \u001b[36m[Object]\u001b[39m\n", |
| 274 | + " }\n", |
| 275 | + " ]\n", |
| 276 | + " },\n", |
| 277 | + " response_metadata: {\n", |
| 278 | + " tokenUsage: { completionTokens: \u001b[33m33\u001b[39m, promptTokens: \u001b[33m88\u001b[39m, totalTokens: \u001b[33m121\u001b[39m },\n", |
| 279 | + " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", |
| 280 | + " },\n", |
| 281 | + " tool_calls: [\n", |
| 282 | + " {\n", |
| 283 | + " name: \u001b[32m\"joke\"\u001b[39m,\n", |
| 284 | + " args: {\n", |
| 285 | + " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", |
| 286 | + " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n", |
| 287 | + " rating: \u001b[33m7\u001b[39m\n", |
| 288 | + " },\n", |
| 289 | + " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n", |
| 290 | + " }\n", |
| 291 | + " ],\n", |
| 292 | + " invalid_tool_calls: [],\n", |
| 293 | + " usage_metadata: { input_tokens: \u001b[33m88\u001b[39m, output_tokens: \u001b[33m33\u001b[39m, total_tokens: \u001b[33m121\u001b[39m }\n", |
| 294 | + " },\n", |
| 295 | + " parsed: {\n", |
| 296 | + " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", |
| 297 | + " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n", |
| 298 | + " rating: \u001b[33m7\u001b[39m\n", |
| 299 | + " }\n", |
| 300 | + "}" |
| 301 | + ] |
| 302 | + }, |
| 303 | + "execution_count": 2, |
| 304 | + "metadata": {}, |
| 305 | + "output_type": "execute_result" |
| 306 | + } |
| 307 | + ], |
| 308 | + "source": [ |
| 309 | + "const joke = z.object({\n", |
| 310 | + " setup: z.string().describe(\"The setup of the joke\"),\n", |
| 311 | + " punchline: z.string().describe(\"The punchline to the joke\"),\n", |
| 312 | + " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", |
| 313 | + "});\n", |
| 314 | + "\n", |
| 315 | + "const structuredLlm = model.withStructuredOutput(joke, { includeRaw: true, name: \"joke\" });\n", |
| 316 | + "\n", |
| 317 | + "await structuredLlm.invoke(\"Tell me a joke about cats\");" |
| 318 | + ] |
| 319 | + }, |
| 320 | + { |
| 321 | + "cell_type": "markdown", |
| 322 | + "id": "5e92a98a", |
| 323 | + "metadata": {}, |
| 324 | + "source": [ |
234 | 325 | "## Prompting techniques\n",
|
235 | 326 | "\n",
|
236 | 327 | "You can also prompt models to output information in a given format. This approach relies on designing good prompts and then parsing the model's output. This is the only option for models that don't support `.withStructuredOutput()` or other built-in approaches.\n",
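A minimal sketch of this prompting-and-parsing approach (assumptions: a chat `model` like the one instantiated earlier in the guide, whose `.invoke()` resolves to a message with a string `content` field; the fence-stripping heuristic and the `joke` shape are illustrative, not part of the notebook):

```typescript
// Sketch of the prompting technique: request JSON in the prompt,
// then parse the model's raw text output yourself.
// Assumes `model` is a chat model whose `.invoke()` resolves to a
// message with a string `content` field.
const response = await model.invoke(
  "Tell me a joke about cats. Respond ONLY with a JSON object " +
    "containing `setup` and `punchline` string keys, with no extra text."
);

// Models sometimes wrap JSON in markdown fences; strip them defensively.
const text = (response.content as string)
  .trim()
  .replace(/^```(?:json)?\s*|\s*```$/g, "");

try {
  const joke = JSON.parse(text) as { setup: string; punchline: string };
  console.log(`${joke.setup} ... ${joke.punchline}`);
} catch {
  // Unlike `withStructuredOutput`, nothing guarantees valid JSON here,
  // so parse failures must be handled explicitly.
  console.error("Model did not return valid JSON:", text);
}
```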
|
|