|
100 | 100 | {
|
101 | 101 | "data": {
|
102 | 102 | "text/plain": [
|
103 | | - "\u001b[32m`[{\"title\":\"Weather in San Francisco\",\"url\":\"https://www.weatherapi.com/\",\"content\":\"{'location': {'n`\u001b[39m... 1111 more characters"
| 103 | + "\u001b[32m`[{\"title\":\"Weather in San Francisco\",\"url\":\"https://www.weatherapi.com/\",\"content\":\"{'location': {'n`\u001b[39m... 1347 more characters"
104 | 104 | ]
|
105 | 105 | },
|
106 | 106 | "execution_count": 1,
|
|
109 | 109 | }
|
110 | 110 | ],
|
111 | 111 | "source": [
|
| 112 | + "import \"cheerio\"; // This is required in notebooks to use the `CheerioWebBaseLoader`\n", |
112 | 113 | "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\"\n",
|
113 | 114 | "\n",
|
114 | 115 | "const search = new TavilySearchResults({\n",
|
|
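The hunk above is truncated before the `TavilySearchResults` constructor options, so those settings are not reproduced here. Once constructed, the tool can be exercised directly; a minimal sketch, assuming `TAVILY_API_KEY` is set in the environment and using the tool's standard `invoke` interface:

```typescript
// Hypothetical direct call to the `search` tool defined above.
// Tavily returns a JSON string of results, matching the cell output
// shown at the top of this diff.
const searchResults = await search.invoke("what is the weather in San Francisco?");
console.log(searchResults);
```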
152 | 153 | }
|
153 | 154 | ],
|
154 | 155 | "source": [
|
155 | | - "import \"cheerio\"; // This is required in notebooks to use the `CheerioWebBaseLoader`\n",
156 | | - "import { CheerioWebBaseLoader } from \"langchain/document_loaders/web/cheerio\";\n",
| 156 | + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n",
157 | 157 | "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n",
|
158 | 158 | "import { OpenAIEmbeddings } from \"@langchain/openai\";\n",
|
159 | 159 | "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n",
|
160 | 160 | "\n",
|
161 | | - "const loader = new CheerioWebBaseLoader(\"https://docs.smith.langchain.com/overview\")\n",
162 | | - "const docs = await loader.load()\n",
163 | | - "const documents = await new RecursiveCharacterTextSplitter(\n",
164 | | - " {\n",
165 | | - " chunkSize: 1000,\n",
166 | | - " chunkOverlap: 200\n",
167 | | - " }\n",
168 | | - ").splitDocuments(docs)\n",
169 | | - "const vectorStore = await MemoryVectorStore.fromDocuments(documents, new OpenAIEmbeddings())\n",
| 161 | + "const loader = new CheerioWebBaseLoader(\"https://docs.smith.langchain.com/overview\");\n", |
| 162 | + "const docs = await loader.load();\n", |
| 163 | + "const splitter = new RecursiveCharacterTextSplitter(\n", |
| 164 | + " {\n", |
| 165 | + " chunkSize: 1000,\n", |
| 166 | + " chunkOverlap: 200\n", |
| 167 | + " }\n", |
| 168 | + ");\n", |
| 169 | + "const documents = await splitter.splitDocuments(docs);\n", |
| 170 | + "const vectorStore = await MemoryVectorStore.fromDocuments(documents, new OpenAIEmbeddings());\n", |
170 | 171 | "const retriever = vectorStore.asRetriever();\n",
|
171 | 172 | "\n",
|
172 | | - "(await retriever.invoke(\"how to upload a dataset\"))[0]"
| 173 | + "(await retriever.invoke(\"how to upload a dataset\"))[0];"
173 | 174 | ]
|
174 | 175 | },
|
175 | 176 | {
|
|
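For orientation on where this refactored cell leads: later in the tutorial the retriever is exposed to the agent as a tool. A hedged sketch of that step, assuming the `createRetrieverTool` helper from `langchain/tools/retriever`; the `langsmith_search` name and description are illustrative:

```typescript
import { createRetrieverTool } from "langchain/tools/retriever";

// Wrap the retriever built above as an agent tool. The agent reads the
// description to decide when to call it.
const retrieverTool = createRetrieverTool(retriever, {
  name: "langsmith_search",
  description:
    "Search for information about LangSmith. For any questions about LangSmith, you must use this tool!",
});

// Collected together with the Tavily tool from the earlier cell.
const tools = [search, retrieverTool];
```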
258 | 259 | }
|
259 | 260 | ],
|
260 | 261 | "source": [
|
| 262 | + "import { ChatOpenAI } from \"@langchain/openai\";\n", |
| 263 | + "const model = new ChatOpenAI({ model: \"gpt-4\", temperature: 0 })\n", |
| 264 | + "\n", |
261 | 265 | "import { HumanMessage } from \"@langchain/core/messages\";\n",
|
262 | 266 | "\n",
|
263 | 267 | "const response = await model.invoke([new HumanMessage(\"hi!\")]);\n",
|
|
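The `tool_calls` array shown in the next hunk is produced by first binding the tools to the model. A minimal sketch, assuming the standard `bindTools` method on chat models and the `tools` array from the cells above:

```typescript
// Attach the tools so the model can emit structured tool calls
// instead of plain text when a tool is appropriate.
const modelWithTools = model.bindTools(tools);

const toolCallResponse = await modelWithTools.invoke([
  new HumanMessage("What's the weather in SF?"),
]);

// Matches the shape of the output in the following hunk.
console.log(toolCallResponse.tool_calls);
```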
336 | 340 | " {\n",
|
337 | 341 | " \"name\": \"tavily_search_results_json\",\n",
|
338 | 342 | " \"args\": {\n",
|
339 | | - " \"input\": \"weather in San Francisco\"\n",
| 343 | + " \"input\": \"current weather in San Francisco\"\n",
340 | 344 | " },\n",
|
341 | | - " \"id\": \"call_y0nn6mbVCV5paX6RrqqFUqdC\"\n",
| 345 | + " \"id\": \"call_VcSjZAZkEOx9lcHNZNXAjXkm\"\n",
342 | 346 | " }\n",
|
343 | 347 | "]\n"
|
344 | 348 | ]
|
|
370 | 374 | "\n",
|
371 | 375 | "Now that we have defined the tools and the LLM, we can create the agent. We will be using a tool calling agent - for more information on this type of agent, as well as other options, see [this guide](/docs/concepts/#agent_types/).\n",
|
372 | 376 | "\n",
|
373 | | - "We can first choose the prompt we want to use to guide the agent.\n",
374 | | - "\n",
375 | | - "If you want to see the contents of this prompt in the hub, you can go to:\n",
376 | | - "\n",
377 | | - "[https://smith.langchain.com/hub/hwchase17/openai-functions-agent](https://smith.langchain.com/hub/hwchase17/openai-functions-agent)"
| 377 | + "We can first choose the prompt we want to use to guide the agent:"
378 | 378 | ]
|
379 | 379 | },
|
380 | 380 | {
|
|
394 | 394 | " prompt: PromptTemplate {\n",
|
395 | 395 | " lc_serializable: true,\n",
|
396 | 396 | " lc_kwargs: {\n",
|
397 | | - " template: \"You are a helpful assistant\",\n",
398 | 397 | " inputVariables: [],\n",
|
399 | 398 | " templateFormat: \"f-string\",\n",
|
400 | | - " partialVariables: {}\n",
| 399 | + " template: \"You are a helpful assistant\"\n",
401 | 400 | " },\n",
|
402 | 401 | " lc_runnable: true,\n",
|
403 | 402 | " name: undefined,\n",
|
404 | 403 | " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n",
|
405 | 404 | " inputVariables: [],\n",
|
406 | 405 | " outputParser: undefined,\n",
|
407 | | - " partialVariables: {},\n",
408 | | - " template: \"You are a helpful assistant\",\n",
| 406 | + " partialVariables: undefined,\n",
409 | 407 | " templateFormat: \"f-string\",\n",
|
| 408 | + " template: \"You are a helpful assistant\",\n", |
410 | 409 | " validateTemplate: true\n",
|
411 | 410 | " }\n",
|
412 | 411 | " },\n",
|
|
418 | 417 | " prompt: PromptTemplate {\n",
|
419 | 418 | " lc_serializable: true,\n",
|
420 | 419 | " lc_kwargs: {\n",
|
421 | | - " template: \"You are a helpful assistant\",\n",
422 | 420 | " inputVariables: [],\n",
|
423 | 421 | " templateFormat: \"f-string\",\n",
|
424 | | - " partialVariables: {}\n",
| 422 | + " template: \"You are a helpful assistant\"\n",
425 | 423 | " },\n",
|
426 | 424 | " lc_runnable: true,\n",
|
427 | 425 | " name: undefined,\n",
|
428 | 426 | " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n",
|
429 | 427 | " inputVariables: [],\n",
|
430 | 428 | " outputParser: undefined,\n",
|
431 | | - " partialVariables: {},\n",
432 | | - " template: \"You are a helpful assistant\",\n",
| 429 | + " partialVariables: undefined,\n",
433 | 430 | " templateFormat: \"f-string\",\n",
|
| 431 | + " template: \"You are a helpful assistant\",\n", |
434 | 432 | " validateTemplate: true\n",
|
435 | 433 | " },\n",
|
436 | 434 | " messageClass: undefined,\n",
|
437 | 435 | " chatMessageClass: undefined\n",
|
438 | 436 | " },\n",
|
439 | 437 | " MessagesPlaceholder {\n",
|
440 | 438 | " lc_serializable: true,\n",
|
441 | | - " lc_kwargs: { optional: true, variableName: \"chat_history\" },\n",
| 439 | + " lc_kwargs: { variableName: \"chat_history\", optional: true },\n",
442 | 440 | " lc_runnable: true,\n",
|
443 | 441 | " name: undefined,\n",
|
444 | 442 | " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n",
|
|
451 | 449 | " prompt: PromptTemplate {\n",
|
452 | 450 | " lc_serializable: true,\n",
|
453 | 451 | " lc_kwargs: {\n",
|
454 | | - " template: \"{input}\",\n",
455 | 452 | " inputVariables: [Array],\n",
|
456 | 453 | " templateFormat: \"f-string\",\n",
|
457 | | - " partialVariables: {}\n",
| 454 | + " template: \"{input}\"\n",
458 | 455 | " },\n",
|
459 | 456 | " lc_runnable: true,\n",
|
460 | 457 | " name: undefined,\n",
|
461 | 458 | " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n",
|
462 | 459 | " inputVariables: [ \"input\" ],\n",
|
463 | 460 | " outputParser: undefined,\n",
|
464 | | - " partialVariables: {},\n",
465 | | - " template: \"{input}\",\n",
| 461 | + " partialVariables: undefined,\n",
466 | 462 | " templateFormat: \"f-string\",\n",
|
| 463 | + " template: \"{input}\",\n", |
467 | 464 | " validateTemplate: true\n",
|
468 | 465 | " }\n",
|
469 | 466 | " },\n",
|
|
475 | 472 | " prompt: PromptTemplate {\n",
|
476 | 473 | " lc_serializable: true,\n",
|
477 | 474 | " lc_kwargs: {\n",
|
478 | | - " template: \"{input}\",\n",
479 | 475 | " inputVariables: [ \"input\" ],\n",
|
480 | 476 | " templateFormat: \"f-string\",\n",
|
481 | | - " partialVariables: {}\n",
| 477 | + " template: \"{input}\"\n",
482 | 478 | " },\n",
|
483 | 479 | " lc_runnable: true,\n",
|
484 | 480 | " name: undefined,\n",
|
485 | 481 | " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n",
|
486 | 482 | " inputVariables: [ \"input\" ],\n",
|
487 | 483 | " outputParser: undefined,\n",
|
488 | | - " partialVariables: {},\n",
489 | | - " template: \"{input}\",\n",
| 484 | + " partialVariables: undefined,\n",
490 | 485 | " templateFormat: \"f-string\",\n",
|
| 486 | + " template: \"{input}\",\n", |
491 | 487 | " validateTemplate: true\n",
|
492 | 488 | " },\n",
|
493 | 489 | " messageClass: undefined,\n",
|
494 | 490 | " chatMessageClass: undefined\n",
|
495 | 491 | " },\n",
|
496 | 492 | " MessagesPlaceholder {\n",
|
497 | 493 | " lc_serializable: true,\n",
|
498 | | - " lc_kwargs: { optional: false, variableName: \"agent_scratchpad\" },\n",
| 494 | + " lc_kwargs: { variableName: \"agent_scratchpad\", optional: true },\n",
499 | 495 | " lc_runnable: true,\n",
|
500 | 496 | " name: undefined,\n",
|
501 | 497 | " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n",
|
502 | 498 | " variableName: \"agent_scratchpad\",\n",
|
503 | | - " optional: false\n",
| 499 | + " optional: true\n",
504 | 500 | " }\n",
|
505 | 501 | "]\n"
|
506 | 502 | ]
|
507 | 503 | }
|
508 | 504 | ],
|
509 | 505 | "source": [
|
510 | 506 | "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
|
511 | | - "import { pull } from \"langchain/hub\";\n",
512 | 507 | "\n",
|
513 | | - "// Get the prompt to use - you can modify this!\n",
514 | | - "const prompt = await pull<ChatPromptTemplate>(\"hwchase17/openai-functions-agent\");\n",
| 508 | + "const prompt = ChatPromptTemplate.fromMessages([\n", |
| 509 | + " [\"system\", \"You are a helpful assistant\"],\n", |
| 510 | + " [\"placeholder\", \"{chat_history}\"],\n", |
| 511 | + " [\"human\", \"{input}\"],\n", |
| 512 | + " [\"placeholder\", \"{agent_scratchpad}\"],\n", |
| 513 | + "]);\n", |
515 | 514 | "\n",
|
516 | 515 | "console.log(prompt.promptMessages);"
|
517 | 516 | ]
|
|
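With the prompt now constructed inline rather than pulled from the hub, the next step in the tutorial is assembling the agent and its executor. A hedged sketch using `createToolCallingAgent` and `AgentExecutor` from `langchain/agents`, with the `model`, `tools`, and `prompt` defined in the cells above:

```typescript
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";

// The agent decides which tool (if any) to call; the executor runs the
// loop of invoking tools and feeding results back to the agent.
const agent = await createToolCallingAgent({ llm: model, tools, prompt });
const agentExecutor = new AgentExecutor({ agent, tools });
```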
617 | 616 | "text/plain": [
|
618 | 617 | "{\n",
|
619 | 618 | " input: \u001b[32m\"how can langsmith help with testing?\"\u001b[39m,\n",
|
620 | | - " output: \u001b[32m\"LangSmith can help with testing by providing a platform for building production-grade LLM applicatio\"\u001b[39m... 880 more characters\n",
| 619 | + " output: \u001b[32m\"LangSmith can be a valuable tool for testing in several ways:\\n\"\u001b[39m +\n", |
| 620 | + " \u001b[32m\"\\n\"\u001b[39m +\n", |
| 621 | + " \u001b[32m\"1. **Logging Traces**: LangSmith prov\"\u001b[39m... 960 more characters\n", |
621 | 622 | "}"
|
622 | 623 | ]
|
623 | 624 | },
|
|
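For reference, the `{ input, output }` record in the hunk above is the result of invoking the executor with a plain input object; this sketch assumes the `agentExecutor` name from the previous step:

```typescript
// Produces the { input, output } record shown above.
await agentExecutor.invoke({ input: "how can langsmith help with testing?" });
```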
651 | 652 | "text/plain": [
|
652 | 653 | "{\n",
|
653 | 654 | " input: \u001b[32m\"whats the weather in sf?\"\u001b[39m,\n",
|
654 | | - " output: \u001b[32m\"The current weather in San Francisco is partly cloudy with a temperature of 64.0°F (17.8°C). The win\"\u001b[39m... 112 more characters\n",
| 655 | + " output: \u001b[32m\"The current weather in San Francisco, California is partly cloudy with a temperature of 12.2°C (54.0\"\u001b[39m... 176 more characters\n",
655 | 656 | "}"
|
656 | 657 | ]
|
657 | 658 | },
|
|
753 | 754 | " }\n",
|
754 | 755 | " ],\n",
|
755 | 756 | " input: \u001b[32m\"what's my name?\"\u001b[39m,\n",
|
756 | | - " output: \u001b[32m\"Your name is Bob! How can I help you, Bob?\"\u001b[39m\n",
| 757 | + " output: \u001b[32m\"Your name is Bob. How can I assist you further?\"\u001b[39m\n",
757 | 758 | "}"
|
758 | 759 | ]
|
759 | 760 | },
|
|
785 | 786 | "\n",
|
786 | 787 | "Because we have multiple inputs, we need to specify two things:\n",
|
787 | 788 | "\n",
|
788 | | - "- `input_messages_key`: The input key to use to add to the conversation history.\n",
789 | | - "- `history_messages_key`: The key to add the loaded messages into.\n",
| 789 | + "- `inputMessagesKey`: The key of the input to add to the conversation history.\n",
| 790 | + "- `historyMessagesKey`: The key under which the loaded messages are added.\n",
790 | 791 | "\n",
|
791 | 792 | "For more information on how to use this, see [this guide](/docs/how_to/message_history). "
|
792 | 793 | ]
|
|
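A minimal sketch of wiring those two keys up, assuming `RunnableWithMessageHistory` from `@langchain/core/runnables` and an in-memory `ChatMessageHistory` from `langchain/stores/message/in_memory`; the single shared history is illustrative only:

```typescript
import { ChatMessageHistory } from "langchain/stores/message/in_memory";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";

// One in-memory history for every session; a real app would key
// histories by sessionId.
const messageHistory = new ChatMessageHistory();

const agentWithChatHistory = new RunnableWithMessageHistory({
  runnable: agentExecutor,
  getMessageHistory: (_sessionId) => messageHistory,
  inputMessagesKey: "input", // where the new user input lives
  historyMessagesKey: "chat_history", // where loaded messages are injected
});
```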
819 | 820 | " AIMessage {\n",
|
820 | 821 | " lc_serializable: \u001b[33mtrue\u001b[39m,\n",
|
821 | 822 | " lc_kwargs: {\n",
|
822 | | - " content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m,\n",
| 823 | + " content: \u001b[32m\"Hello, Bob! How can I assist you today?\"\u001b[39m,\n",
823 | 824 | " tool_calls: [],\n",
|
824 | 825 | " invalid_tool_calls: [],\n",
|
825 | 826 | " additional_kwargs: {},\n",
|
826 | 827 | " response_metadata: {}\n",
|
827 | 828 | " },\n",
|
828 | 829 | " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n",
|
829 | | - " content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m,\n",
| 830 | + " content: \u001b[32m\"Hello, Bob! How can I assist you today?\"\u001b[39m,\n",
830 | 831 | " name: \u001b[90mundefined\u001b[39m,\n",
|
831 | 832 | " additional_kwargs: {},\n",
|
832 | 833 | " response_metadata: {},\n",
|
833 | 834 | " tool_calls: [],\n",
|
834 | 835 | " invalid_tool_calls: []\n",
|
835 | 836 | " }\n",
|
836 | 837 | " ],\n",
|
837 | | - " output: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m\n",
| 838 | + " output: \u001b[32m\"Hello, Bob! How can I assist you today?\"\u001b[39m\n",
838 | 839 | "}"
|
839 | 840 | ]
|
840 | 841 | },
|
|
898 | 899 | " AIMessage {\n",
|
899 | 900 | " lc_serializable: \u001b[33mtrue\u001b[39m,\n",
|
900 | 901 | " lc_kwargs: {\n",
|
901 | | - " content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m,\n",
| 902 | + " content: \u001b[32m\"Hello, Bob! How can I assist you today?\"\u001b[39m,\n",
902 | 903 | " tool_calls: [],\n",
|
903 | 904 | " invalid_tool_calls: [],\n",
|
904 | 905 | " additional_kwargs: {},\n",
|
905 | 906 | " response_metadata: {}\n",
|
906 | 907 | " },\n",
|
907 | 908 | " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n",
|
908 | | - " content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m,\n",
| 909 | + " content: \u001b[32m\"Hello, Bob! How can I assist you today?\"\u001b[39m,\n",
909 | 910 | " name: \u001b[90mundefined\u001b[39m,\n",
|
910 | 911 | " additional_kwargs: {},\n",
|
911 | 912 | " response_metadata: {},\n",
|
|
928 | 929 | " AIMessage {\n",
|
929 | 930 | " lc_serializable: \u001b[33mtrue\u001b[39m,\n",
|
930 | 931 | " lc_kwargs: {\n",
|
931 | | - " content: \u001b[32m\"Your name is Bob! How can I help you, Bob?\"\u001b[39m,\n",
| 932 | + " content: \u001b[32m\"Your name is Bob. How can I assist you further?\"\u001b[39m,\n",
932 | 933 | " tool_calls: [],\n",
|
933 | 934 | " invalid_tool_calls: [],\n",
|
934 | 935 | " additional_kwargs: {},\n",
|
935 | 936 | " response_metadata: {}\n",
|
936 | 937 | " },\n",
|
937 | 938 | " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n",
|
938 | | - " content: \u001b[32m\"Your name is Bob! How can I help you, Bob?\"\u001b[39m,\n",
| 939 | + " content: \u001b[32m\"Your name is Bob. How can I assist you further?\"\u001b[39m,\n",
939 | 940 | " name: \u001b[90mundefined\u001b[39m,\n",
|
940 | 941 | " additional_kwargs: {},\n",
|
941 | 942 | " response_metadata: {},\n",
|
942 | 943 | " tool_calls: [],\n",
|
943 | 944 | " invalid_tool_calls: []\n",
|
944 | 945 | " }\n",
|
945 | 946 | " ],\n",
|
946 | | - " output: \u001b[32m\"Your name is Bob! How can I help you, Bob?\"\u001b[39m\n",
| 947 | + " output: \u001b[32m\"Your name is Bob. How can I assist you further?\"\u001b[39m\n",
947 | 948 | "}"
|
948 | 949 | ]
|
949 | 950 | },
|
|
954 | 955 | ],
|
955 | 956 | "source": [
|
956 | 957 | "await agentWithChatHistory.invoke(\n",
|
957 | | - " { input: \"what's my name?\" },\n",
958 | | - " { configurable: { sessionId: \"<foo>\" }},\n",
| 958 | + " { input: \"what's my name?\" },\n", |
| 959 | + " { configurable: { sessionId: \"<foo>\" }},\n", |
959 | 960 | ")"
|
960 | 961 | ]
|
961 | 962 | },
|
|
972 | 973 | "id": "c029798f",
|
973 | 974 | "metadata": {},
|
974 | 975 | "source": [
|
975 | | - "## Conclusion\n",
| 976 | + "## Next steps\n",
976 | 977 | "\n",
|
977 | 978 | "That's a wrap! In this quick start we covered how to create a simple agent. Agents are a complex topic, and there's lot to learn! \n",
|
978 | 979 | "\n",
|
979 | 980 | ":::{.callout-important}\n",
|
980 | | - "This section covered building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](/docs/concepts/#langgraph)\n",
| 981 | + "This section covered building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](/docs/concepts/#langgraph).\n", |
| 982 | + "\n", |
| 983 | + "You can also see [this guide to help migrate to LangGraph](/docs/how_to/migrate_agent).\n", |
981 | 984 | ":::"
|
982 | 985 | ]
|
983 | 986 | }
|