diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore
index e54ee59f57e4..e395cf09b033 100644
--- a/docs/core_docs/.gitignore
+++ b/docs/core_docs/.gitignore
@@ -95,6 +95,8 @@ docs/how_to/output_parser_structured.md
docs/how_to/output_parser_structured.mdx
docs/how_to/output_parser_json.md
docs/how_to/output_parser_json.mdx
+docs/how_to/multiple_queries.md
+docs/how_to/multiple_queries.mdx
docs/how_to/logprobs.md
docs/how_to/logprobs.mdx
docs/how_to/graph_semantic.md
@@ -138,6 +140,4 @@ docs/how_to/binding.mdx
docs/how_to/assign.md
docs/how_to/assign.mdx
docs/how_to/agent_executor.md
-docs/how_to/agent_executor.mdx
-docs/how_to/MultiQueryRetriever.md
-docs/how_to/MultiQueryRetriever.mdx
\ No newline at end of file
+docs/how_to/agent_executor.mdx
\ No newline at end of file
diff --git a/docs/core_docs/docs/concepts.mdx b/docs/core_docs/docs/concepts.mdx
index 7fdc1c8e254a..774238a1a825 100644
--- a/docs/core_docs/docs/concepts.mdx
+++ b/docs/core_docs/docs/concepts.mdx
@@ -449,10 +449,10 @@ const retriever = vectorstore.asRetriever();
### Retrievers
-A retriever is an interface that returns documents given an unstructured query.
-It is more general than a vector store.
+A retriever is an interface that returns relevant documents given an unstructured query.
+Retrievers are more general than vector stores.
A retriever does not need to be able to store documents, only to return (or retrieve) them.
-Retrievers can be created from vectorstores, but are also broad enough to include [Exa search](/docs/integrations/retrievers/exa/)(web search) and [Amazon Kendra](/docs/integrations/retrievers/kendra-retriever/).
+Retrievers can be created from vector stores, but are also broad enough to include [Exa search](/docs/integrations/retrievers/exa/) (web search) and [Amazon Kendra](/docs/integrations/retrievers/kendra-retriever/).
Retrievers accept a string query as input and return an array of Documents as output.
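+
+For example, here is how you could invoke the retriever created from the vector store above (a minimal sketch; the query string is illustrative):
+
+```typescript
+// Returns an array of Document objects most relevant to the query.
+const docs = await retriever.invoke("What is LangChain?");
+```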
diff --git a/docs/core_docs/docs/how_to/caching_embeddings.mdx b/docs/core_docs/docs/how_to/caching_embeddings.mdx
index cd70d4d8b0b7..72b5f5f2bd6e 100644
--- a/docs/core_docs/docs/how_to/caching_embeddings.mdx
+++ b/docs/core_docs/docs/how_to/caching_embeddings.mdx
@@ -1,10 +1,17 @@
import CodeBlock from "@theme/CodeBlock";
import InMemoryExample from "@examples/embeddings/cache_backed_in_memory.ts";
-import ConvexExample from "@examples/embeddings/convex/cache_backed_convex.ts";
import RedisExample from "@examples/embeddings/cache_backed_redis.ts";
# How to cache embedding results
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Embeddings](/docs/concepts/#embedding-models)
+
+:::
+
Embeddings can be stored or temporarily cached to avoid needing to recompute them.
Caching embeddings can be done using a `CacheBackedEmbeddings` instance.
@@ -15,13 +22,13 @@ The text is hashed and the hash is used as the key in the cache.
The main supported way to initialize a `CacheBackedEmbeddings` is the `fromBytesStore` static method. This takes in the following parameters:
-- `underlying_embedder`: The embedder to use for embedding.
-- `document_embedding_cache`: The cache to use for storing document embeddings.
-- `namespace`: (optional, defaults to "") The namespace to use for document cache. This namespace is used to avoid collisions with other caches. For example, set it to the name of the embedding model used.
+- `underlyingEmbeddings`: The embeddings model to use.
+- `documentEmbeddingCache`: The cache to use for storing document embeddings.
+- `namespace`: (optional, defaults to "") The namespace to use for document cache. This namespace is used to avoid collisions with other caches. For example, you could set it to the name of the embedding model used.
**Attention:** Be sure to set the namespace parameter to avoid collisions of the same text embedded using different embeddings models.
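+
+Here's a minimal sketch of initializing a `CacheBackedEmbeddings` instance (the in-memory store and namespace value are illustrative; the sections below show complete examples):
+
+```typescript
+import { OpenAIEmbeddings } from "@langchain/openai";
+import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed";
+import { InMemoryStore } from "langchain/storage/in_memory";
+
+const underlyingEmbeddings = new OpenAIEmbeddings();
+
+const cacheBackedEmbeddings = CacheBackedEmbeddings.fromBytesStore(
+  underlyingEmbeddings,
+  // Swap in a persistent store (e.g. Redis) for long-lived caching.
+  new InMemoryStore(),
+  {
+    // Namespace by embedding model name to avoid collisions between models.
+    namespace: "text-embedding-ada-002",
+  }
+);
+```
+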
-## Usage, in-memory
+## In-memory
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
@@ -36,47 +43,7 @@ Do not use this cache if you need to actually store the embeddings for an extend
{InMemoryExample}
-## Usage, Convex
-
-Here's an example with a [Convex](https://convex.dev/) as a cache.
-
-### Create project
-
-Get a working [Convex](https://docs.convex.dev/) project set up, for example by using:
-
-```bash
-npm create convex@latest
-```
-
-### Add database accessors
-
-Add query and mutation helpers to `convex/langchain/db.ts`:
-
-```ts title="convex/langchain/db.ts"
-export * from "langchain/util/convex";
-```
-
-### Configure your schema
-
-Set up your schema (for indexing):
-
-```ts title="convex/schema.ts"
-import { defineSchema, defineTable } from "convex/server";
-import { v } from "convex/values";
-
-export default defineSchema({
- cache: defineTable({
- key: v.string(),
- value: v.any(),
- }).index("byKey", ["key"]),
-});
-```
-
-### Example
-
-{ConvexExample}
-
-## Usage, Redis
+## Redis
Here's an example with a Redis cache.
@@ -87,3 +54,9 @@ npm install ioredis
```
{RedisExample}
+
+## Next steps
+
+You've now learned how to use caching to avoid recomputing embeddings.
+
+Next, check out the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag).
diff --git a/docs/core_docs/docs/how_to/contextual_compression.mdx b/docs/core_docs/docs/how_to/contextual_compression.mdx
index d4c64ecbe8f8..47a64b8c8d3f 100644
--- a/docs/core_docs/docs/how_to/contextual_compression.mdx
+++ b/docs/core_docs/docs/how_to/contextual_compression.mdx
@@ -1,9 +1,14 @@
----
-hide_table_of_contents: true
----
-
# How to do retrieval with contextual compression
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Retrievers](/docs/concepts/#retrievers)
+- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
+
+:::
+
One challenge with retrieval is that usually you don't know the specific queries your document storage system will face when you ingest data into the system. This means that the information most relevant to a query may be buried in a document with a lot of irrelevant text. Passing that full document through your application can lead to more expensive LLM calls and poorer responses.
Contextual compression is meant to fix this. The idea is simple: instead of immediately returning retrieved documents as-is, you can compress them using the context of the given query, so that only the relevant information is returned. “Compressing” here refers to both compressing the contents of an individual document and filtering out documents wholesale.
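+
+As a quick preview of the pattern the examples below expand on, a compression retriever wraps a base retriever together with a document compressor. Here's a minimal sketch (it assumes an existing `vectorStore`; the model and query are illustrative):
+
+```typescript
+import { ChatOpenAI } from "@langchain/openai";
+import { ContextualCompressionRetriever } from "langchain/retrievers/contextual_compression";
+import { LLMChainExtractor } from "langchain/retrievers/document_compressors/chain_extract";
+
+// A compressor that uses an LLM to extract only the query-relevant parts of each document.
+const baseCompressor = LLMChainExtractor.fromLLM(
+  new ChatOpenAI({ model: "gpt-4o" })
+);
+
+const retriever = new ContextualCompressionRetriever({
+  baseCompressor,
+  baseRetriever: vectorStore.asRetriever(),
+});
+
+const compressedDocs = await retriever.invoke(
+  "What did the speaker say about Justice Breyer?"
+);
+```
+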
@@ -58,3 +63,10 @@ This skips the need to add documents to a vector store to perform similarity sea
import DocumentCompressorPipelineExample from "@examples/retrievers/document_compressor_pipeline.ts";
{DocumentCompressorPipelineExample}
+
+## Next steps
+
+You've now learned a few ways to use contextual compression to filter out irrelevant information from your retrieved results.
+
+See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
+[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom).
diff --git a/docs/core_docs/docs/how_to/custom_retriever.mdx b/docs/core_docs/docs/how_to/custom_retriever.mdx
index a425b7223364..b57b3b160f22 100644
--- a/docs/core_docs/docs/how_to/custom_retriever.mdx
+++ b/docs/core_docs/docs/how_to/custom_retriever.mdx
@@ -1,14 +1,17 @@
----
-hide_table_of_contents: true
-sidebar_position: 0
----
-
# How to write a custom retriever class
-To create your own retriever, you need to extend the [`BaseRetriever` class](https://api.js.langchain.com/classes/langchain_core_retrievers.BaseRetriever.html)
-and implement a `_getRelevantDocuments` method that takes a `string` as its first parameter and an optional `runManager` for tracing.
-This method should return an array of `Document`s fetched from some source. This process can involve calls to a database or to the web using `fetch`.
-Note the underscore before `_getRelevantDocuments()` - the base class wraps the non-prefixed version in order to automatically handle tracing of the original call.
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Retrievers](/docs/concepts/#retrievers)
+
+:::
+
+To create your own retriever, you need to extend the [`BaseRetriever`](https://api.js.langchain.com/classes/langchain_core_retrievers.BaseRetriever.html) class
+and implement a `_getRelevantDocuments` method that takes a `string` as its first parameter (and an optional `runManager` for tracing).
+This method should return an array of `Document`s fetched from some source. This process can involve calls to a database, to the web using `fetch`, or any other source.
+Note the underscore before `_getRelevantDocuments()`. The base class wraps the non-prefixed version in order to automatically handle tracing of the original call.
Here's an example of a custom retriever that returns static documents:
@@ -70,3 +73,9 @@ await retriever.invoke("LangChain docs");
}
]
```
+
+## Next steps
+
+You've now seen an example of implementing your own custom retriever.
+
+Next, check out the individual sections for deeper dives on specific retrievers, or the [broader tutorial on RAG](/docs/tutorials/rag).
diff --git a/docs/core_docs/docs/how_to/embed_text.mdx b/docs/core_docs/docs/how_to/embed_text.mdx
index 620b68b29179..f5b3132f5618 100644
--- a/docs/core_docs/docs/how_to/embed_text.mdx
+++ b/docs/core_docs/docs/how_to/embed_text.mdx
@@ -8,7 +8,13 @@ sidebar_position: 2
Head to [Integrations](/docs/integrations/text_embedding) for documentation on built-in integrations with text embedding providers.
:::
-The Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Embeddings](/docs/concepts/#embedding-models)
+
+:::
Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
@@ -16,8 +22,6 @@ The base Embeddings class in LangChain exposes two methods: one for embedding do
## Get started
-Embeddings can be used to create a numerical representation of textual data. This numerical representation is useful because it can be used to find similar documents.
-
Below is an example of how to use the OpenAI embeddings. Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes separate `embedQuery` and `embedDocuments` methods.
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
@@ -77,3 +81,9 @@ const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
]
*/
```
+
+## Next steps
+
+You've now learned how to use embedding models with queries and text.
+
+Next, check out how to [avoid excessively recomputing embeddings with caching](/docs/how_to/caching_embeddings), or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag).
diff --git a/docs/core_docs/docs/how_to/index.mdx b/docs/core_docs/docs/how_to/index.mdx
index 12377e818e57..660e98b374b0 100644
--- a/docs/core_docs/docs/how_to/index.mdx
+++ b/docs/core_docs/docs/how_to/index.mdx
@@ -127,14 +127,14 @@ Embedding Models take a piece of text and create a numerical representation of i
Vector stores are databases that can efficiently store and retrieve embeddings.
-- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores)
+- [How to: create and query vector stores](/docs/how_to/vectorstores)
### Retrievers
Retrievers are responsible for taking a query and returning relevant documents.
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
-- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
+- [How to: generate multiple queries to retrieve data for](/docs/how_to/multiple_queries)
- [How to: use contextual compression to compress the data retrieved](/docs/how_to/contextual_compression)
- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
@@ -144,7 +144,7 @@ Retrievers are responsible for taking a query and returning relevant documents.
- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
- [How to: generate metadata filters](/docs/how_to/self_query)
- [How to: create a time-weighted retriever](/docs/how_to/time_weighted_vectorstore)
-- [How to: use a Matryoshka retriever](/docs/how_to/matryoshka_retriever)
+- [How to: reduce retrieval latency](/docs/how_to/reduce_retrieval_latency)
### Indexing
diff --git a/docs/core_docs/docs/how_to/multi_vector.mdx b/docs/core_docs/docs/how_to/multi_vector.mdx
index 2f263cf1cee1..7830d30d44ea 100644
--- a/docs/core_docs/docs/how_to/multi_vector.mdx
+++ b/docs/core_docs/docs/how_to/multi_vector.mdx
@@ -1,18 +1,24 @@
----
-hide_table_of_contents: true
----
-
# How to generate multiple embeddings per document
-It can often be beneficial to store multiple vectors per document.
-LangChain has a base MultiVectorRetriever which makes querying this type of setup easier!
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Retrievers](/docs/concepts/#retrievers)
+- [Text splitters](/docs/concepts/#text-splitters)
+- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
+
+:::
+
+Embedding different representations of an original document, then returning the original document when any of the representations result in a search hit, can allow you to
+tune and improve your retrieval performance. LangChain has a base [`MultiVectorRetriever`](https://api.js.langchain.com/classes/langchain_retrievers_multi_vector.MultiVectorRetriever.html) designed to do just this!
A lot of the complexity lies in how to create the multiple vectors per document.
-This notebook covers some of the common ways to create those vectors and use the MultiVectorRetriever.
+This guide covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.
Some methods to create multiple vectors per document include:
-- smaller chunks: split a document into smaller chunks, and embed those (e.g. the [ParentDocumentRetriever](/docs/modules/data_connection/retrievers/parent-document-retriever))
+- smaller chunks: split a document into smaller chunks, and embed those (e.g. the [`ParentDocumentRetriever`](/docs/modules/data_connection/retrievers/parent-document-retriever))
- summary: create a summary for each document, embed that along with (or instead of) the document
- hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document
@@ -54,3 +60,10 @@ These questions can then be embedded and used to retrieve the original document:
import HypotheticalExample from "@examples/retrievers/multi_vector_hypothetical.ts";
{HypotheticalExample}
+
+## Next steps
+
+You've now learned a few ways to generate multiple embeddings per document.
+
+Next, check out the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
+[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom).
diff --git a/docs/core_docs/docs/how_to/MultiQueryRetriever.ipynb b/docs/core_docs/docs/how_to/multiple_queries.ipynb
similarity index 83%
rename from docs/core_docs/docs/how_to/MultiQueryRetriever.ipynb
rename to docs/core_docs/docs/how_to/multiple_queries.ipynb
index e5082a817a19..84e86aaa81bb 100644
--- a/docs/core_docs/docs/how_to/MultiQueryRetriever.ipynb
+++ b/docs/core_docs/docs/how_to/multiple_queries.ipynb
@@ -6,14 +6,23 @@
"source": [
"# How to generate multiple queries to retrieve data for\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "\n",
+ "- [Vector stores](/docs/concepts/#vectorstores)\n",
+ "- [Retrievers](/docs/concepts/#retrievers)\n",
+ "- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)\n",
+ "\n",
+ ":::\n",
"\n",
"Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\".\n",
"But retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well.\n",
"Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n",
"\n",
- "The MultiQueryRetriever automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query.\n",
+ "The [`MultiQueryRetriever`](https://api.js.langchain.com/classes/langchain_retrievers_multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query.\n",
"For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents.\n",
- "By generating multiple perspectives on the same question, the MultiQueryRetriever might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.\n",
+    "By generating multiple perspectives on the same question, the `MultiQueryRetriever` can help overcome some of the limitations of distance-based retrieval and get a richer set of results.\n",
"\n",
"## Get started\n",
"\n",
@@ -24,14 +33,14 @@
"\n",
"\n",
"\n",
- " @langchain/anthropic @langchain/community\n",
+ " @langchain/anthropic @langchain/cohere\n",
"\n",
"```"
]
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 1,
"metadata": {},
"outputs": [
{
@@ -80,7 +89,11 @@
" [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],\n",
" embeddings\n",
");\n",
- "const model = new ChatAnthropic({});\n",
+ "\n",
+ "const model = new ChatAnthropic({\n",
+ " model: \"claude-3-sonnet-20240229\"\n",
+ "});\n",
+ "\n",
"const retriever = MultiQueryRetriever.fromLLM({\n",
" llm: model,\n",
" retriever: vectorstore.asRetriever(),\n",
@@ -108,25 +121,49 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 2,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[\n",
+ " Document {\n",
+ " pageContent: \"Mitochondrien bestehen aus Lipiden\",\n",
+ " metadata: {}\n",
+ " },\n",
+ " Document {\n",
+ " pageContent: \"Mitochondrien sind die Energiekraftwerke der Zelle\",\n",
+ " metadata: {}\n",
+ " },\n",
+ " Document {\n",
+ " pageContent: \"Gebäude werden aus Stein hergestellt\",\n",
+ " metadata: { id: 3 }\n",
+ " },\n",
+ " Document {\n",
+ " pageContent: \"Autos werden aus Metall hergestellt\",\n",
+ " metadata: { id: 4 }\n",
+ " },\n",
+ " Document {\n",
+ " pageContent: \"Gebäude werden aus Holz hergestellt\",\n",
+ " metadata: { id: 2 }\n",
+ " },\n",
+ " Document {\n",
+ " pageContent: \"Gebäude werden aus Ziegelsteinen hergestellt\",\n",
+ " metadata: { id: 1 }\n",
+ " }\n",
+ "]\n"
+ ]
+ }
+ ],
"source": [
- "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n",
- "import { MultiQueryRetriever } from \"langchain/retrievers/multi_query\";\n",
"import { LLMChain } from \"langchain/chains\";\n",
"import { pull } from \"langchain/hub\";\n",
"import { BaseOutputParser } from \"@langchain/core/output_parsers\";\n",
"import { PromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { ChatAnthropic } from \"@langchain/anthropic\";\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [],
- "source": [
+ "\n",
+ "\n",
"type LineList = {\n",
" lines: string[];\n",
"};\n",
@@ -155,60 +192,13 @@
" getFormatInstructions(): string {\n",
" throw new Error(\"Not implemented.\");\n",
" }\n",
- "}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {},
- "outputs": [],
- "source": [
- "// Default prompt is available at: https://smith.langchain.com/hub/jacob/multi-vector-retriever\n",
+ "}\n",
+ "\n",
+ "// Default prompt is available at: https://smith.langchain.com/hub/jacob/multi-vector-retriever-german\n",
"const prompt: PromptTemplate = await pull(\n",
" \"jacob/multi-vector-retriever-german\"\n",
- ");"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " Document {\n",
- " pageContent: \"Mitochondrien sind die Energiekraftwerke der Zelle\",\n",
- " metadata: {}\n",
- " },\n",
- " Document {\n",
- " pageContent: \"Mitochondrien bestehen aus Lipiden\",\n",
- " metadata: {}\n",
- " },\n",
- " Document {\n",
- " pageContent: \"Autos werden aus Metall hergestellt\",\n",
- " metadata: { id: 4 }\n",
- " },\n",
- " Document {\n",
- " pageContent: \"Gebäude werden aus Stein hergestellt\",\n",
- " metadata: { id: 3 }\n",
- " },\n",
- " Document {\n",
- " pageContent: \"Gebäude werden aus Holz hergestellt\",\n",
- " metadata: { id: 2 }\n",
- " },\n",
- " Document {\n",
- " pageContent: \"Gebäude werden aus Ziegelsteinen hergestellt\",\n",
- " metadata: { id: 1 }\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
+ ");\n",
+ "\n",
"const vectorstore = await MemoryVectorStore.fromTexts(\n",
" [\n",
" \"Gebäude werden aus Ziegelsteinen hergestellt\",\n",
@@ -242,6 +232,18 @@
"\n",
"console.log(retrievedDocs);"
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Next steps\n",
+ "\n",
+ "You've now learned how to use the `MultiQueryRetriever` to query a vector store with automatically generated queries.\n",
+ "\n",
+ "See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to\n",
+ "[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom)."
+ ]
}
],
"metadata": {
diff --git a/docs/core_docs/docs/how_to/parent_document_retriever.mdx b/docs/core_docs/docs/how_to/parent_document_retriever.mdx
index 54ab64013b16..7f65ffcbfa0d 100644
--- a/docs/core_docs/docs/how_to/parent_document_retriever.mdx
+++ b/docs/core_docs/docs/how_to/parent_document_retriever.mdx
@@ -1,7 +1,3 @@
----
-hide_table_of_contents: true
----
-
import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/parent_document_retriever.ts";
import ExampleWithScoreThreshold from "@examples/retrievers/parent_document_retriever_score_threshold.ts";
@@ -10,15 +6,27 @@ import ExampleWithRerank from "@examples/retrievers/parent_document_retriever_re
# How to retrieve the whole document for a chunk
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Retrievers](/docs/concepts/#retrievers)
+- [Text splitters](/docs/concepts/#text-splitters)
+- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
+
+:::
+
When splitting documents for retrieval, there are often conflicting desires:
-1. You may want to have small documents, so that their embeddings can most accurately reflect their meaning. If too long, then the embeddings can lose meaning.
+1. You may want to have small documents, so that their embeddings can most accurately reflect their meaning. If documents are too long, then the embeddings can lose meaning.
2. You want to have long enough documents that the context of each chunk is retained.
-The ParentDocumentRetriever strikes that balance by splitting and storing small chunks of data. During retrieval, it first fetches the small chunks but then looks up the parent ids for those chunks and returns those larger documents.
+The [`ParentDocumentRetriever`](https://api.js.langchain.com/classes/langchain_retrievers_parent_document.ParentDocumentRetriever.html) strikes that balance by splitting and storing small chunks of data. During retrieval, it first fetches the small chunks but then looks up the parent ids for those chunks and returns those larger documents.
Note that "parent document" refers to the document that a small chunk originated from. This can either be the whole raw document OR a larger chunk.
+This is a more specific form of [generating multiple embeddings per document](/docs/how_to/multi_vector).
+
## Usage
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
@@ -60,3 +68,10 @@ more expensive.
So there are two reasons to use rerank - precision and costs.
{ExampleWithRerank}
+
+## Next steps
+
+You've now learned how to use the `ParentDocumentRetriever`.
+
+Next, check out the more general form of [generating multiple embeddings per document](/docs/how_to/multi_vector), the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
+[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom).
diff --git a/docs/core_docs/docs/how_to/matryoshka_retriever.mdx b/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx
similarity index 56%
rename from docs/core_docs/docs/how_to/matryoshka_retriever.mdx
rename to docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx
index 39f20b2201e4..7f2cdedd4f05 100644
--- a/docs/core_docs/docs/how_to/matryoshka_retriever.mdx
+++ b/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx
@@ -1,15 +1,19 @@
-# How to use a Matryoshka retriever
+# How to reduce retrieval latency
-This is an implementation of the [Supabase](https://supabase.com/) blog post
-["Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval"](https://supabase.com/blog/matryoshka-embeddings).
+:::info Prerequisites
-
+This guide assumes familiarity with the following concepts:
-### Overview
+- [Retrievers](/docs/concepts/#retrievers)
+- [Embeddings](/docs/concepts/#embedding-models)
+- [Vector stores](/docs/concepts/#vectorstores)
+- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
-This class performs "Adaptive Retrieval" for searching text embeddings efficiently using the
-Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query
-embedding in two steps:
+:::
+
+One way to reduce retrieval latency is through a technique called "Adaptive Retrieval".
+The [`MatryoshkaRetriever`](https://api.js.langchain.com/classes/langchain_retrievers_matryoshka_retriever.MatryoshkaRetriever.html) uses the
+Matryoshka Representation Learning (MRL) technique to retrieve documents for a given query in two steps:
- **First-pass**: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,
but less accurate search.
@@ -17,10 +21,10 @@ embedding in two steps:
- **Second-pass**: Re-ranks the top results from the first pass using the full, high-dimensional
embedding for higher accuracy.
-This code demonstrates using MRL embeddings for efficient vector search by combining faster,
-lower-dimensional initial search with accurate, high-dimensional re-ranking.
+
-## Example
+It is based on this [Supabase](https://supabase.com/) blog post
+["Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval"](https://supabase.com/blog/matryoshka-embeddings).
### Setup
@@ -48,3 +52,10 @@ import Example from "@examples/retrievers/matryoshka_retriever.ts";
:::note
Due to the constraints of some vector stores, the large embedding metadata field is stringified (`JSON.stringify`) before being stored. This means that the metadata field will need to be parsed (`JSON.parse`) when retrieved from the vector store.
:::
+
+## Next steps
+
+You've now learned a technique that can help speed up your retrieval queries.
+
+Next, check out the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
+[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom).
diff --git a/docs/core_docs/docs/how_to/structured_output.ipynb b/docs/core_docs/docs/how_to/structured_output.ipynb
index 5793fd23bf96..e4d1ca8d5331 100644
--- a/docs/core_docs/docs/how_to/structured_output.ipynb
+++ b/docs/core_docs/docs/how_to/structured_output.ipynb
@@ -19,13 +19,13 @@
"\n",
"It is often useful to have a model return output that matches some specific schema. One common use-case is extracting data from arbitrary text to insert into a traditional database or use with some other downstrem system. This guide will show you a few different strategies you can use to do this.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
"\n",
- "\n",
- "```\n",
+ "\n",
+ ":::\n",
"\n",
"## The `.withStructuredOutput()` method\n",
"\n",
diff --git a/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx b/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx
index 15de1d8e49b1..524dadc0bbce 100644
--- a/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx
+++ b/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx
@@ -1,6 +1,17 @@
# How to create a time-weighted retriever
-This retriever uses a combination of semantic similarity and a time decay.
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Retrievers](/docs/concepts/#retrievers)
+- [Vector stores](/docs/concepts/#vectorstores)
+- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
+
+:::
+
+This guide covers the [`TimeWeightedVectorStoreRetriever`](https://api.js.langchain.com/classes/langchain_retrievers_time_weighted.TimeWeightedVectorStoreRetriever.html),
+which uses a combination of semantic similarity and a time decay.
The algorithm for scoring them is:
@@ -18,9 +29,6 @@ let score = (1.0 - this.decayRate) ** hoursPassed + vectorRelevance;
Note that setting a decay rate of exactly 0 or 1 makes `hoursPassed` irrelevant and makes this retriever equivalent to a standard vector lookup.
-## Usage
-
-This example shows how to intialize a `TimeWeightedVectorStoreRetriever` with a vector store.
It is important to note that due to required metadata, all documents must be added to the backing vector store using the `addDocuments` method on the **retriever**, not the vector store itself.
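+
+Here's a minimal sketch of that flow (illustrative values; the complete example follows below):
+
+```typescript
+import { OpenAIEmbeddings } from "@langchain/openai";
+import { MemoryVectorStore } from "langchain/vectorstores/memory";
+import { TimeWeightedVectorStoreRetriever } from "langchain/retrievers/time_weighted";
+
+const retriever = new TimeWeightedVectorStoreRetriever({
+  vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
+  memoryStream: [],
+  searchKwargs: 2,
+});
+
+// Add documents via the retriever so it can attach the timestamp metadata it needs.
+await retriever.addDocuments([
+  { pageContent: "My favorite vegetable is celery.", metadata: {} },
+]);
+
+const results = await retriever.invoke("What vegetable do I like?");
+```
+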
import CodeBlock from "@theme/CodeBlock";
@@ -35,3 +43,10 @@ npm install @langchain/openai
```
{Example}
+
+## Next steps
+
+You've now learned how to use time as a factor when performing retrieval.
+
+Next, check out the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
+[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom).
diff --git a/docs/core_docs/docs/how_to/vectorstore_retriever.mdx b/docs/core_docs/docs/how_to/vectorstore_retriever.mdx
index 946bd92df0bd..fe1eeeff9f4a 100644
--- a/docs/core_docs/docs/how_to/vectorstore_retriever.mdx
+++ b/docs/core_docs/docs/how_to/vectorstore_retriever.mdx
@@ -1,73 +1,26 @@
----
-sidebar_position: 0
----
-
# How to use a vector store to retrieve data
-A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store.
-A retriever does not need to be able to store documents, only to return (or retrieve) them. Vector stores can be used
-as the backbone of a retriever, but there are other types of retrievers as well.
-
-Retrievers accept a string query as input and return a list of `Document`'s as output.
-
-## Advanced Retrieval Types
-
-LangChain provides several advanced retrieval types. A full list is below, along with the following information:
-
-**Name**: Name of the retrieval algorithm.
+:::info Prerequisites
-**Index Type**: Which index type (if any) this relies on.
+This guide assumes familiarity with the following concepts:
-**Uses an LLM**: Whether this retrieval method uses an LLM.
+- [Vector stores](/docs/concepts/#vectorstores)
+- [Retrievers](/docs/concepts/#retrievers)
+- [Text splitters](/docs/concepts#text-splitters)
+- [Chaining runnables](/docs/how_to/sequence/)
-**When to Use**: Our commentary on when you should considering using this retrieval method.
+:::
-**Description**: Description of what this retrieval algorithm is doing.
+Vector stores can be converted into retrievers using the [`.asRetriever()`](https://api.js.langchain.com/classes/langchain_core_vectorstores.VectorStore.html#asRetriever) method, which allows you to more easily compose them in chains.
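+
+For example (a minimal sketch; it assumes an existing `vectorStore` instance, and the query is illustrative):
+
+```typescript
+// Wrap the vector store in a retriever that returns the top 2 results.
+const retriever = vectorStore.asRetriever(2);
+
+// Retrievers are runnables, so they can be invoked directly or composed into chains.
+const docs = await retriever.invoke("What did the president say about Justice Breyer?");
+```
+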
-| Name | Index Type | Uses an LLM | When to Use | Description |
-| ----------------------------------------------------------------------------------------------- | ---------------------------- | ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [Vectorstore](/docs/modules/data_connection/retrievers/vectorstore) | Vectorstore | No | If you are just getting started and looking for something quick and easy. | This is the simplest method and the one that is easiest to get started with. It involves creating embeddings for each piece of text. |
-| [ParentDocument](/docs/modules/data_connection/retrievers/parent-document-retriever) | Vectorstore + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). |
-| [Multi Vector](/docs/modules/data_connection/retrievers/multi-vector-retriever) | Vectorstore + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. |
-| [Self Query](/docs/modules/data_connection/retrievers/self_query/) | Vectorstore | Yes | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
-| [Contextual Compression](/docs/modules/data_connection/retrievers/contextual_compression) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. |
-| [Time-Weighted Vectorstore](/docs/modules/data_connection/retrievers/time_weighted_vectorstore) | Vectorstore | No | If you have timestamps associated with your documents, and you want to retrieve the most recent ones | This fetches documents based on a combination of semantic similarity (as in normal vector retrieval) and recency (looking at timestamps of indexed documents) |
-| [Multi-Query Retriever](/docs/modules/data_connection/retrievers/multi-query-retriever) | Any | Yes | If users are asking questions that are complex and require multiple pieces of distinct information to respond | This uses an LLM to generate multiple queries from the original one. This is useful when the original query needs pieces of information about multiple topics to be properly answered. By generating multiple queries, we can then fetch documents for each of them. |
+Below, we show a retrieval-augmented generation (RAG) chain that performs question answering over documents using the following steps:
-## [Third Party Integrations](/docs/integrations/retrievers/)
-
-LangChain also integrates with many third-party retrieval services. For a full list of these, check out [this list](/docs/integrations/retrievers/) of all integrations.
-
-## Get started
-
-The public API of the `BaseRetriever` class in LangChain.js is as follows:
-
-```typescript
-export abstract class BaseRetriever {
- abstract getRelevantDocuments(query: string): Promise;
-}
-```
-
-It's that simple! You can call `getRelevantDocuments` to retrieve documents relevant to a query, where "relevance" is defined by
-the specific retriever object you are calling.
-
-Of course, we also help construct what we think useful Retrievers are. The main type of Retriever in LangChain is a vector store retriever. We will focus on that here.
-
-**Note:** Before reading, it's important to understand [what a vector store is](/docs/concepts#vectorstores).
-
-This example showcases question answering over documents.
-We have chosen this as the example for getting started because it nicely combines a lot of different elements (Text splitters, embeddings, vectorstores) and then also shows how to use them in a chain.
-
-Question answering over documents consists of four steps:
-
-1. Create an index
-2. Create a Retriever from that index
-3. Create a question answering chain
+1. Initialize a vector store
+2. Create a retriever from that vector store
+3. Compose a question answering chain
4. Ask questions!
-Each of the steps has multiple sub steps and potential configurations, but we'll go through one common flow using HNSWLib, a local vector store.
-This assumes you're using Node, but you can swap in another integration if necessary.
-
+Each of the steps has multiple sub-steps and potential configurations, but we'll go through one common flow.
First, install the required dependency:
import CodeBlock from "@theme/CodeBlock";
@@ -77,7 +30,7 @@ import IntegrationInstallTooltip from "@mdx_components/integration_install_toolt
```bash npm2yarn
-npm install @langchain/openai hnswlib-node @langchain/community
+npm install @langchain/openai
```
You can download the `state_of_the_union.txt` file [here](https://github.com/langchain-ai/langchain/blob/master/docs/docs/modules/state_of_the_union.txt).
@@ -97,5 +50,9 @@ Let's walk through what's happening here.
4. We ask questions!
-See the individual sections for deeper dives on specific retrievers, or this section to learn how to
+## Next steps
+
+You've now learned how to convert a vector store into a retriever.
+
+See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
[create your own custom retriever over any data source](/docs/modules/data_connection/retrievers/custom).
diff --git a/docs/core_docs/docs/how_to/vectorstores.mdx b/docs/core_docs/docs/how_to/vectorstores.mdx
index ef2d6be9c6f3..6d1560cc9222 100644
--- a/docs/core_docs/docs/how_to/vectorstores.mdx
+++ b/docs/core_docs/docs/how_to/vectorstores.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 3
keywords: [similaritySearchWithScore]
---
@@ -9,20 +8,37 @@ keywords: [similaritySearchWithScore]
Head to [Integrations](/docs/integrations/vectorstores) for documentation on built-in integrations with vectorstore providers.
:::
+:::info Prerequisites
+
+This guide assumes familiarity with the following concepts:
+
+- [Vector stores](/docs/concepts/#vectorstores)
+- [Embeddings](/docs/concepts/#embedding-models)
+- [Document loaders](/docs/concepts#document-loaders)
+
+:::
+
One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding
vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are
'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search
for you.
-## Get started
+This walkthrough uses a basic, unoptimized implementation called [`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html) that stores embeddings in-memory and does an exact, linear search for the most similar embeddings.
+LangChain contains many built-in integrations - see [this section](/docs/how_to/vectorstores/#which-one-to-pick) for more, or the [full list of integrations](/docs/integrations/vectorstores/).
-This walkthrough showcases basic functionality related to VectorStores. A key part of working with vector stores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model](/docs/concepts#embedding-models) interfaces before diving into this.
+## Creating a new index
-This walkthrough uses a basic, unoptimized implementation called MemoryVectorStore that stores embeddings in-memory and does an exact, linear search for the most similar embeddings.
+Most of the time, you'll need to load and prepare the data you want to search over. Here's an example that loads a recent speech from a file:
-## Usage
+import ExampleLoader from "@examples/indexes/vector_stores/memory_fromdocs.ts";
-### Create a new index from texts
+{ExampleLoader}
+
+You'll often also need to split the loaded text into smaller chunks as a preparation step, as sketched below. See [this section](/docs/concepts/#text-splitters) to learn more about text splitters.
+
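+Here's a minimal sketch of that step (the chunk sizes are illustrative, and `docs` is assumed to be the array of documents loaded above):
+
+```typescript
+import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
+
+const splitter = new RecursiveCharacterTextSplitter({
+  chunkSize: 1000,
+  chunkOverlap: 200,
+});
+
+// Splits the loaded documents into smaller, overlapping chunks.
+const splitDocs = await splitter.splitDocuments(docs);
+```
+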
+## Creating a new index from texts
+
+If you have already prepared the data you want to search over, you can initialize a vector store directly from text chunks:
import CodeBlock from "@theme/CodeBlock";
import ExampleTexts from "@examples/indexes/vector_stores/memory.ts";
@@ -37,98 +53,6 @@ npm install @langchain/openai
{ExampleTexts}
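+
+Once the vector store is created, you can also query it directly. Here's a minimal sketch (it assumes the `vectorStore` variable from the example above; the query and `k` value are illustrative):
+
+```typescript
+// Return the 2 most similar documents.
+const results = await vectorStore.similaritySearch("hello world", 2);
+
+// Return documents along with their similarity scores.
+const resultsWithScores = await vectorStore.similaritySearchWithScore("hello world", 2);
+```
+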
-### Create a new index from a loader
-
-import ExampleLoader from "@examples/indexes/vector_stores/memory_fromdocs.ts";
-
-{ExampleLoader}
-
-Here is the current base interface all vector stores share:
-
-```typescript
-interface VectorStore {
- /**
- * Add more documents to an existing VectorStore.
- * Some providers support additional parameters, e.g. to associate custom ids
- * with added documents or to change the batch size of bulk inserts.
- * Returns an array of ids for the documents or nothing.
- */
- addDocuments(
- documents: Document[],
- options?: Record
- ): Promise;
-
- /**
- * Search for the most similar documents to a query
- */
- similaritySearch(
- query: string,
- k?: number,
- filter?: object | undefined
- ): Promise;
-
- /**
- * Search for the most similar documents to a query,
- * and return their similarity score
- */
- similaritySearchWithScore(
- query: string,
- k = 4,
- filter: object | undefined = undefined
- ): Promise<[object, number][]>;
-
- /**
- * Turn a VectorStore into a Retriever
- */
- asRetriever(k?: number): BaseRetriever;
-
- /**
- * Delete embedded documents from the vector store matching the passed in parameter.
- * Not supported by every provider.
- */
- delete(params?: Record): Promise;
-
- /**
- * Advanced: Add more documents to an existing VectorStore,
- * when you already have their embeddings
- */
- addVectors(
- vectors: number[][],
- documents: Document[],
- options?: Record
- ): Promise;
-
- /**
- * Advanced: Search for the most similar documents to a query,
- * when you already have the embedding of the query
- */
- similaritySearchVectorWithScore(
- query: number[],
- k: number,
- filter?: object
- ): Promise<[Document, number][]>;
-}
-```
-
-You can create a vector store from a list of [Documents](https://api.js.langchain.com/classes/langchain_core_documents.Document.html), or from a list of texts and their corresponding metadata. You can also create a vector store from an existing index, the signature of this method depends on the vector store you're using, check the documentation of the vector store you're interested in.
-
-```typescript
-abstract class BaseVectorStore implements VectorStore {
- static fromTexts(
- texts: string[],
- metadatas: object[] | object,
- embeddings: EmbeddingsInterface,
- dbConfig: Record
- ): Promise;
-
- static fromDocuments(
- docs: Document[],
- embeddings: EmbeddingsInterface,
- dbConfig: Record
- ): Promise;
-}
-```
-
## Which one to pick?
Here's a quick guide to help you pick the right vector store for your use case:
@@ -146,3 +70,9 @@ Here's a quick guide to help you pick the right vector store for your use case:
- If you're in search of a cost-effective vector database that allows you to run vector search with SQL, look no further than [MyScale](/docs/integrations/vectorstores/myscale).
- If you're in search of a vector database that you can load from both the browser and server side, check out [CloseVector](/docs/integrations/vectorstores/closevector). It's a vector database that aims to be cross-platform.
- If you're looking for a scalable, open-source columnar database with excellent performance for analytical queries, then consider [ClickHouse](/docs/integrations/vectorstores/clickhouse).
+
+## Next steps
+
+You've now learned how to load data into a vectorstore.
+
+Next, check out the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag).
diff --git a/examples/.eslintrc.cjs b/examples/.eslintrc.cjs
index 4f1f68d0bb86..961838316726 100644
--- a/examples/.eslintrc.cjs
+++ b/examples/.eslintrc.cjs
@@ -41,6 +41,7 @@ module.exports = {
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-else-return": 0,
+ "arrow-body-style": 0,
semi: ["error", "always"],
"unused-imports/no-unused-imports": "error",
},
diff --git a/examples/src/chains/retrieval_qa.ts b/examples/src/chains/retrieval_qa.ts
index f5c775b5a2c0..9123fbe183f8 100644
--- a/examples/src/chains/retrieval_qa.ts
+++ b/examples/src/chains/retrieval_qa.ts
@@ -1,26 +1,32 @@
-import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
+import * as fs from "node:fs";
+
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
-import * as fs from "fs";
-import { formatDocumentsAsString } from "langchain/util/document";
+import { MemoryVectorStore } from "langchain/vectorstores/memory";
import {
RunnablePassthrough,
RunnableSequence,
} from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
-import {
- ChatPromptTemplate,
- HumanMessagePromptTemplate,
- SystemMessagePromptTemplate,
-} from "@langchain/core/prompts";
+import { ChatPromptTemplate } from "@langchain/core/prompts";
+import type { Document } from "@langchain/core/documents";
+
+const formatDocumentsAsString = (documents: Document[]) => {
+ return documents.map((document) => document.pageContent).join("\n\n");
+};
// Initialize the LLM to use to answer the question.
-const model = new ChatOpenAI({});
+const model = new ChatOpenAI({
+ model: "gpt-4o",
+});
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
// Create a vector store from the documents.
-const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
+const vectorStore = await MemoryVectorStore.fromDocuments(
+ docs,
+ new OpenAIEmbeddings()
+);
// Initialize a retriever wrapper around the vector store
const vectorStoreRetriever = vectorStore.asRetriever();
@@ -30,11 +36,11 @@ const SYSTEM_TEMPLATE = `Use the following pieces of context to answer the quest
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}`;
-const messages = [
- SystemMessagePromptTemplate.fromTemplate(SYSTEM_TEMPLATE),
- HumanMessagePromptTemplate.fromTemplate("{question}"),
-];
-const prompt = ChatPromptTemplate.fromMessages(messages);
+
+const prompt = ChatPromptTemplate.fromMessages([
+ ["system", SYSTEM_TEMPLATE],
+ ["human", "{question}"],
+]);
const chain = RunnableSequence.from([
{
@@ -53,7 +59,7 @@ const answer = await chain.invoke(
console.log({ answer });
/*
-{
- answer: 'The president thanked Justice Stephen Breyer for his service and honored him for his dedication to the country.'
-}
+ {
+ answer: 'The president honored Justice Stephen Breyer by recognizing his dedication to serving the country as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. He thanked Justice Breyer for his service.'
+ }
*/
diff --git a/examples/src/retrievers/matryoshka_retriever.ts b/examples/src/retrievers/matryoshka_retriever.ts
index 5e5a86a94985..b9fe6a96906a 100644
--- a/examples/src/retrievers/matryoshka_retriever.ts
+++ b/examples/src/retrievers/matryoshka_retriever.ts
@@ -6,11 +6,12 @@ import { faker } from "@faker-js/faker";
const smallEmbeddings = new OpenAIEmbeddings({
model: "text-embedding-3-small",
- dimensions: 512, // Min num for small
+ dimensions: 512, // Min number for small
});
+
const largeEmbeddings = new OpenAIEmbeddings({
model: "text-embedding-3-large",
- dimensions: 3072, // Max num for large
+ dimensions: 3072, // Max number for large
});
const vectorStore = new Chroma(smallEmbeddings, {
@@ -58,10 +59,11 @@ await retriever.addDocuments(allDocs);
const query = "What is LangChain?";
const results = await retriever.invoke(query);
console.log(results.map(({ pageContent }) => pageContent).join("\n"));
+
/**
-I heart LangChain
-LangGraph is a new open source library by the LangChain team
-LangChain is an open source github repo
-LangChain announced GA of LangSmith last week!
-There are JS and PY versions of the LangChain github repos
- */
+ I heart LangChain
+ LangGraph is a new open source library by the LangChain team
+ LangChain is an open source github repo
+ LangChain announced GA of LangSmith last week!
+ There are JS and PY versions of the LangChain github repos
+*/
diff --git a/langchain-core/.eslintrc.cjs b/langchain-core/.eslintrc.cjs
index 6b750e5e8fee..98da58aebf69 100644
--- a/langchain-core/.eslintrc.cjs
+++ b/langchain-core/.eslintrc.cjs
@@ -68,5 +68,6 @@ module.exports = {
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
'jest/no-focused-tests': 'error',
+ "arrow-body-style": 0,
},
};
diff --git a/langchain/.eslintrc.cjs b/langchain/.eslintrc.cjs
index 61507cd81555..78a4a28e2174 100644
--- a/langchain/.eslintrc.cjs
+++ b/langchain/.eslintrc.cjs
@@ -68,5 +68,6 @@ module.exports = {
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
'jest/no-focused-tests': 'error',
+ "arrow-body-style": 0,
},
};
diff --git a/libs/langchain-community/.eslintrc.cjs b/libs/langchain-community/.eslintrc.cjs
index 344f8a9d6cd9..19da4b88cc2c 100644
--- a/libs/langchain-community/.eslintrc.cjs
+++ b/libs/langchain-community/.eslintrc.cjs
@@ -62,5 +62,6 @@ module.exports = {
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
+ "arrow-body-style": 0,
},
};
diff --git a/libs/langchain-scripts/.eslintrc.cjs b/libs/langchain-scripts/.eslintrc.cjs
index 344f8a9d6cd9..19da4b88cc2c 100644
--- a/libs/langchain-scripts/.eslintrc.cjs
+++ b/libs/langchain-scripts/.eslintrc.cjs
@@ -62,5 +62,6 @@ module.exports = {
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
+ "arrow-body-style": 0,
},
};