
Commit afe421f

created a function and variable to avoid repetition
1 parent 42aa548 commit afe421f

libs/core/langchain_core/output_parsers/base.py

Lines changed: 18 additions & 32 deletions
@@ -28,6 +28,20 @@
 T = TypeVar("T")
 OutputParserLike = Runnable[LanguageModelOutput, T]
 
+# Centralized error message
+MAX_TOKENS_ERROR = (
+    "Output parser received a max_tokens stop reason. "
+    "The output is likely incomplete—please increase `max_tokens` "
+    "or shorten your prompt."
+)
+
+
+def _raise_max_tokens_error(e: ValidationError, input_message: BaseMessage) -> None:
+    """Check if error is due to max_tokens and raise appropriate error."""
+    if input_message.response_metadata.get("stop_reason") == "max_tokens":
+        raise ValueError(MAX_TOKENS_ERROR) from e
+    raise e
+
 
 class BaseLLMOutputParser(Generic[T], ABC):
     """Abstract base class for parsing the outputs of a model."""
@@ -100,14 +114,7 @@ def invoke(
                     run_type="parser",
                 )
             except ValidationError as e:
-                if input.response_metadata.get("stop_reason") == "max_tokens":
-                    max_tokens_error = (
-                        "Output parser received a max_tokens stop reason. "
-                        "The output is likely incomplete—please increase `max_tokens` "
-                        "or shorten your prompt."
-                    )
-                    raise ValueError(max_tokens_error) from e
-                raise
+                _raise_max_tokens_error(e, input)
         return self._call_with_config(
             lambda inner_input: self.parse_result([Generation(text=inner_input)]),
             input,
@@ -133,14 +140,7 @@ async def ainvoke(
                     run_type="parser",
                 )
             except ValidationError as e:
-                if input.response_metadata.get("stop_reason") == "max_tokens":
-                    max_tokens_error = (
-                        "Output parser received a max_tokens stop reason. "
-                        "The output is likely incomplete—please increase `max_tokens` "
-                        "or shorten your prompt."
-                    )
-                    raise ValueError(max_tokens_error) from e
-                raise
+                _raise_max_tokens_error(e, input)
         return await self._acall_with_config(
             lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
             input,
@@ -224,14 +224,7 @@ def invoke(
                     run_type="parser",
                 )
             except ValidationError as e:
-                if input.response_metadata.get("stop_reason") == "max_tokens":
-                    max_tokens_error = (
-                        "Output parser received a max_tokens stop reason. "
-                        "The output is likely incomplete—please increase `max_tokens` "
-                        "or shorten your prompt."
-                    )
-                    raise ValueError(max_tokens_error) from e
-                raise
+                _raise_max_tokens_error(e, input)
         return self._call_with_config(
             lambda inner_input: self.parse_result([Generation(text=inner_input)]),
             input,
@@ -257,14 +250,7 @@ async def ainvoke(
                     run_type="parser",
                 )
             except ValidationError as e:
-                if input.response_metadata.get("stop_reason") == "max_tokens":
-                    max_tokens_error = (
-                        "Output parser received a max_tokens stop reason. "
-                        "The output is likely incomplete—please increase `max_tokens` "
-                        "or shorten your prompt."
-                    )
-                    raise ValueError(max_tokens_error) from e
-                raise
+                _raise_max_tokens_error(e, input)
         return await self._acall_with_config(
             lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
             input,
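
For illustration, here is a minimal, self-contained sketch of the behavior this commit centralizes. `FakeMessage` and the plain `Exception` are stand-ins for langchain_core's `BaseMessage` and pydantic's `ValidationError`, used only so the snippet runs without either library; they are not part of the commit:

```python
# Sketch of the refactored error path, assuming FakeMessage stands in for
# BaseMessage and Exception for pydantic's ValidationError.

MAX_TOKENS_ERROR = (
    "Output parser received a max_tokens stop reason. "
    "The output is likely incomplete—please increase `max_tokens` "
    "or shorten your prompt."
)


class FakeMessage:
    """Hypothetical stand-in carrying only the response_metadata dict."""

    def __init__(self, response_metadata: dict) -> None:
        self.response_metadata = response_metadata


def _raise_max_tokens_error(e: Exception, input_message: FakeMessage) -> None:
    """Re-raise e, upgrading it to a descriptive ValueError on truncation."""
    if input_message.response_metadata.get("stop_reason") == "max_tokens":
        raise ValueError(MAX_TOKENS_ERROR) from e
    raise e


# Truncated output: the centralized ValueError is raised, chained to the cause.
try:
    _raise_max_tokens_error(
        Exception("validation failed"),
        FakeMessage({"stop_reason": "max_tokens"}),
    )
except ValueError as err:
    print(err)            # the MAX_TOKENS_ERROR message
    print(err.__cause__)  # validation failed

# Any other stop reason: the original error propagates unchanged.
try:
    _raise_max_tokens_error(
        Exception("validation failed"),
        FakeMessage({"stop_reason": "end_turn"}),
    )
except Exception as err:
    print(type(err).__name__)  # Exception
```

The design point the diff makes: four identical except-blocks collapse into one call site each, so the error text and the stop-reason check live in exactly one place.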
