28
28
T = TypeVar ("T" )
29
29
OutputParserLike = Runnable [LanguageModelOutput , T ]
30
30
31
# Centralized error message used whenever a parser receives model output that
# was cut off because the model hit its `max_tokens` limit (stop_reason ==
# "max_tokens"); defined once so every invoke/ainvoke path raises identically.
MAX_TOKENS_ERROR: str = (
    "Output parser received a max_tokens stop reason. "
    "The output is likely incomplete—please increase `max_tokens` "
    "or shorten your prompt."
)
37
+
38
+
39
def _raise_max_tokens_error(e: ValidationError, input_message: BaseMessage) -> None:
    """Re-raise a parsing failure, translating truncation into a clear error.

    If the message's ``response_metadata`` shows the model stopped because it
    hit ``max_tokens``, the validation failure is almost certainly caused by
    truncated output, so raise a ``ValueError`` with the centralized
    ``MAX_TOKENS_ERROR`` message (chained to ``e``). Otherwise propagate the
    original ``ValidationError`` unchanged.
    """
    stop_reason = input_message.response_metadata.get("stop_reason")
    # Guard clause: anything other than a max_tokens stop is not a truncation
    # problem — surface the original validation error as-is.
    if stop_reason != "max_tokens":
        raise e
    raise ValueError(MAX_TOKENS_ERROR) from e
44
+
31
45
32
46
class BaseLLMOutputParser (Generic [T ], ABC ):
33
47
"""Abstract base class for parsing the outputs of a model."""
@@ -100,14 +114,7 @@ def invoke(
100
114
run_type = "parser" ,
101
115
)
102
116
except ValidationError as e :
103
- if input .response_metadata .get ("stop_reason" ) == "max_tokens" :
104
- max_tokens_error = (
105
- "Output parser received a max_tokens stop reason. "
106
- "The output is likely incomplete—please increase `max_tokens` "
107
- "or shorten your prompt."
108
- )
109
- raise ValueError (max_tokens_error ) from e
110
- raise
117
+ _raise_max_tokens_error (e , input )
111
118
return self ._call_with_config (
112
119
lambda inner_input : self .parse_result ([Generation (text = inner_input )]),
113
120
input ,
@@ -133,14 +140,7 @@ async def ainvoke(
133
140
run_type = "parser" ,
134
141
)
135
142
except ValidationError as e :
136
- if input .response_metadata .get ("stop_reason" ) == "max_tokens" :
137
- max_tokens_error = (
138
- "Output parser received a max_tokens stop reason. "
139
- "The output is likely incomplete—please increase `max_tokens` "
140
- "or shorten your prompt."
141
- )
142
- raise ValueError (max_tokens_error ) from e
143
- raise
143
+ _raise_max_tokens_error (e , input )
144
144
return await self ._acall_with_config (
145
145
lambda inner_input : self .aparse_result ([Generation (text = inner_input )]),
146
146
input ,
@@ -224,14 +224,7 @@ def invoke(
224
224
run_type = "parser" ,
225
225
)
226
226
except ValidationError as e :
227
- if input .response_metadata .get ("stop_reason" ) == "max_tokens" :
228
- max_tokens_error = (
229
- "Output parser received a max_tokens stop reason. "
230
- "The output is likely incomplete—please increase `max_tokens` "
231
- "or shorten your prompt."
232
- )
233
- raise ValueError (max_tokens_error ) from e
234
- raise
227
+ _raise_max_tokens_error (e , input )
235
228
return self ._call_with_config (
236
229
lambda inner_input : self .parse_result ([Generation (text = inner_input )]),
237
230
input ,
@@ -257,14 +250,7 @@ async def ainvoke(
257
250
run_type = "parser" ,
258
251
)
259
252
except ValidationError as e :
260
- if input .response_metadata .get ("stop_reason" ) == "max_tokens" :
261
- max_tokens_error = (
262
- "Output parser received a max_tokens stop reason. "
263
- "The output is likely incomplete—please increase `max_tokens` "
264
- "or shorten your prompt."
265
- )
266
- raise ValueError (max_tokens_error ) from e
267
- raise
253
+ _raise_max_tokens_error (e , input )
268
254
return await self ._acall_with_config (
269
255
lambda inner_input : self .aparse_result ([Generation (text = inner_input )]),
270
256
input ,
0 commit comments