@@ -6,22 +6,18 @@ import {
 } from "@/types"
 import OpenAI from "openai"
 import { z } from "zod"
-import ZodStream, {
-  CompletionMeta,
-  OAIResponseParser,
-  OAIStream,
-  withResponseModel,
-  type Mode
-} from "zod-stream"
+import ZodStream, { OAIResponseParser, OAIStream, withResponseModel, type Mode } from "zod-stream"
 import { fromZodError } from "zod-validation-error"
 
 import {
   NON_OAI_PROVIDER_URLS,
   Provider,
+  PROVIDER_PARAMS_TRANSFORMERS,
   PROVIDER_SUPPORTED_MODES,
   PROVIDER_SUPPORTED_MODES_BY_MODEL,
   PROVIDERS
 } from "./constants/providers"
+import { CompletionMeta } from "./types"
 
 const MAX_RETRIES_DEFAULT = 0
 
@@ -109,7 +105,9 @@ class Instructor {
     let validationIssues = ""
     let lastMessage: OpenAI.ChatCompletionMessageParam | null = null
 
-    const completionParams = withResponseModel({
+    const paramsTransformer = PROVIDER_PARAMS_TRANSFORMERS?.[this.provider]?.[this.mode]
+
+    let completionParams = withResponseModel({
       params: {
         ...params,
         stream: false
@@ -118,6 +116,10 @@ class Instructor {
       response_model
     })
 
+    if (!!paramsTransformer) {
+      completionParams = paramsTransformer(completionParams)
+    }
+
     const makeCompletionCall = async () => {
       let resolvedParams = completionParams
 
@@ -135,17 +137,33 @@ class Instructor {
         }
       }
 
-      this.log("debug", response_model.name, "making completion call with params: ", resolvedParams)
+      let completion: OpenAI.Chat.Completions.ChatCompletion | null = null
 
-      const completion = await this.client.chat.completions.create(resolvedParams)
+      try {
+        completion = await this.client.chat.completions.create(resolvedParams)
+        this.log("debug", "raw standard completion response: ", completion)
+      } catch (error) {
+        this.log(
+          "error",
+          `Error making completion call - mode: ${this.mode} | Client base URL: ${this.client.baseURL} | with params:`,
+          resolvedParams,
+          `raw error`,
+          error
+        )
+
+        throw error
+      }
 
       const parsedCompletion = OAIResponseParser(
         completion as OpenAI.Chat.Completions.ChatCompletion
       )
+
       try {
-        return JSON.parse(parsedCompletion) as z.infer<T>
+        const data = JSON.parse(parsedCompletion) as z.infer<T> & { _meta?: CompletionMeta }
+        return { ...data, _meta: { usage: completion?.usage ?? undefined } }
       } catch (error) {
         this.log("error", "failed to parse completion", parsedCompletion, this.mode)
+        throw error
       }
     }
 
@@ -173,13 +191,29 @@ class Instructor {
         return validation.data
       } catch (error) {
         if (attempts < max_retries) {
-          this.log("debug", response_model.name, "Retrying, attempt: ", attempts)
-          this.log("warn", response_model.name, "Validation error: ", validationIssues)
+          this.log(
+            "debug",
+            `response model: ${response_model.name} - Retrying, attempt: `,
+            attempts
+          )
+          this.log(
+            "warn",
+            `response model: ${response_model.name} - Validation issues: `,
+            validationIssues
+          )
           attempts++
           return await makeCompletionCallWithRetries()
         } else {
-          this.log("debug", response_model.name, "Max attempts reached: ", attempts)
-          this.log("error", response_model.name, "Error: ", validationIssues)
+          this.log(
+            "debug",
+            `response model: ${response_model.name} - Max attempts reached: ${attempts}`
+          )
+          this.log(
+            "error",
+            `response model: ${response_model.name} - Validation issues: `,
+            validationIssues
+          )
+
           throw error
         }
       }
@@ -193,13 +227,15 @@ class Instructor {
     response_model,
     ...params
   }: ChatCompletionCreateParamsWithModel<T>): Promise<
-    AsyncGenerator<Partial<T> & { _meta: CompletionMeta }, void, unknown>
+    AsyncGenerator<Partial<T> & { _meta?: CompletionMeta }, void, unknown>
   > {
     if (max_retries) {
       this.log("warn", "max_retries is not supported for streaming completions")
     }
 
-    const completionParams = withResponseModel({
+    const paramsTransformer = PROVIDER_PARAMS_TRANSFORMERS?.[this.provider]?.[this.mode]
+
+    let completionParams = withResponseModel({
       params: {
         ...params,
         stream: true
@@ -208,13 +244,18 @@ class Instructor {
       mode: this.mode
     })
 
+    if (paramsTransformer) {
+      completionParams = paramsTransformer(completionParams)
+    }
+
     const streamClient = new ZodStream({
       debug: this.debug ?? false
     })
 
     return streamClient.create({
       completionPromise: async () => {
         const completion = await this.client.chat.completions.create(completionParams)
+        this.log("debug", "raw stream completion response: ", completion)
 
         return OAIStream({
           res: completion
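
The diff only shows how `PROVIDER_PARAMS_TRANSFORMERS` is indexed: optionally by provider, then by mode, yielding a function that rewrites the completion params before the API call. A rough sketch of the shape that lookup implies is below; the type names, the provider key, and the field being stripped are placeholders, not the actual contents of `./constants/providers`.

```ts
// Hypothetical sketch of the shape implied by
// PROVIDER_PARAMS_TRANSFORMERS?.[this.provider]?.[this.mode] in the diff above.
// Everything here is illustrative; it is not the real constant.
import OpenAI from "openai"

type ParamsTransformer = (
  params: OpenAI.Chat.Completions.ChatCompletionCreateParams
) => OpenAI.Chat.Completions.ChatCompletionCreateParams

// provider -> mode -> transformer; both levels are optional, hence the ?. lookups.
type ProviderParamsTransformers = Partial<Record<string, Partial<Record<string, ParamsTransformer>>>>

const EXAMPLE_TRANSFORMERS: ProviderParamsTransformers = {
  // Placeholder provider/mode entry: drop a param the provider's API rejects.
  "example-provider": {
    TOOLS: params => {
      const { tool_choice: _toolChoice, ...rest } = params
      return rest as OpenAI.Chat.Completions.ChatCompletionCreateParams
    }
  }
}

export { EXAMPLE_TRANSFORMERS }
```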
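The other caller-visible change is that the non-streaming path now returns `{ ...data, _meta: { usage } }` instead of the bare parsed object. A minimal consumer sketch, assuming the usual `Instructor({ client, mode })` setup from the package README; the model name, schema, and prompt are placeholders, and whether the public typings surface `_meta` on the non-streaming result depends on the rest of the PR.

```ts
// Minimal sketch of reading the new _meta field on a non-streaming call.
import Instructor from "@instructor-ai/instructor"
import OpenAI from "openai"
import { z } from "zod"

const oai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
const client = Instructor({ client: oai, mode: "TOOLS" })

const UserSchema = z.object({
  name: z.string(),
  age: z.number()
})

async function main() {
  const user = await client.chat.completions.create({
    messages: [{ role: "user", content: "Jason is 30 years old" }],
    model: "gpt-4-turbo-preview",
    response_model: { schema: UserSchema, name: "User" }
  })

  // The extracted fields are unchanged; token accounting now rides along in _meta.
  console.log(user.name, user.age, user._meta?.usage?.total_tokens)
}

main()
```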