Skip to content

Commit c205286

Browse files
authored
updating deps + picking up stream meta and types (#125)
1 parent 176ecc1 commit c205286

File tree

6 files changed

+25
-7
lines changed

6 files changed

+25
-7
lines changed

.changeset/small-tomatoes-scream.md

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"@instructor-ai/instructor": major
3+
---
4+
5+
Updating the zod-stream major version and the stream output types. This change moves the internal properties tacked onto the stream output from many individual `_` properties to a single `_meta` object with those properties nested inside it. It also adds explicit types so that, when used in TypeScript projects, it doesn't produce type errors.

bun.lockb

0 Bytes
Binary file not shown.

package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@
5151
},
5252
"homepage": "https://github.com/instructor-ai/instructor-js#readme",
5353
"dependencies": {
54-
"zod-stream": "0.0.8",
54+
"zod-stream": "1.0.0",
5555
"zod-validation-error": "^2.1.0"
5656
},
5757
"peerDependencies": {

src/instructor.ts

+10-2
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,13 @@ import {
66
} from "@/types"
77
import OpenAI from "openai"
88
import { z } from "zod"
9-
import ZodStream, { OAIResponseParser, OAIStream, withResponseModel, type Mode } from "zod-stream"
9+
import ZodStream, {
10+
CompletionMeta,
11+
OAIResponseParser,
12+
OAIStream,
13+
withResponseModel,
14+
type Mode
15+
} from "zod-stream"
1016
import { fromZodError } from "zod-validation-error"
1117

1218
import {
@@ -186,7 +192,9 @@ class Instructor {
186192
max_retries,
187193
response_model,
188194
...params
189-
}: ChatCompletionCreateParamsWithModel<T>): Promise<AsyncGenerator<Partial<T>, void, unknown>> {
195+
}: ChatCompletionCreateParamsWithModel<T>): Promise<
196+
AsyncGenerator<Partial<T> & { _meta: CompletionMeta }, void, unknown>
197+
> {
190198
if (max_retries) {
191199
this.log("warn", "max_retries is not supported for streaming completions")
192200
}

src/types/index.ts

+6-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,11 @@
11
import OpenAI from "openai"
22
import { Stream } from "openai/streaming"
33
import { z } from "zod"
4-
import { type Mode as ZMode, type ResponseModel as ZResponseModel } from "zod-stream"
4+
import {
5+
CompletionMeta,
6+
type Mode as ZMode,
7+
type ResponseModel as ZResponseModel
8+
} from "zod-stream"
59

610
export type LogLevel = "debug" | "info" | "warn" | "error"
711

@@ -33,7 +37,7 @@ export type ReturnTypeBasedOnParams<P> =
3337
response_model: ResponseModel<infer T>
3438
}
3539
) ?
36-
Promise<AsyncGenerator<Partial<z.infer<T>>, void, unknown>>
40+
Promise<AsyncGenerator<Partial<z.infer<T>> & { _meta: CompletionMeta }, void, unknown>>
3741
: P extends { response_model: ResponseModel<infer T> } ? Promise<z.infer<T>>
3842
: P extends { stream: true } ? Stream<OpenAI.Chat.Completions.ChatCompletionChunk>
3943
: OpenAI.Chat.Completions.ChatCompletion

tests/inference.test.ts

+3-2
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import OpenAI from "openai"
1212
import { Stream } from "openai/streaming"
1313
import { type } from "ts-inference-check"
1414
import { z } from "zod"
15+
import { CompletionMeta } from "zod-stream"
1516

1617
describe("Inference Checking", () => {
1718
const UserSchema = z.object({
@@ -78,7 +79,7 @@ describe("Inference Checking", () => {
7879
Partial<{
7980
name: string
8081
age: number
81-
}>,
82+
}> & { _meta: CompletionMeta },
8283
void,
8384
unknown
8485
>
@@ -102,7 +103,7 @@ describe("Inference Checking", () => {
102103
Partial<{
103104
name: string
104105
age: number
105-
}>,
106+
}> & { _meta: CompletionMeta },
106107
void,
107108
unknown
108109
>

0 commit comments

Comments (0)