
Commit 781632d

HenryHengZJ authored and tenuar committed
Feature/update openai version, add reasoning effort param, add o3 mini (FlowiseAI#3973)
* update openai version, add reasoning effort param
* update azure
* add filter for pinecone llamaindex
* update graph cypher qa chain
1 parent 3370b3d · commit 781632d

File tree

24 files changed: +497 -288 lines changed


package.json (+2 -2)
@@ -71,9 +71,9 @@
     },
     "resolutions": {
         "@google/generative-ai": "^0.15.0",
-        "@langchain/core": "0.3.29",
+        "@langchain/core": "0.3.37",
         "@qdrant/openapi-typescript-fetch": "1.2.6",
-        "openai": "4.57.3",
+        "openai": "4.82.0",
         "protobufjs": "7.4.0"
     },
     "eslintIgnore": [

packages/components/models.json (+12)
@@ -230,6 +230,14 @@
     {
         "name": "azureChatOpenAI",
         "models": [
+            {
+                "label": "o3-mini",
+                "name": "o3-mini"
+            },
+            {
+                "label": "o1",
+                "name": "o1"
+            },
             {
                 "label": "o1-preview",
                 "name": "o1-preview"
@@ -397,6 +405,10 @@
     {
         "name": "chatGoogleGenerativeAI",
         "models": [
+            {
+                "label": "gemini-2.0-flash-exp",
+                "name": "gemini-2.0-flash-exp"
+            },
             {
                 "label": "gemini-1.5-flash-latest",
                 "name": "gemini-1.5-flash-latest"

packages/components/nodes/chains/GraphCypherQAChain/GraphCypherQAChain.ts (+13 -11)
@@ -23,7 +23,7 @@ class GraphCypherQA_Chain implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Graph Cypher QA Chain'
         this.name = 'graphCypherQAChain'
-        this.version = 1.0
+        this.version = 1.1
         this.type = 'GraphCypherQAChain'
         this.icon = 'graphqa.svg'
         this.category = 'Chains'
@@ -47,7 +47,8 @@
                 name: 'cypherPrompt',
                 optional: true,
                 type: 'BasePromptTemplate',
-                description: 'Prompt template for generating Cypher queries. Must include {schema} and {question} variables'
+                description:
+                    'Prompt template for generating Cypher queries. Must include {schema} and {question} variables. If not provided, default prompt will be used.'
             },
             {
                 label: 'Cypher Generation Model',
@@ -61,7 +62,8 @@
                 name: 'qaPrompt',
                 optional: true,
                 type: 'BasePromptTemplate',
-                description: 'Prompt template for generating answers. Must include {context} and {question} variables'
+                description:
+                    'Prompt template for generating answers. Must include {context} and {question} variables. If not provided, default prompt will be used.'
             },
             {
                 label: 'QA Model',
@@ -111,6 +113,10 @@
         const returnDirect = nodeData.inputs?.returnDirect as boolean
         const output = nodeData.outputs?.output as string

+        if (!model) {
+            throw new Error('Language Model is required')
+        }
+
         // Handle prompt values if they exist
         let cypherPromptTemplate: PromptTemplate | FewShotPromptTemplate | undefined
         let qaPromptTemplate: PromptTemplate | undefined
@@ -147,10 +153,6 @@
             })
         }

-        if ((!cypherModel || !qaModel) && !model) {
-            throw new Error('Language Model is required when Cypher Model or QA Model are not provided')
-        }
-
         // Validate required variables in prompts
         if (
             cypherPromptTemplate &&
@@ -165,13 +167,13 @@
             returnDirect
         }

-        if (cypherModel && cypherPromptTemplate) {
-            fromLLMInput['cypherLLM'] = cypherModel
+        if (cypherPromptTemplate) {
+            fromLLMInput['cypherLLM'] = cypherModel ?? model
             fromLLMInput['cypherPrompt'] = cypherPromptTemplate
         }

-        if (qaModel && qaPromptTemplate) {
-            fromLLMInput['qaLLM'] = qaModel
+        if (qaPromptTemplate) {
+            fromLLMInput['qaLLM'] = qaModel ?? model
             fromLLMInput['qaPrompt'] = qaPromptTemplate
         }
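
Net effect of this change: a custom Cypher or QA prompt no longer has to be paired with its own dedicated model, because the shared Language Model is validated up front and used as the fallback. A minimal sketch of the resulting wiring, assuming the GraphCypherQAChain.fromLLM factory from @langchain/community that the node delegates to, with `model`, `graph`, `cypherModel` and the prompt templates resolved exactly as in the node above:

    import { GraphCypherQAChain } from '@langchain/community/chains/graph_qa/cypher'

    // Sketch only; variable names follow the diff above.
    const fromLLMInput: Record<string, any> = {
        llm: model, // required: the node now throws early when it is missing
        graph,
        returnDirect
    }
    if (cypherPromptTemplate) {
        // a custom prompt alone is enough; the dedicated model stays optional
        fromLLMInput['cypherLLM'] = cypherModel ?? model
        fromLLMInput['cypherPrompt'] = cypherPromptTemplate
    }
    const chain = GraphCypherQAChain.fromLLM(fromLLMInput as any)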

packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts (+35 -10)
@@ -1,11 +1,9 @@
-import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, ClientOptions, LegacyOpenAIInput } from '@langchain/openai'
+import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
 import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
-import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'

 const serverCredentialsExists =
     !!process.env.AZURE_OPENAI_API_KEY &&
@@ -33,7 +31,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         this.icon = 'Azure.svg'
         this.category = 'Chat Models'
         this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
-        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainAzureChatOpenAI)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',
@@ -155,6 +153,29 @@
                 default: 'low',
                 optional: false,
                 additionalParams: true
+            },
+            {
+                label: 'Reasoning Effort',
+                description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 models',
+                name: 'reasoningEffort',
+                type: 'options',
+                options: [
+                    {
+                        label: 'Low',
+                        name: 'low'
+                    },
+                    {
+                        label: 'Medium',
+                        name: 'medium'
+                    },
+                    {
+                        label: 'High',
+                        name: 'high'
+                    }
+                ],
+                default: 'low',
+                optional: false,
+                additionalParams: true
             }
         ]
     }
@@ -178,6 +199,7 @@
         const topP = nodeData.inputs?.topP as string
         const basePath = nodeData.inputs?.basepath as string
         const baseOptions = nodeData.inputs?.baseOptions
+        const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -188,10 +210,7 @@
         const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
         const imageResolution = nodeData.inputs?.imageResolution as string

-        const obj: Partial<AzureOpenAIInput> &
-            BaseLLMParams &
-            Partial<OpenAIChatInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
+        const obj: ChatOpenAIFields & Partial<AzureOpenAIInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,
@@ -218,6 +237,12 @@
                 console.error('Error parsing base options', exception)
             }
         }
+        if (modelName === 'o3-mini') {
+            delete obj.temperature
+        }
+        if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
+            obj.reasoningEffort = reasoningEffort
+        }

         const multiModalOption: IMultiModalOption = {
             image: {
@@ -226,7 +251,7 @@
             }
         }

-        const model = new ChatOpenAI(nodeData.id, obj)
+        const model = new AzureChatOpenAI(nodeData.id, obj)
         model.setMultiModalOption(multiModalOption)
         return model
     }
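
For reference, a hedged sketch of what the node now builds for an o-series deployment. The instance, deployment, and API-version strings below are placeholders, not part of this commit; `reasoningEffort` is the ChatOpenAIFields field the diff assigns, and temperature is omitted because the node deletes it for o3-mini:

    import { AzureChatOpenAI } from '@langchain/openai'

    const llm = new AzureChatOpenAI({
        modelName: 'o3-mini',
        azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // placeholder credential
        azureOpenAIApiInstanceName: 'my-instance', // placeholder
        azureOpenAIApiDeploymentName: 'o3-mini', // placeholder
        azureOpenAIApiVersion: '2024-12-01-preview', // placeholder
        // temperature intentionally omitted: the node deletes it for o3-mini
        reasoningEffort: 'medium' // 'low' | 'medium' | 'high'; applied only for o1/o3 models
    })
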
packages/components/nodes/chatmodels/AzureChatOpenAI/FlowiseAzureChatOpenAI.ts (new file, +41)
@@ -0,0 +1,41 @@
+import { AzureChatOpenAI as LangchainAzureChatOpenAI, OpenAIChatInput, AzureOpenAIInput, ClientOptions } from '@langchain/openai'
+import { IMultiModalOption, IVisionChatModal } from '../../../src'
+import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+
+export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVisionChatModal {
+    configuredModel: string
+    configuredMaxToken?: number
+    multiModalOption: IMultiModalOption
+    id: string
+
+    constructor(
+        id: string,
+        fields?: Partial<OpenAIChatInput> &
+            Partial<AzureOpenAIInput> & {
+                openAIApiKey?: string
+                openAIApiVersion?: string
+                openAIBasePath?: string
+                deploymentName?: string
+            } & BaseChatModelParams & {
+                configuration?: ClientOptions
+            }
+    ) {
+        super(fields)
+        this.id = id
+        this.configuredModel = fields?.modelName ?? ''
+        this.configuredMaxToken = fields?.maxTokens
+    }
+
+    revertToOriginalModel(): void {
+        this.modelName = this.configuredModel
+        this.maxTokens = this.configuredMaxToken
+    }
+
+    setMultiModalOption(multiModalOption: IMultiModalOption): void {
+        this.multiModalOption = multiModalOption
+    }
+
+    setVisionModel(): void {
+        // pass
+    }
+}
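
This wrapper follows the same pattern as the existing FlowiseChatOpenAI: it remembers the user-configured model so Flowise can swap in a vision-capable model for an image turn and restore the original afterwards (setVisionModel is left as a no-op here). A usage sketch based on the node code above, with `nodeData` and `obj` as in AzureChatOpenAI.ts:

    const model = new AzureChatOpenAI(nodeData.id, obj)
    model.setMultiModalOption({
        image: { allowImageUploads: true, imageResolution: 'low' }
    })
    // ...later, if an image turn temporarily changed the model:
    model.revertToOriginalModel() // restores configured modelName and maxTokens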

packages/components/nodes/chatmodels/ChatCerebras/ChatCerebras.ts (+11 -7)
@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -135,7 +134,7 @@
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const cerebrasAIApiKey = getCredentialParam('cerebrasApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: cerebrasAIApiKey,
@@ -158,10 +157,15 @@
                 throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
             }
         }
-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }
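
The same migration repeats in the ChatLocalAI and ChatNvdiaNIM nodes below: the updated @langchain/openai no longer takes the legacy second constructor argument ({ basePath, baseOptions }), so endpoint overrides now travel in the `configuration` field. A self-contained sketch, with placeholder model, credential, endpoint, and header values:

    import { ChatOpenAI } from '@langchain/openai'

    const model = new ChatOpenAI({
        modelName: 'llama3.1-8b', // placeholder model id
        openAIApiKey: process.env.CEREBRAS_API_KEY, // placeholder credential
        configuration: {
            baseURL: 'https://api.cerebras.ai/v1', // formerly the `basePath` option
            defaultHeaders: { 'x-example': 'value' } // formerly the `baseOptions` headers
        }
    })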

packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts (+4 -4)
@@ -1,6 +1,5 @@
-import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -108,7 +107,7 @@

         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: 'sk-',
@@ -120,8 +119,9 @@
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
         if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+        if (basePath) obj.configuration = { baseURL: basePath }

-        const model = new ChatOpenAI(obj, { basePath })
+        const model = new ChatOpenAI(obj)

         return model
     }

packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts (+11 -8)
@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -134,7 +133,7 @@
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const nvdiaNIMApiKey = getCredentialParam('nvdiaNIMApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { nvdiaNIMApiKey?: string } = {
+        const obj: ChatOpenAIFields & { nvdiaNIMApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: nvdiaNIMApiKey,
@@ -154,14 +153,18 @@
             try {
                 parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
             } catch (exception) {
-                throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
+                throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception)
             }
         }

-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }
