Skip to content

Commit 51e5591

Browse files
authored
Feature/Add multi modal to chat ollama (FlowiseAI#3499)
* add multi modal to chat ollama
* update JSON mode description
1 parent 1e2dc03 commit 51e5591

File tree

6 files changed

+60
-1001
lines changed

6 files changed

+60
-1001
lines changed

packages/components/nodes/chatmodels/ChatOllama/ChatOllama.ts

Lines changed: 32 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
1-
import { ChatOllama, ChatOllamaInput } from '@langchain/ollama'
1+
import { ChatOllamaInput } from '@langchain/ollama'
22
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
33
import { BaseCache } from '@langchain/core/caches'
4-
import { INode, INodeData, INodeParams } from '../../../src/Interface'
4+
import { IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
55
import { getBaseClasses } from '../../../src/utils'
6+
import { ChatOllama } from './FlowiseChatOllama'
67

78
class ChatOllama_ChatModels implements INode {
89
label: string
@@ -19,7 +20,7 @@ class ChatOllama_ChatModels implements INode {
1920
constructor() {
2021
this.label = 'ChatOllama'
2122
this.name = 'chatOllama'
22-
this.version = 3.0
23+
this.version = 4.0
2324
this.type = 'ChatOllama'
2425
this.icon = 'Ollama.svg'
2526
this.category = 'Chat Models'
@@ -54,6 +55,23 @@ class ChatOllama_ChatModels implements INode {
5455
default: 0.9,
5556
optional: true
5657
},
58+
{
59+
label: 'Allow Image Uploads',
60+
name: 'allowImageUploads',
61+
type: 'boolean',
62+
description: 'Allow image uploads for multimodal models. e.g. llama3.2-vision',
63+
default: false,
64+
optional: true
65+
},
66+
{
67+
label: 'JSON Mode',
68+
name: 'jsonMode',
69+
type: 'boolean',
70+
description:
71+
'Coerces model outputs to only return JSON. Specify in the system prompt to return JSON. Ex: Format all responses as JSON object',
72+
optional: true,
73+
additionalParams: true
74+
},
5775
{
5876
label: 'Keep Alive',
5977
name: 'keepAlive',
@@ -203,6 +221,8 @@ class ChatOllama_ChatModels implements INode {
203221
const repeatLastN = nodeData.inputs?.repeatLastN as string
204222
const repeatPenalty = nodeData.inputs?.repeatPenalty as string
205223
const tfsZ = nodeData.inputs?.tfsZ as string
224+
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
225+
const jsonMode = nodeData.inputs?.jsonMode as boolean
206226

207227
const cache = nodeData.inputs?.cache as BaseCache
208228

@@ -225,8 +245,16 @@ class ChatOllama_ChatModels implements INode {
225245
if (tfsZ) obj.tfsZ = parseFloat(tfsZ)
226246
if (keepAlive) obj.keepAlive = keepAlive
227247
if (cache) obj.cache = cache
248+
if (jsonMode) obj.format = 'json'
249+
250+
const multiModalOption: IMultiModalOption = {
251+
image: {
252+
allowImageUploads: allowImageUploads ?? false
253+
}
254+
}
228255

229-
const model = new ChatOllama(obj)
256+
const model = new ChatOllama(nodeData.id, obj)
257+
model.setMultiModalOption(multiModalOption)
230258
return model
231259
}
232260
}
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
import { ChatOllama as LCChatOllama, ChatOllamaInput } from '@langchain/ollama'
2+
import { IMultiModalOption, IVisionChatModal } from '../../../src'
3+
4+
export class ChatOllama extends LCChatOllama implements IVisionChatModal {
5+
configuredModel: string
6+
configuredMaxToken?: number
7+
multiModalOption: IMultiModalOption
8+
id: string
9+
10+
constructor(id: string, fields?: ChatOllamaInput) {
11+
super(fields)
12+
this.id = id
13+
this.configuredModel = fields?.model ?? ''
14+
}
15+
16+
revertToOriginalModel(): void {
17+
this.model = this.configuredModel
18+
}
19+
20+
setMultiModalOption(multiModalOption: IMultiModalOption): void {
21+
this.multiModalOption = multiModalOption
22+
}
23+
24+
setVisionModel(): void {
25+
// pass
26+
}
27+
}

0 commit comments

Comments (0)