Skip to content

Commit 78a1aac

Browse files
committed
♻️ refactor: rename the `hidden` model flag to `enabled`
1 parent 6dc9884 commit 78a1aac

21 files changed

+100
-94
lines changed

src/config/modelProviders/anthropic.ts

+6-1
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ const Anthropic: ModelProviderCard = {
66
description:
77
'Ideal balance of intelligence and speed for enterprise workloads. Maximum utility at a lower price, dependable, balanced for scaled deployments',
88
displayName: 'Claude 3 Sonnet',
9+
enabled: true,
910
id: 'claude-3-sonnet-20240229',
1011
maxOutput: 4096,
1112
tokens: 200_000,
@@ -15,6 +16,7 @@ const Anthropic: ModelProviderCard = {
1516
description:
1617
'Most powerful model for highly complex tasks. Top-level performance, intelligence, fluency, and understanding',
1718
displayName: 'Claude 3 Opus',
19+
enabled: true,
1820
id: 'claude-3-opus-20240229',
1921
maxOutput: 4096,
2022
tokens: 200_000,
@@ -24,26 +26,29 @@ const Anthropic: ModelProviderCard = {
2426
description:
2527
'Fastest and most compact model for near-instant responsiveness. Quick and accurate targeted performance',
2628
displayName: 'Claude 3 Haiku',
29+
enabled: true,
2730
id: 'claude-3-haiku-20240307',
2831
maxOutput: 4096,
2932
tokens: 200_000,
3033
vision: true,
3134
},
3235
{
3336
displayName: 'Claude 2.1',
37+
enabled: false,
3438
id: 'claude-2.1',
3539
maxOutput: 4096,
3640
tokens: 200_000,
3741
},
3842
{
3943
displayName: 'Claude 2.0',
44+
enabled: false,
4045
id: 'claude-2.0',
4146
maxOutput: 4096,
4247
tokens: 100_000,
4348
},
4449
{
4550
displayName: 'Claude Instant 1.2',
46-
hidden: true,
51+
enabled: false,
4752
id: 'claude-instant-1.2',
4853
maxOutput: 4096,
4954
tokens: 100_000,

src/config/modelProviders/bedrock.ts

+4-1
Original file line numberDiff line numberDiff line change
@@ -6,14 +6,14 @@ const Bedrock: ModelProviderCard = {
66
description:
77
'Amazon Titan Text G1 - Express v1,上下文长度可达 8000 个 token,适合广泛的用途。',
88
displayName: 'Titan Text G1 - Express',
9-
hidden: true,
109
id: 'amazon.titan-text-express-v1:0:8k',
1110
tokens: 8000,
1211
},
1312
{
1413
description:
1514
'Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过精心设计,是大规模部署人工智能的可信赖、高耐久性骨干模型。 Claude 3 Sonnet 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。',
1615
displayName: 'Claude 3 Sonnet',
16+
enabled: true,
1717
id: 'anthropic.claude-3-sonnet-20240229-v1:0',
1818
tokens: 200_000,
1919
vision: true,
@@ -22,6 +22,7 @@ const Bedrock: ModelProviderCard = {
2222
description:
2323
'Claude 3 Haiku 是 Anthropic 最快速、最紧凑的模型,具有近乎即时的响应能力。该模型可以快速回答简单的查询和请求。客户将能够构建模仿人类交互的无缝人工智能体验。 Claude 3 Haiku 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。',
2424
displayName: 'Claude 3 Haiku',
25+
enabled: true,
2526
id: 'anthropic.claude-3-haiku-20240307-v1:0',
2627
tokens: 200_000,
2728
vision: true,
@@ -43,12 +44,14 @@ const Bedrock: ModelProviderCard = {
4344
{
4445
description: 'Llama 2 Chat 13B v1,上下文大小为 4k,Llama 2 模型的对话用例优化变体。',
4546
displayName: 'Llama 2 Chat 13B',
47+
enabled: true,
4648
id: 'meta.llama2-13b-chat-v1',
4749
tokens: 4000,
4850
},
4951
{
5052
description: 'Llama 2 Chat 70B v1,上下文大小为 4k,Llama 2 模型的对话用例优化变体。',
5153
displayName: 'Llama 2 Chat 70B',
54+
enabled: true,
5255
id: 'meta.llama2-70b-chat-v1',
5356
tokens: 4000,
5457
},

src/config/modelProviders/google.ts

+2-5
Original file line numberDiff line numberDiff line change
@@ -5,28 +5,28 @@ const Google: ModelProviderCard = {
55
{
66
description: 'A legacy text-only model optimized for chat conversations',
77
displayName: 'PaLM 2 Chat (Legacy)',
8-
hidden: true,
98
id: 'chat-bison-001',
109
maxOutput: 1024,
1110
},
1211
{
1312
description: 'A legacy model that understands text and generates text as an output',
1413
displayName: 'PaLM 2 (Legacy)',
15-
hidden: true,
1614
id: 'text-bison-001',
1715
maxOutput: 1024,
1816
tokens: 9220,
1917
},
2018
{
2119
description: 'The best model for scaling across a wide range of tasks',
2220
displayName: 'Gemini 1.0 Pro',
21+
enabled: true,
2322
id: 'gemini-pro',
2423
maxOutput: 2048,
2524
tokens: 32_768,
2625
},
2726
{
2827
description: 'The best image understanding model to handle a broad range of applications',
2928
displayName: 'Gemini 1.0 Pro Vision',
29+
enabled: true,
3030
id: 'gemini-1.0-pro-vision-latest',
3131
maxOutput: 4096,
3232
tokens: 16_384,
@@ -35,7 +35,6 @@ const Google: ModelProviderCard = {
3535
{
3636
description: 'The best image understanding model to handle a broad range of applications',
3737
displayName: 'Gemini 1.0 Pro Vision',
38-
hidden: true,
3938
id: 'gemini-pro-vision',
4039
maxOutput: 4096,
4140
tokens: 16_384,
@@ -45,7 +44,6 @@ const Google: ModelProviderCard = {
4544
description:
4645
'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
4746
displayName: 'Gemini 1.0 Pro 001 (Tuning)',
48-
hidden: true,
4947
id: 'gemini-1.0-pro-001',
5048
maxOutput: 2048,
5149
tokens: 32_768,
@@ -54,7 +52,6 @@ const Google: ModelProviderCard = {
5452
description:
5553
'The best model for scaling across a wide range of tasks. This is the latest model.',
5654
displayName: 'Gemini 1.0 Pro Latest',
57-
hidden: true,
5855
id: 'gemini-1.0-pro-latest',
5956
maxOutput: 2048,
6057
tokens: 32_768,

src/config/modelProviders/groq.ts

+3
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,19 @@ const Groq: ModelProviderCard = {
44
chatModels: [
55
{
66
displayName: 'Mixtral-8x7b-Instruct-v0.1',
7+
enabled: true,
78
id: 'mixtral-8x7b-32768',
89
tokens: 32_768,
910
},
1011
{
1112
displayName: 'Gemma-7b-it',
13+
enabled: true,
1214
id: 'gemma-7b-it',
1315
tokens: 8192,
1416
},
1517
{
1618
displayName: 'LLaMA2-70b-chat',
19+
enabled: true,
1720
id: 'llama2-70b-4096',
1821
tokens: 4096,
1922
},

src/config/modelProviders/index.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
3131
].flat();
3232

3333
export const filterEnabledModels = (provider: ModelProviderCard) => {
34-
return provider.chatModels.filter((v) => !v.hidden).map((m) => m.id);
34+
return provider.chatModels.filter((v) => v.enabled).map((m) => m.id);
3535
};
3636

3737
export { default as AnthropicProvider } from './anthropic';

src/config/modelProviders/mistral.ts

+5
Original file line numberDiff line numberDiff line change
@@ -4,26 +4,31 @@ const Mistral: ModelProviderCard = {
44
chatModels: [
55
{
66
displayName: 'Mistral 7B',
7+
enabled: true,
78
id: 'open-mistral-7b',
89
tokens: 32_768,
910
},
1011
{
1112
displayName: 'Mixtral 8x7B',
13+
enabled: true,
1214
id: 'open-mixtral-8x7b',
1315
tokens: 32_768,
1416
},
1517
{
1618
displayName: 'Mistral Small (2402)',
19+
enabled: true,
1720
id: 'mistral-small-2402',
1821
tokens: 32_768,
1922
},
2023
{
2124
displayName: 'Mistral Medium (2312)',
25+
enabled: true,
2226
id: 'mistral-medium-2312',
2327
tokens: 32_768,
2428
},
2529
{
2630
displayName: 'Mistral Large (2402)',
31+
enabled: true,
2732
id: 'mistral-large-2402',
2833
tokens: 32_768,
2934
},

src/config/modelProviders/moonshot.ts

+3-1
Original file line numberDiff line numberDiff line change
@@ -4,23 +4,25 @@ const Moonshot: ModelProviderCard = {
44
chatModels: [
55
{
66
displayName: 'Moonshot V1 8K',
7+
enabled: true,
78
id: 'moonshot-v1-8k',
89
tokens: 8192,
910
},
1011
{
1112
displayName: 'Moonshot V1 32K',
13+
enabled: true,
1214
id: 'moonshot-v1-32k',
1315
tokens: 32_768,
1416
},
1517
{
1618
displayName: 'Moonshot V1 128K',
19+
enabled: true,
1720
id: 'moonshot-v1-128k',
1821
tokens: 128_000,
1922
},
2023
{
2124
displayName: 'Moonshot Kimi Reverse',
2225
files: true,
23-
hidden: true,
2426
id: 'moonshot-v1',
2527
tokens: 200_000,
2628
vision: true,

0 commit comments

Comments (0)