
Commit c398063

♻️ refactor: refactor the server config to migrate model provider env
1 parent 199ded2 commit c398063
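
The refactor moves the model-list environment variables (OPENAI_MODEL_LIST, CUSTOM_MODELS, OPENROUTER_MODEL_LIST) into the server config served by GET /api/config, and the two files shown below test that path. As rough orientation only — a sketch in the spirit of the added test file, not code taken from this diff, with the route import path assumed from the test's location — the flow being exercised is:

// Sketch: set a model-list env var, call the config route, read the resulting cards.
// '@/app/api/config/route' is an assumed path; the test below imports './route' instead.
import { GET } from '@/app/api/config/route';
import { GlobalServerConfig } from '@/types/settings';

process.env.OPENAI_MODEL_LIST = '-all,+llama,gpt-4-0125-preview=gpt-4-turbo';

const response = await GET();
const config: GlobalServerConfig = await response.json();
// The env value ends up as per-provider server model cards in the returned config.
const cards = config.languageModel?.openai?.serverModelCards;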

12 files changed: +490 -414 lines

@@ -0,0 +1,214 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html

exports[`GET /api/config > Model Provider env > CUSTOM_MODELS > custom deletion, addition, and renaming of models 1`] = `
[
  {
    "displayName": "llama",
    "enabled": true,
    "functionCall": true,
    "id": "llama",
    "vision": true,
  },
  {
    "displayName": "claude-2",
    "enabled": true,
    "functionCall": true,
    "id": "claude-2",
    "vision": true,
  },
  {
    "displayName": "gpt-4-32k",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-4-0125-preview",
    "tokens": 128000,
  },
]
`;

exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = `
[
  {
    "displayName": "llama",
    "enabled": true,
    "functionCall": true,
    "id": "llama",
    "vision": true,
  },
  {
    "displayName": "claude-2",
    "enabled": true,
    "functionCall": true,
    "id": "claude-2",
    "vision": true,
  },
  {
    "displayName": "gpt-4-32k",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-4-0125-preview",
    "tokens": 128000,
  },
]
`;

exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = `
[
  {
    "displayName": "GPT-3.5 Turbo (1106)",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-3.5-turbo-1106",
    "tokens": 16385,
  },
  {
    "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务",
    "displayName": "GPT-3.5 Turbo",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-3.5-turbo",
    "tokens": 16385,
  },
  {
    "displayName": "GPT-3.5 Turbo 16K",
    "enabled": true,
    "id": "gpt-3.5-turbo-16k",
    "tokens": 16385,
  },
  {
    "displayName": "GPT-4",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-4",
    "tokens": 8192,
  },
  {
    "displayName": "GPT-4 32K",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-4-32k",
    "tokens": 32768,
  },
  {
    "displayName": "GPT-4 Turbo Preview (1106)",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-4-1106-preview",
    "tokens": 128000,
  },
  {
    "description": "GPT-4 视觉预览版,支持视觉任务",
    "displayName": "GPT-4 Turbo Vision Preview",
    "enabled": true,
    "id": "gpt-4-vision-preview",
    "tokens": 128000,
    "vision": true,
  },
]
`;

exports[`GET /api/config > Model Provider env > OPENROUTER_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = `
[
  {
    "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务",
    "displayName": "GPT-3.5 Turbo",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-3.5-turbo",
    "tokens": 16385,
  },
  {
    "displayName": "GPT-3.5 Turbo (0125)",
    "functionCall": true,
    "id": "gpt-3.5-turbo-0125",
    "tokens": 16385,
  },
  {
    "displayName": "GPT-3.5 Turbo (1106)",
    "functionCall": true,
    "id": "gpt-3.5-turbo-1106",
    "tokens": 16385,
  },
  {
    "displayName": "GPT-3.5 Turbo Instruct",
    "id": "gpt-3.5-turbo-instruct",
    "tokens": 4096,
  },
  {
    "displayName": "GPT-3.5 Turbo 16K",
    "id": "gpt-3.5-turbo-16k",
    "tokens": 16385,
  },
  {
    "displayName": "GPT-3.5 Turbo (0613)",
    "id": "gpt-3.5-turbo-0613",
    "legacy": true,
    "tokens": 4096,
  },
  {
    "displayName": "GPT-3.5 Turbo 16K (0613)",
    "id": "gpt-3.5-turbo-16k-0613",
    "legacy": true,
    "tokens": 4096,
  },
  {
    "displayName": "GPT-4 Turbo Preview",
    "enabled": true,
    "functionCall": true,
    "id": "gpt-4-turbo-preview",
    "tokens": 128000,
  },
  {
    "displayName": "GPT-4 Turbo Preview (0125)",
    "functionCall": true,
    "id": "gpt-4-0125-preview",
    "tokens": 128000,
  },
  {
    "description": "GPT-4 视觉预览版,支持视觉任务",
    "displayName": "GPT-4 Turbo Vision Preview",
    "enabled": true,
    "id": "gpt-4-vision-preview",
    "tokens": 128000,
    "vision": true,
  },
  {
    "displayName": "GPT-4 Turbo Preview (1106)",
    "functionCall": true,
    "id": "gpt-4-1106-preview",
    "tokens": 128000,
  },
  {
    "displayName": "GPT-4",
    "functionCall": true,
    "id": "gpt-4",
    "tokens": 8192,
  },
  {
    "displayName": "GPT-4 (0613)",
    "functionCall": true,
    "id": "gpt-4-0613",
    "tokens": 8192,
  },
  {
    "displayName": "GPT-4 32K",
    "functionCall": true,
    "id": "gpt-4-32k",
    "tokens": 32768,
  },
  {
    "displayName": "GPT-4 32K (0613)",
    "functionCall": true,
    "id": "gpt-4-32k-0613",
    "tokens": 32768,
  },
  {
    "displayName": "GPT-4 ALL",
    "files": true,
    "functionCall": true,
    "id": "gpt-4-all",
    "tokens": 32768,
    "vision": true,
  },
]
`;

src/app/api/config/route.test.ts

+172
@@ -0,0 +1,172 @@
import { beforeEach, describe, expect, it, vi } from 'vitest';

import { OllamaProvider, OpenRouterProvider, TogetherAIProvider } from '@/config/modelProviders';
import { getServerConfig } from '@/config/server';
import { GlobalServerConfig } from '@/types/settings';

import { GET } from './route';

beforeEach(() => {
  vi.resetAllMocks();
});

describe('GET /api/config', () => {
  describe('Model Provider env', () => {
    describe('OPENAI_MODEL_LIST', () => {
      it('custom deletion, addition, and renaming of models', async () => {
        process.env.OPENAI_MODEL_LIST =
          '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k';

        const response = await GET();

        // Assert
        expect(response).toBeInstanceOf(Response);
        expect(response.status).toBe(200);

        const jsonResponse: GlobalServerConfig = await response.json();

        const result = jsonResponse.languageModel?.openai?.serverModelCards;

        expect(result).toMatchSnapshot();
        process.env.OPENAI_MODEL_LIST = '';
      });

      it('should work correct with gpt-4', async () => {
        process.env.OPENAI_MODEL_LIST =
          '-all,+gpt-3.5-turbo-1106,+gpt-3.5-turbo,+gpt-3.5-turbo-16k,+gpt-4,+gpt-4-32k,+gpt-4-1106-preview,+gpt-4-vision-preview';

        const response = await GET();
        const jsonResponse: GlobalServerConfig = await response.json();

        const result = jsonResponse.languageModel?.openai?.serverModelCards;

        expect(result).toMatchSnapshot();

        process.env.OPENAI_MODEL_LIST = '';
      });

      it('duplicate naming model', async () => {
        process.env.OPENAI_MODEL_LIST =
          'gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k';

        const response = await GET();
        const jsonResponse: GlobalServerConfig = await response.json();

        const result = jsonResponse.languageModel?.openai?.serverModelCards;

        expect(result?.find((s) => s.id === 'gpt-4-0125-preview')?.displayName).toEqual(
          'gpt-4-32k',
        );

        process.env.OPENAI_MODEL_LIST = '';
      });

      it('should delete model', async () => {
        process.env.OPENAI_MODEL_LIST = '-gpt-4';

        const res = await GET();
        const data: GlobalServerConfig = await res.json();

        const result = data.languageModel?.openai?.serverModelCards;

        expect(result?.find((r) => r.id === 'gpt-4')).toBeUndefined();

        process.env.OPENAI_MODEL_LIST = '';
      });

      it('show the hidden model', async () => {
        process.env.OPENAI_MODEL_LIST = '+gpt-4-1106-preview';

        const res = await GET();
        const data: GlobalServerConfig = await res.json();

        const result = data.languageModel?.openai?.serverModelCards;

        expect(result?.find((o) => o.id === 'gpt-4-1106-preview')).toEqual({
          displayName: 'GPT-4 Turbo Preview (1106)',
          functionCall: true,
          enabled: true,
          id: 'gpt-4-1106-preview',
          tokens: 128000,
        });

        process.env.OPENAI_MODEL_LIST = '';
      });

      it('only add the model', async () => {
        process.env.OPENAI_MODEL_LIST = 'model1,model2,model3,model4';

        const res = await GET();
        const data: GlobalServerConfig = await res.json();

        const result = data.languageModel?.openai?.serverModelCards;

        expect(result).toContainEqual({
          displayName: 'model1',
          functionCall: true,
          id: 'model1',
          enabled: true,
          vision: true,
        });
        expect(result).toContainEqual({
          displayName: 'model2',
          functionCall: true,
          enabled: true,
          id: 'model2',
          vision: true,
        });
        expect(result).toContainEqual({
          displayName: 'model3',
          enabled: true,
          functionCall: true,
          id: 'model3',
          vision: true,
        });
        expect(result).toContainEqual({
          displayName: 'model4',
          functionCall: true,
          enabled: true,
          id: 'model4',
          vision: true,
        });

        process.env.OPENAI_MODEL_LIST = '';
      });
    });

    describe('CUSTOM_MODELS', () => {
      it('custom deletion, addition, and renaming of models', async () => {
        process.env.CUSTOM_MODELS =
          '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k';

        const response = await GET();

        // Assert
        expect(response).toBeInstanceOf(Response);
        expect(response.status).toBe(200);

        const jsonResponse: GlobalServerConfig = await response.json();

        const result = jsonResponse.languageModel?.openai?.serverModelCards;

        expect(result).toMatchSnapshot();
      });
    });

    describe('OPENROUTER_MODEL_LIST', () => {
      it('custom deletion, addition, and renaming of models', async () => {
        process.env.OPENROUTER_MODEL_LIST =
          '-all,+google/gemma-7b-it,+mistralai/mistral-7b-instruct=Mistral-7B-Instruct';

        const res = await GET();
        const data: GlobalServerConfig = await res.json();

        const result = data.languageModel?.openai?.serverModelCards;

        expect(result).toMatchSnapshot();

        process.env.OPENROUTER_MODEL_LIST = '';
      });
    });
  });
});
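
Read together, the tests pin down a small grammar for these model-list variables: '-all' clears the provider's built-in list, '+id' (or a bare 'id') enables a model, '-id' removes one, and 'id=displayName' renames it, with a later rename overriding an earlier one. The sketch below is illustrative only — it is not the implementation this commit adds, and ModelCard and applyModelList are hypothetical names — but it reproduces the behaviour the assertions and snapshots above describe:

// Hypothetical sketch of the model-list semantics exercised by the tests above.
interface ModelCard {
  displayName?: string;
  enabled?: boolean;
  files?: boolean;
  functionCall?: boolean;
  id: string;
  legacy?: boolean;
  tokens?: number;
  vision?: boolean;
}

const applyModelList = (list: string, knownCards: ModelCard[]): ModelCard[] => {
  // Start from the provider's built-in cards; '-all' wipes them out.
  const cards = new Map<string, ModelCard>(
    knownCards.map((c): [string, ModelCard] => [c.id, { ...c }]),
  );

  for (const entry of list.split(',').map((s) => s.trim()).filter(Boolean)) {
    if (entry === '-all') {
      cards.clear();
      continue;
    }
    // '-id' deletes a single model ('should delete model').
    if (entry.startsWith('-')) {
      cards.delete(entry.slice(1));
      continue;
    }
    // '+id', bare 'id', or 'id=displayName' enables (and optionally renames) a model.
    const [id, displayName] = entry.replace(/^\+/, '').split('=');
    const known = knownCards.find((c) => c.id === id);
    cards.set(id, {
      // Unknown ids fall back to a permissive card ('only add the model' expects
      // functionCall and vision to default to true).
      ...(known ?? { functionCall: true, vision: true }),
      // A later 'id=name' entry wins over an earlier one ('duplicate naming model').
      displayName: displayName ?? cards.get(id)?.displayName ?? known?.displayName ?? id,
      enabled: true,
      id,
    });
  }

  return [...cards.values()];
};

For example, applyModelList('-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k', defaults) — with 'defaults' standing in for the provider's built-in card list — yields the three cards shown in the CUSTOM_MODELS snapshot above.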
