Skip to content

Commit 56032e6

Browse files
committed
✨ feat: support openai model fetcher
1 parent ef5ee2a commit 56032e6

File tree

32 files changed

+653
-118
lines changed

32 files changed

+653
-118
lines changed

src/app/api/chat/agentRuntime.ts

+4
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,10 @@ class AgentRuntime {
106106
});
107107
}
108108

109+
async models() {
110+
return this._runtime.models?.();
111+
}
112+
109113
static async initializeWithUserPayload(provider: string, payload: JWTPayload) {
110114
let runtimeModel: LobeRuntimeAI;
111115

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
import { NextResponse } from 'next/server';
2+
3+
import { getPreferredRegion } from '@/app/api/config';
4+
import { createErrorResponse } from '@/app/api/errorResponse';
5+
import { ChatCompletionErrorPayload } from '@/libs/agent-runtime';
6+
import { ChatErrorType } from '@/types/fetch';
7+
8+
import AgentRuntime from '../../agentRuntime';
9+
import { checkAuth } from '../../auth';
10+
11+
export const runtime = 'edge';
12+
13+
export const preferredRegion = getPreferredRegion();
14+
15+
export const GET = checkAuth(async (req, { params, jwtPayload }) => {
16+
const { provider } = params;
17+
18+
try {
19+
const agentRuntime = await AgentRuntime.initializeWithUserPayload(provider, jwtPayload);
20+
21+
const list = await agentRuntime.models();
22+
23+
return NextResponse.json(list);
24+
} catch (e) {
25+
const {
26+
errorType = ChatErrorType.InternalServerError,
27+
error: errorContent,
28+
...res
29+
} = e as ChatCompletionErrorPayload;
30+
31+
const error = errorContent || e;
32+
// track the error at server side
33+
console.error(`Route: [${provider}] ${errorType}:`, error);
34+
35+
return createErrorResponse(errorType, { error, ...res, provider });
36+
}
37+
});

src/app/api/config/route.test.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { beforeEach, describe, expect, it, vi } from 'vitest';
22

3-
import { GlobalServerConfig } from '@/types/settings';
3+
import { GlobalServerConfig } from '@/types/serverConfig';
44

55
import { GET } from './route';
66

src/app/api/config/route.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import {
44
TogetherAIProviderCard,
55
} from '@/config/modelProviders';
66
import { getServerConfig } from '@/config/server';
7-
import { GlobalServerConfig } from '@/types/settings';
7+
import { GlobalServerConfig } from '@/types/serverConfig';
88
import { transformToChatModelCards } from '@/utils/parseModels';
99

1010
import { parseAgentConfig } from './parseDefaultAgent';

src/app/settings/llm/Ollama/index.tsx

+1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ const OllamaProvider = memo(() => {
2020
label: t('llm.checker.title'),
2121
minWidth: undefined,
2222
}}
23+
modelList={{ showModelFetcher: true }}
2324
provider={ModelProvider.Ollama}
2425
showApiKey={false}
2526
showEndpoint

src/app/settings/llm/OpenAI/index.tsx

+6-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,12 @@ import { memo } from 'react';
44
import ProviderConfig from '../components/ProviderConfig';
55

66
const OpenAIProvider = memo(() => (
7-
<ProviderConfig provider={'openai'} showEndpoint title={<OpenAI.Combine size={24} />} />
7+
<ProviderConfig
8+
modelList={{ showModelFetcher: true }}
9+
provider={'openai'}
10+
showEndpoint
11+
title={<OpenAI.Combine size={24} />}
12+
/>
813
));
914

1015
export default OpenAIProvider;

src/app/settings/llm/OpenRouter/index.tsx

+1
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ const OpenRouterProvider = memo(() => {
1212
return (
1313
<ProviderConfig
1414
checkModel={'mistralai/mistral-7b-instruct:free'}
15+
modelList={{ showModelFetcher: true }}
1516
provider={ModelProvider.OpenRouter}
1617
title={
1718
<OpenRouter.Combine

src/app/settings/llm/TogetherAI/index.tsx

+1
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ const TogetherAIProvider = memo(() => {
1010
return (
1111
<ProviderConfig
1212
checkModel={'togethercomputer/alpaca-7b'}
13+
modelList={{ showModelFetcher: true }}
1314
provider={'togetherai'}
1415
title={
1516
<Together.Combine

src/app/settings/llm/components/ProviderConfig/index.tsx

+2
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ interface ProviderConfigProps {
2828
azureDeployName?: boolean;
2929
notFoundContent?: ReactNode;
3030
placeholder?: string;
31+
showModelFetcher?: boolean;
3132
};
3233
provider: GlobalLLMProviderKey;
3334
showApiKey?: boolean;
@@ -90,6 +91,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
9091
placeholder={modelList?.placeholder ?? t('llm.modelList.placeholder')}
9192
provider={provider}
9293
showAzureDeployName={modelList?.azureDeployName}
94+
showModelFetcher={modelList?.showModelFetcher}
9395
/>
9496
),
9597
desc: t('llm.modelList.desc'),
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
import { Icon, Tooltip } from '@lobehub/ui';
2+
import { Typography } from 'antd';
3+
import { createStyles } from 'antd-style';
4+
import dayjs from 'dayjs';
5+
import { LucideLoaderCircle, LucideRefreshCcwDot } from 'lucide-react';
6+
import { memo } from 'react';
7+
import { Flexbox } from 'react-layout-kit';
8+
9+
import { useGlobalStore } from '@/store/global';
10+
import { modelConfigSelectors } from '@/store/global/selectors';
11+
import { GlobalLLMProviderKey } from '@/types/settings';
12+
13+
const useStyles = createStyles(({ css, token }) => ({
  // Hover affordance for the "fetch model list" action.
  hover: css`
    cursor: pointer;
    padding: 4px 8px;
    border-radius: ${token.borderRadius}px;
    transition: all 0.2s ease-in-out;

    &:hover {
      color: ${token.colorText};
      background-color: ${token.colorFillSecondary};
    }
  `,
}));

interface ModelFetcherProps {
  provider: GlobalLLMProviderKey;
}

/**
 * Footer widget for a provider's model list: shows how many models are
 * available, when they were last fetched, and a click-to-refresh action.
 *
 * NOTE(review): the visible labels are hard-coded Chinese strings; they look
 * like candidates for the i18n layer — confirm before localizing.
 */
const ModelFetcher = memo<ModelFetcherProps>(({ provider }) => {
  const { styles } = useStyles();
  // Only the fetch hook is used here; selecting additional store slices
  // (e.g. setModelProviderConfig) would needlessly widen the subscription.
  const [useFetchProviderModelList] = useGlobalStore((s) => [s.useFetchProviderModelList]);
  const enabledAutoFetch = useGlobalStore(modelConfigSelectors.enabledAutoFetchModels(provider));
  const latestFetchTime = useGlobalStore(
    (s) => modelConfigSelectors.providerConfig(provider)(s)?.latestFetchTime,
  );
  const totalModels = useGlobalStore(
    (s) => modelConfigSelectors.providerModelCards(provider)(s).length,
  );

  // `mutate` re-triggers the fetch; `isValidating` drives the spinner state.
  const { mutate, isValidating } = useFetchProviderModelList(provider, enabledAutoFetch);

  return (
    <Typography.Text style={{ fontSize: 12 }} type={'secondary'}>
      <Flexbox align={'center'} gap={0} horizontal justify={'space-between'}>
        <div>{totalModels} 个模型可用</div>
        <Tooltip title={`上次更新时间:${dayjs(latestFetchTime).format('MM-DD HH:mm:ss')}`}>
          <Flexbox
            align={'center'}
            className={styles.hover}
            gap={4}
            horizontal
            onClick={() => mutate()}
          >
            <Icon
              icon={isValidating ? LucideLoaderCircle : LucideRefreshCcwDot}
              size={'small'}
              spin={isValidating}
            />
            <div>{isValidating ? '正在获取模型列表...' : '获取模型列表'}</div>
          </Flexbox>
        </Tooltip>
      </Flexbox>
    </Typography.Text>
  );
});

export default ModelFetcher;

src/app/settings/llm/components/ProviderModelList/index.tsx

+74-63
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import { modelConfigSelectors, modelProviderSelectors } from '@/store/global/sel
1212
import { GlobalLLMProviderKey } from '@/types/settings';
1313

1414
import ModelConfigModal from './ModelConfigModal';
15+
import ModelFetcher from './ModelFetcher';
1516
import OptionRender from './Option';
1617

1718
const styles = {
@@ -36,20 +37,24 @@ interface CustomModelSelectProps {
3637
placeholder?: string;
3738
provider: GlobalLLMProviderKey;
3839
showAzureDeployName?: boolean;
40+
showModelFetcher?: boolean;
3941
}
4042

4143
const ProviderModelListSelect = memo<CustomModelSelectProps>(
42-
({ provider, showAzureDeployName, notFoundContent, placeholder }) => {
44+
({ showModelFetcher = false, provider, showAzureDeployName, notFoundContent, placeholder }) => {
4345
const { t } = useTranslation('common');
4446
const { t: transSetting } = useTranslation('setting');
45-
const chatModelCards = useGlobalStore(
46-
modelConfigSelectors.providerModelCards(provider),
47-
isEqual,
48-
);
4947
const [setModelProviderConfig, dispatchCustomModelCards] = useGlobalStore((s) => [
5048
s.setModelProviderConfig,
5149
s.dispatchCustomModelCards,
50+
s.useFetchProviderModelList,
5251
]);
52+
53+
const chatModelCards = useGlobalStore(
54+
modelConfigSelectors.providerModelCards(provider),
55+
isEqual,
56+
);
57+
5358
const defaultEnableModel = useGlobalStore(
5459
modelProviderSelectors.defaultEnabledProviderModels(provider),
5560
isEqual,
@@ -58,72 +63,78 @@ const ProviderModelListSelect = memo<CustomModelSelectProps>(
5863
modelConfigSelectors.providerEnableModels(provider),
5964
isEqual,
6065
);
66+
6167
const showReset = !!enabledModels && !isEqual(defaultEnableModel, enabledModels);
6268

6369
return (
64-
<div style={{ position: 'relative' }}>
65-
<div className={cx(styles.reset)}>
66-
{showReset && (
67-
<ActionIcon
68-
icon={RotateCwIcon}
69-
onClick={() => {
70-
setModelProviderConfig(provider, { enabledModels: null });
71-
}}
72-
size={'small'}
73-
title={t('reset')}
74-
/>
75-
)}
76-
</div>
77-
<Select<string[]>
78-
allowClear
79-
mode="tags"
80-
notFoundContent={notFoundContent}
81-
onChange={(value, options) => {
82-
setModelProviderConfig(provider, { enabledModels: value.filter(Boolean) });
70+
<>
71+
<Flexbox gap={8}>
72+
<div style={{ position: 'relative' }}>
73+
<div className={cx(styles.reset)}>
74+
{showReset && (
75+
<ActionIcon
76+
icon={RotateCwIcon}
77+
onClick={() => {
78+
setModelProviderConfig(provider, { enabledModels: null });
79+
}}
80+
size={'small'}
81+
title={t('reset')}
82+
/>
83+
)}
84+
</div>
85+
<Select<string[]>
86+
allowClear
87+
mode="tags"
88+
notFoundContent={notFoundContent}
89+
onChange={(value, options) => {
90+
setModelProviderConfig(provider, { enabledModels: value.filter(Boolean) });
8391

84-
// if there is a new model, add it to `customModelCards`
85-
options.forEach((option: { label?: string; value?: string }, index: number) => {
86-
// if is a known model, it should have value
87-
// if is an unknown model, the option will be {}
88-
if (option.value) return;
92+
// if there is a new model, add it to `customModelCards`
93+
options.forEach((option: { label?: string; value?: string }, index: number) => {
94+
// if is a known model, it should have value
95+
// if is an unknown model, the option will be {}
96+
if (option.value) return;
8997

90-
const modelId = value[index];
98+
const modelId = value[index];
9199

92-
dispatchCustomModelCards(provider, {
93-
modelCard: { id: modelId },
94-
type: 'add',
95-
});
96-
});
97-
}}
98-
optionFilterProp="label"
99-
optionRender={({ label, value }) => {
100-
// model is in the chatModels
101-
if (chatModelCards.some((c) => c.id === value))
102-
return (
103-
<OptionRender
104-
displayName={label as string}
105-
id={value as string}
106-
provider={provider}
107-
/>
108-
);
100+
dispatchCustomModelCards(provider, {
101+
modelCard: { id: modelId },
102+
type: 'add',
103+
});
104+
});
105+
}}
106+
optionFilterProp="label"
107+
optionRender={({ label, value }) => {
108+
// model is in the chatModels
109+
if (chatModelCards.some((c) => c.id === value))
110+
return (
111+
<OptionRender
112+
displayName={label as string}
113+
id={value as string}
114+
provider={provider}
115+
/>
116+
);
109117

110-
// model is defined by user in client
111-
return (
112-
<Flexbox align={'center'} gap={8} horizontal>
113-
{transSetting('llm.customModelCards.addNew', { id: value })}
114-
</Flexbox>
115-
);
116-
}}
117-
options={chatModelCards.map((model) => ({
118-
label: model.displayName || model.id,
119-
value: model.id,
120-
}))}
121-
placeholder={placeholder}
122-
popupClassName={cx(styles.popup)}
123-
value={enabledModels ?? defaultEnableModel}
124-
/>
118+
// model is defined by user in client
119+
return (
120+
<Flexbox align={'center'} gap={8} horizontal>
121+
{transSetting('llm.customModelCards.addNew', { id: value })}
122+
</Flexbox>
123+
);
124+
}}
125+
options={chatModelCards.map((model) => ({
126+
label: model.displayName || model.id,
127+
value: model.id,
128+
}))}
129+
placeholder={placeholder}
130+
popupClassName={cx(styles.popup)}
131+
value={enabledModels ?? defaultEnableModel}
132+
/>
133+
</div>
134+
{showModelFetcher && <ModelFetcher provider={provider} />}
135+
</Flexbox>
125136
<ModelConfigModal provider={provider} showAzureDeployName={showAzureDeployName} />
126-
</div>
137+
</>
127138
);
128139
},
129140
);

src/app/settings/llm/index.tsx

+5-5
Original file line numberDiff line numberDiff line change
@@ -32,17 +32,17 @@ export default memo<{ showOllama: boolean }>(({ showOllama }) => {
3232
<OpenAI />
3333
<Azure />
3434
{showOllama && <Ollama />}
35-
<Anthropic />
3635
<Google />
37-
<Groq />
36+
<Anthropic />
3837
<Bedrock />
38+
<OpenRouter />
39+
<TogetherAI />
40+
<Groq />
3941
<Perplexity />
4042
<Mistral />
41-
<OpenRouter />
4243
<Moonshot />
43-
<ZeroOne />
4444
<Zhipu />
45-
<TogetherAI />
45+
<ZeroOne />
4646
<Footer>
4747
<Trans i18nKey="llm.waitingForMore" ns={'setting'}>
4848
更多模型正在

0 commit comments

Comments
 (0)