Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

✨ feat: Add Baichuan model provider #3097

Merged
merged 2 commits into from
Jul 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions src/app/(main)/settings/llm/ProviderList/providers.tsx
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import {
Anthropic,
Baichuan,
Claude,
DeepSeek,
Gemini,
Expand All @@ -24,6 +25,7 @@ import urlJoin from 'url-join';

import {
AnthropicProviderCard,
BaichuanProviderCard,
DeepSeekProviderCard,
GoogleProviderCard,
GroqProviderCard,
Expand Down Expand Up @@ -170,6 +172,11 @@ export const useProviderList = (): ProviderItem[] => {
docUrl: urlJoin(BASE_DOC_URL, 'stepfun'),
title: <Stepfun.Combine size={20} type={'color'} />,
},
{
...BaichuanProviderCard,
docUrl: urlJoin(BASE_DOC_URL, 'baichuan'),
title: <Baichuan.Combine size={20} type={'color'} />,
},
],
[azureProvider, ollamaProvider, bedrockProvider],
);
Expand Down
7 changes: 7 additions & 0 deletions src/app/api/chat/agentRuntime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

const apiKey = apiKeyManager.pick(payload?.apiKey || STEPFUN_API_KEY);

return { apiKey };
}
case ModelProvider.Baichuan: {
const { BAICHUAN_API_KEY } = getLLMConfig();

const apiKey = apiKeyManager.pick(payload?.apiKey || BAICHUAN_API_KEY);

return { apiKey };
}
}
Expand Down
5 changes: 5 additions & 0 deletions src/components/ModelProviderIcon/index.tsx
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import {
Anthropic,
Azure,
Baichuan,
Bedrock,
DeepSeek,
Google,
Expand Down Expand Up @@ -114,6 +115,10 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
return <Stepfun size={20} />;
}

case ModelProvider.Baichuan: {
return <Baichuan size={20} />;
}

default: {
return null;
}
Expand Down
6 changes: 6 additions & 0 deletions src/config/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,9 @@ export const getLLMConfig = () => {

ENABLED_STEPFUN: z.boolean(),
STEPFUN_API_KEY: z.string().optional(),

ENABLED_BAICHUAN: z.boolean(),
BAICHUAN_API_KEY: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
Expand Down Expand Up @@ -155,6 +158,9 @@ export const getLLMConfig = () => {

ENABLED_STEPFUN: !!process.env.STEPFUN_API_KEY,
STEPFUN_API_KEY: process.env.STEPFUN_API_KEY,

ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
},
});
};
Expand Down
55 changes: 55 additions & 0 deletions src/config/modelProviders/baichuan.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
import { ModelProviderCard } from '@/types/llm';

// ref https://platform.baichuan-ai.com/price
// Chat-model catalogue for the Baichuan provider.
// Pricing / capability reference: https://platform.baichuan-ai.com/price
const baichuanChatModels: ModelProviderCard['chatModels'] = [
  {
    description: '模型能力国内第一,在知识百科、长文本、生成创作等中文任务上超越国外主流模型。还具备行业领先的多模态能力,多项权威评测基准表现优异。',
    displayName: 'Baichuan 4',
    enabled: true,
    functionCall: true,
    id: 'Baichuan4',
    maxOutput: 4096,
    tokens: 32_768,
  },
  {
    description: '针对企业高频场景优化,效果大幅提升,高性价比。相对于Baichuan2模型,内容创作提升20%,知识问答提升17%, 角色扮演能力提升40%。整体效果比GPT3.5更优。',
    displayName: 'Baichuan 3 Turbo',
    enabled: true,
    functionCall: true,
    id: 'Baichuan3-Turbo',
    maxOutput: 8192,
    tokens: 32_768,
  },
  {
    description: '具备 128K 超长上下文窗口,针对企业高频场景优化,效果大幅提升,高性价比。相对于Baichuan2模型,内容创作提升20%,知识问答提升17%, 角色扮演能力提升40%。整体效果比GPT3.5更优。',
    displayName: 'Baichuan 3 Turbo 128k',
    enabled: true,
    id: 'Baichuan3-Turbo-128k',
    maxOutput: 4096,
    tokens: 128_000,
  },
  {
    description: '采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。',
    displayName: 'Baichuan 2 Turbo',
    enabled: true,
    id: 'Baichuan2-Turbo',
    maxOutput: 8192,
    tokens: 32_768,
  },
  {
    description: '具备 192K 超长上下文窗口,采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。',
    displayName: 'Baichuan 2 Turbo 192k',
    enabled: true,
    id: 'Baichuan2-Turbo-192k',
    maxOutput: 2048,
    tokens: 192_000,
  },
];

// Provider card consumed by the settings UI and the default model list.
const Baichuan: ModelProviderCard = {
  chatModels: baichuanChatModels,
  // Model used when probing the provider for connectivity.
  checkModel: 'Baichuan4',
  id: 'baichuan',
  // Expose the "fetch model list" control for this provider.
  modelList: { showModelFetcher: true },
  name: 'Baichuan',
};

export default Baichuan;
4 changes: 4 additions & 0 deletions src/config/modelProviders/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import { ChatModelCard, ModelProviderCard } from '@/types/llm';

import AnthropicProvider from './anthropic';
import AzureProvider from './azure';
import BaichuanProvider from './baichuan';
import BedrockProvider from './bedrock';
import DeepSeekProvider from './deepseek';
import GoogleProvider from './google';
Expand Down Expand Up @@ -37,6 +38,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
AnthropicProvider.chatModels,
ZeroOneProvider.chatModels,
StepfunProvider.chatModels,
BaichuanProvider.chatModels,
].flat();

export const DEFAULT_MODEL_PROVIDER_LIST = [
Expand All @@ -58,6 +60,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
ZeroOneProvider,
ZhiPuProvider,
StepfunProvider,
BaichuanProvider,
];

export const filterEnabledModels = (provider: ModelProviderCard) => {
Expand All @@ -66,6 +69,7 @@ export const filterEnabledModels = (provider: ModelProviderCard) => {

export { default as AnthropicProviderCard } from './anthropic';
export { default as AzureProviderCard } from './azure';
export { default as BaichuanProviderCard } from './baichuan';
export { default as BedrockProviderCard } from './bedrock';
export { default as DeepSeekProviderCard } from './deepseek';
export { default as GoogleProviderCard } from './google';
Expand Down
5 changes: 5 additions & 0 deletions src/const/settings/llm.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import {
AnthropicProviderCard,
BaichuanProviderCard,
BedrockProviderCard,
DeepSeekProviderCard,
GoogleProviderCard,
Expand Down Expand Up @@ -29,6 +30,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
azure: {
enabled: false,
},
baichuan: {
enabled: false,
enabledModels: filterEnabledModels(BaichuanProviderCard),
},
bedrock: {
enabled: false,
enabledModels: filterEnabledModels(BedrockProviderCard),
Expand Down
7 changes: 7 additions & 0 deletions src/libs/agent-runtime/AgentRuntime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import type { TracePayload } from '@/const/trace';
import { LobeRuntimeAI } from './BaseAI';
import { LobeAnthropicAI } from './anthropic';
import { LobeAzureOpenAI } from './azureOpenai';
import { LobeBaichuanAI } from './baichuan';
import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
import { LobeDeepSeekAI } from './deepseek';
import { LobeGoogleAI } from './google';
Expand Down Expand Up @@ -103,6 +104,7 @@ class AgentRuntime {
params: Partial<{
anthropic: Partial<ClientOptions>;
azure: { apiVersion?: string; apikey?: string; endpoint?: string };
baichuan: Partial<ClientOptions>;
bedrock: Partial<LobeBedrockAIParams>;
deepseek: Partial<ClientOptions>;
google: { apiKey?: string; baseURL?: string };
Expand Down Expand Up @@ -219,6 +221,11 @@ class AgentRuntime {
runtimeModel = new LobeStepfunAI(params.stepfun ?? {});
break;
}

case ModelProvider.Baichuan: {
runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
break;
}
}

return new AgentRuntime(runtimeModel);
Expand Down
Loading
Loading