Merge pull request #3743 from vloum/feat/add-lobeapi

lobe: add the ChatChat API and models

Commit 7061cb6297
@@ -1,5 +1,5 @@
 # add a access code to lock your lobe-chat application, you can set a long password to avoid leaking. If this value contains a comma, it is a password array.
-#ACCESS_CODE=lobe66
+# ACCESS_CODE=lobe66

 # add your custom model name, multi model separate by comma. for example gpt-3.5-1106,gpt-4-1106
 # CUSTOM_MODELS=model1,model2,model3
@@ -14,7 +14,7 @@
 ########################################

 # you openai api key
-OPENAI_API_KEY=sk-xxxxxxxxx
+OPENAI_API_KEY = sk-xxxxxxxxx

 # use a proxy to connect to the OpenAI API
 # OPENAI_PROXY_URL=https://api.openai.com/v1
@@ -40,27 +40,27 @@ OPENAI_API_KEY=sk-xxxxxxxxx
 ############ ZhiPu AI Service ##########
 ########################################

-#ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx
+# ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx

 ########################################
 ########## Moonshot AI Service #########
 ########################################

-#MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

 ########################################
 ########### Google AI Service ##########
 ########################################

-#GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx

 ########################################
 ######### AWS Bedrock Service ##########
 ########################################

-#AWS_REGION=us-east-1
-#AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
-#AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# AWS_REGION=us-east-1
+# AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
+# AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

 ########################################
 ######### Ollama AI Service ##########
@@ -73,19 +73,19 @@ OPENAI_API_KEY=sk-xxxxxxxxx
 ########### Mistral AI Service ##########
 ########################################

-#MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx

 ########################################
 ######### Perplexity Service ##########
 ########################################

-#PERPLEXITY_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# PERPLEXITY_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx

 ########################################
 ######### Anthropic Service ##########
 ########################################

-#ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx

 ########################################
 ############ Market Service ############
@@ -98,6 +98,9 @@ OPENAI_API_KEY=sk-xxxxxxxxx
 ############ Plugin Service ############
 ########################################

+# you can use ChatChat. The local/remote ChatChat service url
+CHATCHAT_PROXY_URL = 'http://localhost:7861/v1'
+
 # The LobeChat plugins store index url
 # PLUGINS_INDEX_URL=https://chat-plugins.lobehub.com

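For orientation before the code hunks: the new CHATCHAT_PROXY_URL variable is read back server-side with an empty-string fallback. A minimal standalone sketch of that plumbing, with the name taken from the getServerConfig hunks further down in this diff:

// Minimal sketch: how the env default above is consumed server-side; mirrors
// `process.env.CHATCHAT_PROXY_URL || ''` from the getProviderConfig hunk below.
const CHATCHAT_PROXY_URL: string = process.env.CHATCHAT_PROXY_URL || '';
console.log(CHATCHAT_PROXY_URL || '(no ChatChat proxy configured)');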
@@ -20,6 +20,7 @@ import {
   LobePerplexityAI,
   LobeRuntimeAI,
   LobeZhipuAI,
+  LobeChatChatAI,
   ModelProvider,
 } from '@/libs/agent-runtime';
 import { TraceClient } from '@/libs/traces';
@@ -167,6 +168,11 @@ class AgentRuntime {
        runtimeModel = this.initMistral(payload);
        break;
      }
+
+     case ModelProvider.ChatChat: {
+       runtimeModel = this.initChatChat(payload);
+       break;
+     }
    }

    return new AgentRuntime(runtimeModel);
@@ -268,6 +274,13 @@ class AgentRuntime {

    return new LobeMistralAI({ apiKey });
  }
+
+ private static initChatChat(payload: JWTPayload) {
+   const { CHATCHAT_PROXY_URL } = getServerConfig();
+   const baseURL = payload?.endpoint || CHATCHAT_PROXY_URL;
+
+   return new LobeChatChatAI({ baseURL });
+ }
 }

 export default AgentRuntime;
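A short sketch of the resolution order initChatChat applies; the JWTPayload shape is reduced to the one field used here, so this is an illustration rather than the project's code:

// Per-user endpoint from the JWT payload wins over the server-wide env value.
interface PayloadSketch { endpoint?: string } // assumed minimal JWTPayload shape

const resolveBaseURL = (payload: PayloadSketch, envUrl?: string) =>
  payload?.endpoint || envUrl;

console.log(resolveBaseURL({ endpoint: 'https://example.com/v1' }, 'http://localhost:7861/v1'));
// -> 'https://example.com/v1'. An undefined result falls back to the runtime's
// DEFAULT_BASE_URL below; an empty string is rejected as InvalidChatChatArgs.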
@@ -3,6 +3,7 @@ import { useHover } from 'ahooks';
 import { createStyles, useResponsive } from 'antd-style';
 import { memo, useMemo, useRef } from 'react';
 import Avatar from '@/components/Avatar';

 const { Item } = List;

 const useStyles = createStyles(({ css, token, responsive }) => {

@@ -1,6 +1,7 @@
 import { MobileNavBar } from '@lobehub/ui';
 import { memo } from 'react';
 import Logo from '@/components/Logo';

 const Header = memo(() => {
   return <MobileNavBar center={<Logo type={'text'} />} />;
 });

frontend/src/app/settings/llm/ChatChat/index.tsx (new file, 65 lines)
@@ -0,0 +1,65 @@
+import { Input, Flex } from 'antd';
+import { useTheme } from 'antd-style';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import Avatar from 'next/image';
+
+import { imageUrl } from '@/const/url';
+
+import { ModelProvider } from '@/libs/agent-runtime';
+
+import Checker from '../components/Checker';
+import ProviderConfig from '../components/ProviderConfig';
+import { LLMProviderBaseUrlKey, LLMProviderConfigKey } from '../const';
+
+const providerKey = 'chatchat';
+
+const ChatChatProvider = memo(() => {
+  const { t } = useTranslation('setting');
+  const theme = useTheme();
+
+  return (
+    <ProviderConfig
+      configItems={[
+        {
+          children: <Input allowClear placeholder={t('llm.ChatChat.endpoint.placeholder')} />,
+          desc: t('llm.ChatChat.endpoint.desc'),
+          label: t('llm.ChatChat.endpoint.title'),
+          name: [LLMProviderConfigKey, providerKey, LLMProviderBaseUrlKey],
+        },
+        {
+          children: (
+            <Input.TextArea
+              allowClear
+              placeholder={t('llm.ChatChat.customModelName.placeholder')}
+              style={{ height: 100 }}
+            />
+          ),
+          desc: t('llm.ChatChat.customModelName.desc'),
+          label: t('llm.ChatChat.customModelName.title'),
+          name: [LLMProviderConfigKey, providerKey, 'customModelName'],
+        },
+        {
+          children: <Checker model={'gml-4'} provider={ModelProvider.ChatChat} />,
+          desc: t('llm.ChatChat.checker.desc'),
+          label: t('llm.checker.title'),
+          minWidth: undefined,
+        },
+      ]}
+      provider={providerKey}
+      title={
+        <Flex>
+          <Avatar
+            alt={'Chatchat'}
+            height={24}
+            src={imageUrl('logo.png')}
+            width={24}
+          />
+          {'ChatChat'}
+        </Flex>
+      }
+    />
+  );
+});
+
+export default ChatChatProvider;
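The name paths above decide where ProviderConfig persists each value. A sketch with assumed constant values (this diff only imports them from ../const, so the literals here are guesses):

// Sketch — the two constant values are assumptions, not shown in this diff.
const LLMProviderConfigKey = 'languageModel'; // assumed
const LLMProviderBaseUrlKey = 'endpoint'; // assumed
const providerKey = 'chatchat';

const endpointPath = [LLMProviderConfigKey, providerKey, LLMProviderBaseUrlKey];
console.log(endpointPath.join('.')); // languageModel.chatchat.endpoint — lines up with ChatChatConfig.endpoint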
@@ -17,6 +17,7 @@ import Ollama from './Ollama';
 import OpenAI from './OpenAI';
 import Perplexity from './Perplexity';
 import Zhipu from './Zhipu';
+import ChatChat from './ChatChat'

 export default memo<{ showOllama: boolean }>(({ showOllama }) => {
   const { t } = useTranslation('setting');
@@ -34,6 +35,7 @@ export default memo<{ showOllama: boolean }>(({ showOllama }) => {
       <Anthropic />
       <Mistral />
       {showOllama && <Ollama />}
+      <ChatChat/>
       <Footer>
         <Trans i18nKey="llm.waitingForMore" ns={'setting'}>
           更多模型正在

@@ -1,6 +1,7 @@
 import { MobileNavBar } from '@lobehub/ui';
 import { memo } from 'react';
 import Logo from '@/components/Logo';

 const Header = memo(() => <MobileNavBar center={<Logo type={'text'} />} />);

 export default Header;

@@ -3,6 +3,7 @@ import { Loader2 } from 'lucide-react';
 import { memo } from 'react';
 import { Center, Flexbox } from 'react-layout-kit';
 import Logo from '@/components/Logo';

 const FullscreenLoading = memo<{ title?: string }>(({ title }) => {
   return (
     <Flexbox height={'100%'} style={{ userSelect: 'none' }} width={'100%'}>

@@ -12,6 +12,8 @@ import {
   OpenAI,
   Perplexity,
   Tongyi,
+  Spark,
+  Wenxin,
 } from '@lobehub/icons';
 import { memo } from 'react';

@@ -25,7 +27,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model, size = 12 }) => {

   if (model.startsWith('gpt-3')) return <OpenAI.Avatar size={size} type={'gpt3'} />;
   if (model.startsWith('gpt-4')) return <OpenAI.Avatar size={size} type={'gpt4'} />;
-  if (model.startsWith('glm')) return <ChatGLM.Avatar size={size} />;
+  if (model.includes('glm')) return <ChatGLM.Avatar size={size} />;
   if (model.includes('claude')) return <Claude.Avatar size={size} />;
   if (model.includes('titan')) return <Aws.Avatar size={size} />;
   if (model.includes('llama')) return <Meta.Avatar size={size} />;
@@ -38,6 +40,8 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model, size = 12 }) => {
     return <Baichuan.Avatar background={Baichuan.colorPrimary} size={size} />;
   if (model.includes('mistral') || model.includes('mixtral')) return <Mistral.Avatar size={size} />;
   if (model.includes('pplx')) return <Perplexity.Avatar size={size} />;
+  if (model.includes('Spark')) return <Spark.Avatar size={size} />;
+  if (model.includes('ERNIE')) return <Wenxin.Avatar size={size} />;
 });

 export default ModelIcon;
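The startsWith-to-includes change matters for the new provider: its ChatGLM model ids embed 'glm' rather than starting with it. A quick sketch against ids from the chatchat.ts card added below:

// Substring checks against model ids declared in chatchat.ts.
console.log('chatglm3-6b'.startsWith('glm')); // false — the old check missed it
console.log('chatglm3-6b'.includes('glm'));   // true  — ChatGLM avatar
console.log('SparkDesk'.includes('Spark'));   // true  — new Spark branch
console.log('ERNIE-Bot-4'.includes('ERNIE')); // true  — new Wenxin branch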
@@ -12,9 +12,12 @@ import {
 } from '@lobehub/icons';
 import { memo } from 'react';
 import { Center } from 'react-layout-kit';
+import Avatar from 'next/image';

 import { ModelProvider } from '@/libs/agent-runtime';

+import { imageUrl } from '@/const/url';
+
 interface ModelProviderIconProps {
   provider?: string;
 }
@@ -69,6 +72,15 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
     return <Anthropic size={20} />;
   }
+
+  case ModelProvider.ChatChat: {
+    return <Avatar
+      alt={'Chatchat'}
+      height={24}
+      src={imageUrl('logo.png')}
+      width={24}
+    />
+  }
   default: {
     return null;
   }
 }

frontend/src/config/modelProviders/chatchat.ts (new file, 83 lines)
@@ -0,0 +1,83 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const ChatChat: ModelProviderCard = {
+  id: 'chatchat',
+  chatModels: [
+    {
+      id: 'chatglm3-6b',
+      tokens: 4000,
+      displayName: 'chatglm3-6b',
+      functionCall: true,
+    },
+    {
+      id: 'chatglm_turbo',
+      tokens: 4000,
+      displayName: 'chatglm_turbo',
+    },
+    {
+      id: 'chatglm_std',
+      tokens: 4000,
+      displayName: 'chatglm_std',
+    },
+    {
+      id: 'chatglm_lite',
+      tokens: 4000,
+      displayName: 'chatglm_lite',
+    },
+    {
+      id: 'qwen-turbo',
+      tokens: 4000,
+      displayName: 'qwen-turbo',
+      functionCall: true,
+    },
+    {
+      id: 'qwen-plus',
+      tokens: 4000,
+      displayName: 'qwen-plus',
+    },
+    {
+      id: 'qwen-max',
+      tokens: 4000,
+      displayName: 'qwen-max',
+    },
+    {
+      id: 'qwen:7b',
+      tokens: 4000,
+      displayName: 'qwen:7b',
+      functionCall: true,
+    },
+    {
+      id: 'qwen:14b',
+      tokens: 4000,
+      displayName: 'qwen:14b',
+      functionCall: true,
+    },
+    {
+      id: 'qwen-max-longcontext',
+      tokens: 4000,
+      displayName: 'qwen-max-longcontext',
+    },
+    {
+      id: 'ERNIE-Bot',
+      tokens: 4000,
+      displayName: 'ERNIE-Bot',
+    },
+    {
+      id: 'ERNIE-Bot-turbo',
+      tokens: 4000,
+      displayName: 'ERNIE-Bot-turbo',
+    },
+    {
+      id: 'ERNIE-Bot-4',
+      tokens: 4000,
+      displayName: 'ERNIE-Bot-4',
+    },
+    {
+      id: 'SparkDesk',
+      tokens: 4000,
+      displayName: 'SparkDesk',
+    }
+  ]
+}
+
+export default ChatChat;
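A usage sketch of the card; the providers index in the next hunk consumes it the same way via ChatChatProvider.chatModels:

// Sketch: reading the new provider card.
import ChatChatProvider from './chatchat';

const ids = ChatChatProvider.chatModels.map((model) => model.id);
console.log(ids.length); // 14 — every entry declares a 4000-token window
console.log(ids.includes('chatglm3-6b')); // true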
@@ -9,6 +9,7 @@ import OllamaProvider from './ollama';
 import OpenAIProvider from './openai';
 import PerplexityProvider from './perplexity';
 import ZhiPuProvider from './zhipu';
+import ChatChatProvider from './chatchat'

 export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   OpenAIProvider.chatModels,
@@ -20,6 +21,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   OllamaProvider.chatModels,
   PerplexityProvider.chatModels,
   AnthropicProvider.chatModels,
+  ChatChatProvider.chatModels,
 ].flat();

 export { default as AnthropicProvider } from './anthropic';
@@ -31,3 +33,4 @@ export { default as OllamaProvider } from './ollama';
 export { default as OpenAIProvider } from './openai';
 export { default as PerplexityProvider } from './perplexity';
 export { default as ZhiPuProvider } from './zhipu';
+export { default as ChatChatProvider } from './chatchat'
@@ -46,6 +46,9 @@ declare global {

       // Ollama Provider;
       OLLAMA_PROXY_URL?: string;
+
+      // ChatChat
+      CHATCHAT_PROXY_URL?: string;
     }
   }
 }
@@ -114,5 +117,7 @@ export const getProviderConfig = () => {

     ENABLE_OLLAMA: !!process.env.OLLAMA_PROXY_URL,
     OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
+
+    CHATCHAT_PROXY_URL: process.env.CHATCHAT_PROXY_URL || '',
   };
 };
@@ -91,6 +91,10 @@ export const DEFAULT_LLM_CONFIG: GlobalLLMConfig = {
     apiKey: '',
     enabled: false,
   },
+  chatchat: {
+    enabled: false,
+    endpoint: ''
+  },
 };

 export const DEFAULT_AGENT: GlobalDefaultAgent = {
@@ -3,6 +3,7 @@ import { createStyles } from 'antd-style';
 import { ReactNode, memo } from 'react';
 import { Center, Flexbox } from 'react-layout-kit';
 import Avatar from '@/components/Avatar';

 export const useStyles = createStyles(({ css, token }) => ({
   container: css`
     color: ${token.colorText};
frontend/src/libs/agent-runtime/chatchat/index.test.ts (new file, 102 lines)
@@ -0,0 +1,102 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeChatChatAI } from './index';
+
+const provider = 'knowledge';
+const defaultBaseURL = 'http://localhost:7861/v1';
+const bizErrorType = 'knowledgeBizError';
+const invalidErrorType = 'InvalidKnowledgeArgs';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeChatChatAI;
+
+beforeEach(() => {
+  instance = new LobeChatChatAI({ apiKey: 'knowledge', baseURL: defaultBaseURL });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeChatChatAI', () => {
+  describe('init', () => {
+    it('should init with default baseURL', () => {
+      expect(instance.baseURL).toBe(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    it('should return a StreamingTextResponse on successful API call', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'gpt-3.5-turbo',
+        temperature: 0,
+      });
+
+      // Assert
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    it('should return a StreamingTextResponse on a successful non-streaming API call', async () => {
+      // Arrange
+      const mockResponse = Promise.resolve({
+        id: 'chatcmpl-98QIb3NiYLYlRTB6t0VrJ0wntNW6K',
+        object: 'chat.completion',
+        created: 1711794745,
+        model: 'gpt-3.5-turbo-0125',
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: 'assistant',
+              content: '你好!有什么可以帮助你的吗?',
+            },
+            logprobs: null,
+            finish_reason: 'stop',
+          },
+        ],
+        usage: {
+          prompt_tokens: 9,
+          completion_tokens: 17,
+          total_tokens: 26,
+        },
+        system_fingerprint: 'fp_b28b39ffa8',
+      });
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'gpt-3.5-turbo',
+        stream: false,
+        temperature: 0,
+      });
+
+      // Assert
+      expect(result).toBeInstanceOf(Response);
+    });
+  });
+});
frontend/src/libs/agent-runtime/chatchat/index.ts (new file, 111 lines)
@@ -0,0 +1,111 @@
+import { OpenAIStream, StreamingTextResponse } from 'ai';
+import OpenAI, { ClientOptions } from 'openai';
+
+import { LobeRuntimeAI } from '../BaseAI';
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+import { AgentRuntimeError } from '../utils/createError';
+import { debugStream } from '../utils/debugStream';
+import { desensitizeUrl } from '../utils/desensitizeUrl';
+import { handleOpenAIError } from '../utils/handleOpenAIError';
+import { Stream } from 'openai/streaming';
+
+const DEFAULT_BASE_URL = 'http://localhost:7861/v1';
+
+export class LobeChatChatAI implements LobeRuntimeAI {
+  private client: OpenAI;
+
+  baseURL: string;
+
+  constructor({ apiKey = 'chatChat', baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
+    if (!baseURL) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidChatChatArgs);
+
+    this.client = new OpenAI({ apiKey, baseURL, ...res });
+    this.baseURL = baseURL;
+  }
+
+  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
+    try {
+      const response = await this.client.chat.completions.create(
+        payload as unknown as
+          | OpenAI.ChatCompletionCreateParamsStreaming
+          | OpenAI.ChatCompletionCreateParamsNonStreaming,
+      );
+
+      if (LobeChatChatAI.isStream(response)) {
+        const [prod, debug] = response.tee();
+
+        if (process.env.DEBUG_OLLAMA_CHAT_COMPLETION === '1') {
+          debugStream(debug.toReadableStream()).catch(console.error);
+        }
+
+        return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
+          headers: options?.headers,
+        });
+      } else {
+        if (process.env.DEBUG_OLLAMA_CHAT_COMPLETION === '1') {
+          console.debug(JSON.stringify(response));
+        }
+
+        const stream = LobeChatChatAI.createChatCompletionStream(
+          response?.choices[0].message.content || '',
+        );
+
+        return new StreamingTextResponse(stream);
+      }
+    } catch (error) {
+      let desensitizedEndpoint = this.baseURL;
+
+      if (this.baseURL !== DEFAULT_BASE_URL) {
+        desensitizedEndpoint = desensitizeUrl(this.baseURL);
+      }
+
+      if ('status' in (error as any)) {
+        switch ((error as Response).status) {
+          case 401: {
+            throw AgentRuntimeError.chat({
+              endpoint: desensitizedEndpoint,
+              error: error as any,
+              errorType: AgentRuntimeErrorType.InvalidChatChatArgs,
+              provider: ModelProvider.ChatChat,
+            });
+          }
+
+          default: {
+            break;
+          }
+        }
+      }
+
+      const { errorResult, RuntimeError } = handleOpenAIError(error);
+
+      const errorType = RuntimeError || AgentRuntimeErrorType.ChatChatBizError;
+
+      throw AgentRuntimeError.chat({
+        endpoint: desensitizedEndpoint,
+        error: errorResult,
+        errorType,
+        provider: ModelProvider.ChatChat,
+      });
+    }
+  }
+
+  static isStream(obj: unknown): obj is Stream<OpenAI.Chat.Completions.ChatCompletionChunk> {
+    return typeof Stream !== 'undefined' && (obj instanceof Stream || obj instanceof ReadableStream);
+  }
+
+  // Create a ReadableStream<string> that emits the full completion text once
+  static createChatCompletionStream(text: string): ReadableStream<string> {
+    const stream = new ReadableStream({
+      start(controller) {
+        controller.enqueue(text);
+        controller.close();
+      },
+    });
+
+    return stream;
+  }
+}
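A usage sketch mirroring the unit test above: point the runtime at a local ChatChat server and issue a chat call; streaming and non-streaming upstream replies both surface as a StreamingTextResponse.

// Usage sketch, following the shapes exercised in index.test.ts.
import { LobeChatChatAI } from '@/libs/agent-runtime';

const chatchat = new LobeChatChatAI({ baseURL: 'http://localhost:7861/v1' });

const response = await chatchat.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'chatglm3-6b', // any id from the chatchat provider card
  temperature: 0,
});
console.log(response instanceof Response); // true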
@@ -34,6 +34,9 @@ export const AgentRuntimeErrorType = {

   InvalidAnthropicAPIKey: 'InvalidAnthropicAPIKey',
   AnthropicBizError: 'AnthropicBizError',
+
+  InvalidChatChatArgs: 'InvalidChatChatArgs',
+  ChatChatBizError: 'ChatChatBizError',
 } as const;

 export type ILobeAgentRuntimeErrorType =

@@ -2,6 +2,7 @@ export { LobeAnthropicAI } from './anthropic';
 export { LobeAzureOpenAI } from './azureOpenai';
 export * from './BaseAI';
 export { LobeBedrockAI } from './bedrock';
+export { LobeChatChatAI } from './chatchat';
 export * from './error';
 export { LobeGoogleAI } from './google';
 export { LobeMistralAI } from './mistral';

@@ -25,6 +25,7 @@ export enum ModelProvider {
   Anthropic = 'anthropic',
   Azure = 'azure',
   Bedrock = 'bedrock',
+  ChatChat = 'chatchat',
   ChatGLM = 'chatglm',
   Google = 'google',
   Mistral = 'mistral',
@@ -33,5 +34,5 @@ export enum ModelProvider {
   OpenAI = 'openai',
   Perplexity = 'perplexity',
   Tongyi = 'tongyi',
-  ZhiPu = 'zhipu',
+  ZhiPu = 'zhipu'
 }
@@ -111,6 +111,7 @@ export default {
     openai: 'OpenAI',
     perplexity: 'Perplexity',
     zhipu: '智谱AI',
+    chatchat: 'ChatChat',
   },
   noDescription: '暂无描述',
   oauth: 'SSO 登录',
@@ -181,6 +181,22 @@ export default {
       title: 'API Key',
     },
   },
+  ChatChat: {
+    title: 'ChatChat',
+    checker: {
+      desc: '测试地址是否正确填写',
+    },
+    customModelName: {
+      desc: '增加自定义模型,多个模型使用逗号(,)隔开',
+      placeholder: 'gml-4',
+      title: '自定义模型名称',
+    },
+    endpoint: {
+      desc: '填入 ChatChat 接口代理地址,本地未额外指定可留空',
+      placeholder: 'http://127.0.0.1:7861/chat',
+      title: '接口代理地址',
+    },
+  },

   checker: {
     button: '检查',
@@ -60,6 +60,10 @@ export const getProviderAuthPayload = (provider: string) => {
     return { apiKey: modelProviderSelectors.mistralAPIKey(useGlobalStore.getState()) };
   }

+  case ModelProvider.ChatChat: {
+    return { endpoint: modelProviderSelectors.chatChatProxyUrl(useGlobalStore.getState()) }
+  }
+
   default:
   case ModelProvider.OpenAI: {
     const openai = modelProviderSelectors.openAIConfig(useGlobalStore.getState());
@@ -11,6 +11,7 @@ import {
   OpenAIProvider,
   PerplexityProvider,
   ZhiPuProvider,
+  ChatChatProvider,
 } from '@/config/modelProviders';
 import { ChatModelCard, ModelProviderCard } from '@/types/llm';
 import { GlobalLLMProviderKey } from '@/types/settings';
@@ -60,6 +61,9 @@ const perplexityAPIKey = (s: GlobalStore) => modelProvider(s).perplexity.apiKey;
 const enableAnthropic = (s: GlobalStore) => modelProvider(s).anthropic.enabled;
 const anthropicAPIKey = (s: GlobalStore) => modelProvider(s).anthropic.apiKey;

+const enableChatChat = (s: GlobalStore) => modelProvider(s).chatchat.enabled;
+const chatChatProxyUrl = (s: GlobalStore) => modelProvider(s).chatchat.endpoint;
+
 // const azureModelList = (s: GlobalStore): ModelProviderCard => {
 //   const azure = azureConfig(s);
 //   return {
@@ -148,6 +152,7 @@ const modelSelectList = (s: GlobalStore): ModelProviderCard[] => {
     { ...PerplexityProvider, enabled: enablePerplexity(s) },
     { ...AnthropicProvider, enabled: enableAnthropic(s) },
     { ...MistralProvider, enabled: enableMistral(s) },
+    { ...ChatChatProvider, enabled: enableChatChat(s) },
   ];
 };

@@ -230,4 +235,8 @@ export const modelProviderSelectors = {
   // Mistral
   enableMistral,
   mistralAPIKey,
+
+  // ChatChat
+  enableChatChat,
+  chatChatProxyUrl,
 };
@@ -70,6 +70,12 @@ export interface MistralConfig {
   enabled: boolean;
 }

+export interface ChatChatConfig {
+  customModelName?: string;
+  enabled?: boolean;
+  endpoint?: string;
+}
+
 export interface GlobalLLMConfig {
   anthropic: AnthropicConfig;
   azure: AzureOpenAIConfig;
@@ -81,6 +87,7 @@ export interface GlobalLLMConfig {
   openAI: OpenAIConfig;
   perplexity: PerplexityConfig;
   zhipu: ZhiPuConfig;
+  chatchat: ChatChatConfig;
 }

 export type GlobalLLMProviderKey = keyof GlobalLLMConfig;
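The chatchat entry added to DEFAULT_LLM_CONFIG earlier type-checks against this interface; a one-line sketch (the import path is an assumption):

// Sketch: the DEFAULT_LLM_CONFIG addition above satisfies the new interface.
import { ChatChatConfig } from '@/types/settings'; // assumed path

const chatchatDefaults: ChatChatConfig = { enabled: false, endpoint: '' };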