[update] add ChatChat model

VLOU 2024-04-13 20:07:59 +08:00
parent 6f3a07ee61
commit 44eaf62e6c
12 changed files with 88 additions and 31 deletions

View File

@@ -1,5 +1,5 @@
# add an access code to lock your lobe-chat application; set a long password to avoid leaks. If this value contains a comma, it is treated as a password array.
#ACCESS_CODE=lobe66
# ACCESS_CODE=lobe66
# add your custom model names, multiple models separated by commas, e.g. gpt-3.5-1106,gpt-4-1106
# CUSTOM_MODELS=model1,model2,model3
@@ -14,7 +14,7 @@
########################################
# your OpenAI API key
OPENAI_API_KEY=sk-xxxxxxxxx
OPENAI_API_KEY = sk-xxxxxxxxx
# use a proxy to connect to the OpenAI API
# OPENAI_PROXY_URL=https://api.openai.com/v1
@@ -40,27 +40,27 @@ OPENAI_API_KEY=sk-xxxxxxxxx
############ ZhiPu AI Service ##########
########################################
#ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx
# ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx
########################################
########## Moonshot AI Service #########
########################################
#MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
########### Google AI Service ##########
########################################
#GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
######### AWS Bedrock Service ##########
########################################
#AWS_REGION=us-east-1
#AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
#AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# AWS_REGION=us-east-1
# AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
# AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
######### Ollama AI Service ##########
@@ -73,19 +73,19 @@ OPENAI_API_KEY=sk-xxxxxxxxx
########### Mistral AI Service ##########
########################################
#MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
######### Perplexity Service ##########
########################################
#PERPLEXITY_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# PERPLEXITY_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
######### Anthropic Service ##########
########################################
#ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
############ Market Service ############
@@ -98,6 +98,9 @@ OPENAI_API_KEY=sk-xxxxxxxxx
############ Plugin Service ############
########################################
# you can use ChatChat. The local/remote ChatChat service URL
CHATCHAT_PROXY_URL = 'http://localhost:7861/v1'
# The LobeChat plugins store index url
# PLUGINS_INDEX_URL=https://chat-plugins.lobehub.com
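Note: a minimal sketch of what the new entry yields at runtime. Dotenv-style parsers (as used by Next.js for .env files) typically trim the spaces around = and strip the surrounding single quotes; outside such loaders the quotes would be kept literally.
// sketch: the ChatChat endpoint as read on the server, with the same
// empty-string fallback used by getProviderConfig later in this commit
const chatchatProxyUrl = process.env.CHATCHAT_PROXY_URL || '';
// -> 'http://localhost:7861/v1' with the example value above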

View File

@@ -276,8 +276,8 @@ class AgentRuntime {
}
private static initChatChat(payload: JWTPayload) {
const { KNOWLEDGE_PROXY_URL } = getServerConfig();
const baseURL = payload?.endpoint || KNOWLEDGE_PROXY_URL;
const { CHATCHAT_PROXY_URL } = getServerConfig();
const baseURL = payload?.endpoint || CHATCHAT_PROXY_URL;
return new LobeChatChatAI({ baseURL });
}
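The fallback order is the crux of this change; a minimal sketch under the shapes visible above (the example URLs are hypothetical):
// sketch: a caller-supplied endpoint takes precedence; the server-configured
// CHATCHAT_PROXY_URL is only the fallback (mirrors `payload?.endpoint || CHATCHAT_PROXY_URL`)
const resolveBaseURL = (endpoint: string | undefined, serverDefault: string) =>
  endpoint || serverDefault;

resolveBaseURL(undefined, 'http://localhost:7861/v1'); // -> server default
resolveBaseURL('https://chatchat.example.com/v1', 'http://localhost:7861/v1'); // -> caller's endpoint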

View File

@@ -25,7 +25,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model, size = 12 }) => {
if (model.startsWith('gpt-3')) return <OpenAI.Avatar size={size} type={'gpt3'} />;
if (model.startsWith('gpt-4')) return <OpenAI.Avatar size={size} type={'gpt4'} />;
if (model.startsWith('glm')) return <ChatGLM.Avatar size={size} />;
if (model.includes('glm')) return <ChatGLM.Avatar size={size} />;
if (model.includes('claude')) return <Claude.Avatar size={size} />;
if (model.includes('titan')) return <Aws.Avatar size={size} />;
if (model.includes('llama')) return <Meta.Avatar size={size} />;
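The switch from startsWith to includes matters for ChatChat's model ids; a quick illustration:
// 'chatglm_pro' (the model id on the new ChatChat card) does not start with 'glm',
// so the old prefix check skipped it; substring matching picks it up
'chatglm_pro'.startsWith('glm'); // false — old check, no ChatGLM icon
'chatglm_pro'.includes('glm');   // true  — new check, icon renders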

View File

@@ -12,9 +12,12 @@ import {
} from '@lobehub/icons';
import { memo } from 'react';
import { Center } from 'react-layout-kit';
import Avatar from 'next/image';
import { ModelProvider } from '@/libs/agent-runtime';
import { imageUrl } from '@/const/url';
interface ModelProviderIconProps {
provider?: string;
}
@@ -69,6 +72,15 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
return <Anthropic size={20} />;
}
case ModelProvider.ChatChat: {
return <Avatar
alt={'Chatchat'}
height={24}
src={imageUrl('logo.png')}
width={24}
/>
}
default: {
return null;
}

View File

@@ -0,0 +1,20 @@
import { ModelProviderCard } from '@/types/llm';
const ChatChat: ModelProviderCard = {
id: 'chatchat',
chatModels: [
{
id: 'chatglm_pro',
tokens: 128_000,
displayName: 'chatglm_pro'
},
{
id: 'gpt-4-turbo-2024-04-09',
tokens: 128_000,
displayName: 'gpt-4-turbo-2024-04-09',
vision: true,
}
]
};
export default ChatChat;
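A brief sketch of how the card's fields read (field semantics inferred from the sibling provider cards; tokens is the context window size):
// sketch: looking a model up on the new card
const gpt4 = ChatChat.chatModels.find((m) => m.id === 'gpt-4-turbo-2024-04-09');
gpt4?.tokens;      // 128_000 — context window size
gpt4?.vision;      // true — marks the model as accepting image input
gpt4?.displayName; // label shown in the model picker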

View File

@@ -9,6 +9,7 @@ import OllamaProvider from './ollama';
import OpenAIProvider from './openai';
import PerplexityProvider from './perplexity';
import ZhiPuProvider from './zhipu';
import ChatChatProvider from './chatchat';
export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
OpenAIProvider.chatModels,
@@ -20,6 +21,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
OllamaProvider.chatModels,
PerplexityProvider.chatModels,
AnthropicProvider.chatModels,
ChatChatProvider.chatModels,
].flat();
export { default as AnthropicProvider } from './anthropic';
@@ -31,3 +33,4 @@ export { default as OllamaProvider } from './ollama';
export { default as OpenAIProvider } from './openai';
export { default as PerplexityProvider } from './perplexity';
export { default as ZhiPuProvider } from './zhipu';
export { default as ChatChatProvider } from './chatchat';
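Once registered here, the ChatChat models are discoverable through the flattened default list; a minimal sketch:
// sketch: LOBE_DEFAULT_MODEL_LIST flattens every provider's chatModels,
// so the new ids resolve like any other model
const hit = LOBE_DEFAULT_MODEL_LIST.find((m) => m.id === 'chatglm_pro');
// hit?.displayName === 'chatglm_pro'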

View File

@@ -46,6 +46,9 @@ declare global {
// Ollama Provider;
OLLAMA_PROXY_URL?: string;
// ChatChat
CHATCHAT_PROXY_URL?: string;
}
}
}
@@ -115,6 +118,6 @@ export const getProviderConfig = () => {
ENABLE_OLLAMA: !!process.env.OLLAMA_PROXY_URL,
OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
KNOWLEDGE_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
CHATCHAT_PROXY_URL: process.env.CHATCHAT_PROXY_URL || '',
};
};

View File

@@ -26,24 +26,26 @@ export class LobeChatChatAI implements LobeRuntimeAI {
}
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
console.log('payload---', payload);
try {
const response = await this.client.chat.completions.create(
payload as unknown as (OpenAI.ChatCompletionCreateParamsStreaming | OpenAI.ChatCompletionCreateParamsNonStreaming),
);
if (LobeChatChatAI.isStream(response)) {
const [prod, debug] = response.tee();
if (process.env.DEBUG_OLLAMA_CHAT_COMPLETION === '1') {
debugStream(debug.toReadableStream()).catch(console.error);
}
return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
headers: options?.headers,
});
} else {
if (process.env.DEBUG_OLLAMA_CHAT_COMPLETION === '1') {
console.debug(JSON.stringify(response));
}
@@ -93,18 +95,18 @@ export class LobeChatChatAI implements LobeRuntimeAI {
return typeof Stream !== 'undefined' && (obj instanceof Stream || obj instanceof ReadableStream);
}
// Create a stream of type ReadableStream<string>
static createChatCompletionStream(text: string): ReadableStream<string> {
const stream = new ReadableStream({
start(controller) {
controller.enqueue(text);
controller.close();
},
});
return stream;
}
}
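A minimal consumption sketch for the helper above (standard Web Streams API, no additional assumptions):
// sketch: the helper enqueues a single chunk and closes, so one read drains it
const stream = LobeChatChatAI.createChatCompletionStream('hello');
const reader = stream.getReader();
const first = await reader.read();  // { value: 'hello', done: false }
const second = await reader.read(); // { value: undefined, done: true }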

View File

@@ -25,7 +25,7 @@ export enum ModelProvider {
Anthropic = 'anthropic',
Azure = 'azure',
Bedrock = 'bedrock',
ChatChat = 'chatChat',
ChatChat = 'chatchat',
ChatGLM = 'chatglm',
Google = 'google',
Mistral = 'mistral',
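Note: with the lower-cased value, ModelProvider.ChatChat now matches both the id: 'chatchat' on the new provider card and the chatchat locale key added below, so lookups keyed by the provider string all resolve to the same entry.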

View File

@@ -111,6 +111,7 @@ export default {
openai: 'OpenAI',
perplexity: 'Perplexity',
zhipu: '智谱AI',
chatchat: 'ChatChat',
},
noDescription: '暂无描述',
oauth: 'SSO 登录',

View File

@@ -60,6 +60,10 @@ export const getProviderAuthPayload = (provider: string) => {
return { apiKey: modelProviderSelectors.mistralAPIKey(useGlobalStore.getState()) };
}
case ModelProvider.ChatChat: {
return { endpoint: modelProviderSelectors.chatChatProxyUrl(useGlobalStore.getState()) };
}
default:
case ModelProvider.OpenAI: {
const openai = modelProviderSelectors.openAIConfig(useGlobalStore.getState());
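Unlike the API-key providers around it, ChatChat authenticates with an endpoint; a sketch of the round trip implied by this diff (the returned object feeds the JWT payload that initChatChat reads):
// sketch: the client-side auth payload for ChatChat carries only the proxy URL
const authPayload = getProviderAuthPayload(ModelProvider.ChatChat);
// -> { endpoint: 'http://localhost:7861/v1' } for a locally configured service;
//    on the server, initChatChat prefers payload.endpoint over CHATCHAT_PROXY_URL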

View File

@@ -11,6 +11,7 @@ import {
OpenAIProvider,
PerplexityProvider,
ZhiPuProvider,
ChatChatProvider,
} from '@/config/modelProviders';
import { ChatModelCard, ModelProviderCard } from '@/types/llm';
import { GlobalLLMProviderKey } from '@/types/settings';
@@ -60,6 +61,9 @@ const perplexityAPIKey = (s: GlobalStore) => modelProvider(s).perplexity.apiKey;
const enableAnthropic = (s: GlobalStore) => modelProvider(s).anthropic.enabled;
const anthropicAPIKey = (s: GlobalStore) => modelProvider(s).anthropic.apiKey;
const enableChatChat = (s: GlobalStore) => modelProvider(s).chatchat.enabled;
const chatChatProxyUrl = (s: GlobalStore) => modelProvider(s).chatchat.endpoint;
// const azureModelList = (s: GlobalStore): ModelProviderCard => {
// const azure = azureConfig(s);
// return {
@@ -148,6 +152,7 @@ const modelSelectList = (s: GlobalStore): ModelProviderCard[] => {
{ ...PerplexityProvider, enabled: enablePerplexity(s) },
{ ...AnthropicProvider, enabled: enableAnthropic(s) },
{ ...MistralProvider, enabled: enableMistral(s) },
{ ...ChatChatProvider, enabled: enableChatChat(s) },
];
};
@@ -230,4 +235,8 @@ export const modelProviderSelectors = {
// Mistral
enableMistral,
mistralAPIKey,
// ChatChat
enableChatChat,
chatChatProxyUrl,
};
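Reading the new selectors follows the pattern of the existing ones; a minimal usage sketch (store shape as implied by the selectors above):
// sketch: pulling the ChatChat settings out of the global store
const s = useGlobalStore.getState();
modelProviderSelectors.enableChatChat(s);   // whether the provider is toggled on
modelProviderSelectors.chatChatProxyUrl(s); // the user-configured endpoint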