diff --git a/configs/model_config.py.example b/configs/model_config.py.example
index 9be56953..824a7d28 100644
--- a/configs/model_config.py.example
+++ b/configs/model_config.py.example
@@ -6,9 +6,9 @@ import os
 MODEL_ROOT_PATH = ""
 
 # Name of the selected Embedding model
-EMBEDDING_MODEL = "bge-large-zh"
+EMBEDDING_MODEL = "bge-large-zh-v1.5"
 
-# Device to run the Embedding model on. "auto" detects it automatically; it can also be set manually to one of "cuda", "mps", "cpu".
+# Device to run the Embedding model on. "auto" detects it automatically (with a warning); it can also be set manually to one of "cuda", "mps", "cpu", "xpu".
 EMBEDDING_DEVICE = "auto"
 
 # Selected reranker model
@@ -26,50 +26,32 @@ EMBEDDING_MODEL_OUTPUT_PATH = "output"
 # Here we use two currently mainstream offline models; chatglm3-6b is the model loaded by default.
 # If you are short on GPU memory, you can use Qwen-1_8B-Chat, which needs only about 3.8 GB of VRAM in FP16.
-# For chatglm3-6b emitting the role tag <|user|> and asking and answering its own questions, see the project wiki -> FAQ -> Q20.
-
-LLM_MODELS = ["chatglm3-6b", "zhipu-api", "openai-api"]  # "Qwen-1_8B-Chat",
-
-# Name of the AgentLM model (optional; once set, it pins the model used by the Chain after entering the Agent; if unset, LLM_MODELS[0] is used)
+LLM_MODELS = ["zhipu-api"]
 Agent_MODEL = None
 
-# Device to run the LLM on. "auto" detects it automatically; it can also be set manually to one of "cuda", "mps", "cpu".
-LLM_DEVICE = "auto"
+LLM_DEVICE = "cuda"
 
-# Number of history dialogue turns
 HISTORY_LEN = 3
 
-# Maximum length supported by the LLM. If unset, the model's default maximum length is used; if set, it is the user-specified maximum length.
-MAX_TOKENS = None
+MAX_TOKENS = 2048
 
-# General LLM chat parameters
 TEMPERATURE = 0.7
-# TOP_P = 0.95  # ChatOpenAI does not support this parameter yet
 
 ONLINE_LLM_MODEL = {
-    # Online models. Please assign a different port to each online API in server_config.
-
     "openai-api": {
-        "model_name": "gpt-3.5-turbo",
+        "model_name": "gpt-4",
         "api_base_url": "https://api.openai.com/v1",
         "api_key": "",
         "openai_proxy": "",
     },
 
-    # To obtain an api_key, go to https://makersuite.google.com/ or Google Cloud. Make sure your network connection works before use; if you use a proxy, set the https_proxy environment variable in the environment where the project is started (python startup.py -a).
-    "gemini-api": {
-        "api_key": "",
-        "provider": "GeminiWorker",
-    },
-
-    # To register and obtain an api key, go to http://open.bigmodel.cn
+    # Zhipu AI API (GLM4 is not supported; this version is not compatible with it, please wait for 0.3.x). To register and obtain an api key, go to http://open.bigmodel.cn
     "zhipu-api": {
         "api_key": "",
-        "version": "chatglm_turbo",  # options include "chatglm_turbo"
+        "version": "chatglm_turbo",
         "provider": "ChatGLMWorker",
     },
-
     # To register and obtain an api key, go to https://api.minimax.chat/
     "minimax-api": {
         "group_id": "",
@@ -78,7 +60,6 @@ ONLINE_LLM_MODEL = {
         "api_key": "",
         "provider": "MiniMaxWorker",
     },
-
     # To register and obtain an api key, go to https://xinghuo.xfyun.cn/
     "xinghuo-api": {
         "APPID": "",
@@ -99,8 +80,8 @@ ONLINE_LLM_MODEL = {
 
     # Volcano Ark (FangZhou) API; see the docs at https://www.volcengine.com/docs/82379
     "fangzhou-api": {
-        "version": "chatglm-6b-model",  # currently supports "chatglm-6b-model"; see the Ark section of the supported model list in the docs for more
-        "version_url": "",  # version may be left empty; instead fill in the API URL of the model endpoint published in Ark
+        "version": "chatglm-6b-model",
+        "version_url": "",
         "api_key": "",
         "secret_key": "",
         "provider": "FangZhouWorker",
@@ -108,15 +89,15 @@ ONLINE_LLM_MODEL = {
 
     # Alibaba Cloud Tongyi Qianwen API; see the docs at https://help.aliyun.com/zh/dashscope/developer-reference/api-details
     "qwen-api": {
-        "version": "qwen-turbo",  # options include "qwen-turbo", "qwen-plus"
-        "api_key": "",  # create one on the DashScope API-KEY management page of the Alibaba Cloud console
+        "version": "qwen-max",
+        "api_key": "",
         "provider": "QwenWorker",
-        "embed_model": "text-embedding-v1" # embedding model name
+        "embed_model": "text-embedding-v1"  # embedding model name
     },
 
     # Baichuan API; see https://www.baichuan-ai.com/home#api-enter for how to apply
     "baichuan-api": {
-        "version": "Baichuan2-53B",  # currently supports "Baichuan2-53B"; see the official docs
+        "version": "Baichuan2-53B",
         "api_key": "",
         "secret_key": "",
         "provider": "BaiChuanWorker",
@@ -138,6 +119,11 @@ ONLINE_LLM_MODEL = {
         "secret_key": "",
         "provider": "TianGongWorker",
     },
+    # Gemini API (not tested by the dev team, contributed by the community; only the pro model is supported)
+    "gemini-api": {
+        "api_key": "",
+        "provider": "GeminiWorker",
+    }
 }
 
 
@@ -149,6 +135,7 @@ ONLINE_LLM_MODEL = {
 # - GanymedeNil/text2vec-large-chinese
 # - text2vec-large-chinese
 # 2.2 If none of the above local paths exist, the huggingface model is used
+
 MODEL_PATH = {
     "embed_model": {
         "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
"nghuyong/ernie-3.0-nano-zh", @@ -167,7 +154,7 @@ MODEL_PATH = { "bge-large-zh": "BAAI/bge-large-zh", "bge-large-zh-noinstruct": "BAAI/bge-large-zh-noinstruct", "bge-base-zh-v1.5": "BAAI/bge-base-zh-v1.5", - "bge-large-zh-v1.5": "BAAI/bge-large-zh-v1.5", + "bge-large-zh-v1.5": "/share/home/zyx/Models/bge-large-zh-v1.5", "piccolo-base-zh": "sensenova/piccolo-base-zh", "piccolo-large-zh": "sensenova/piccolo-large-zh", "nlp_gte_sentence-embedding_chinese-large": "damo/nlp_gte_sentence-embedding_chinese-large", @@ -175,55 +162,55 @@ MODEL_PATH = { }, "llm_model": { - # 以下部分模型并未完全测试,仅根据fastchat和vllm模型的模型列表推定支持 "chatglm2-6b": "THUDM/chatglm2-6b", "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k", - "chatglm3-6b": "THUDM/chatglm3-6b", "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k", - "chatglm3-6b-base": "THUDM/chatglm3-6b-base", - "Qwen-1_8B": "Qwen/Qwen-1_8B", - "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat", - "Qwen-1_8B-Chat-Int8": "Qwen/Qwen-1_8B-Chat-Int8", - "Qwen-1_8B-Chat-Int4": "Qwen/Qwen-1_8B-Chat-Int4", + "Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf", + "Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf", + "Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf", - "Qwen-7B": "Qwen/Qwen-7B", + "Qwen-1_8B-Chat": "/media/checkpoint/Qwen-1_8B-Chat", "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat", - - "Qwen-14B": "Qwen/Qwen-14B", "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat", - - "Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8", - # 在新版的transformers下需要手动修改模型的config.json文件,在quantization_config字典中 - # 增加`disable_exllama:true` 字段才能启动qwen的量化模型 - "Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4", - - "Qwen-72B": "Qwen/Qwen-72B", "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat", - "Qwen-72B-Chat-Int8": "Qwen/Qwen-72B-Chat-Int8", - "Qwen-72B-Chat-Int4": "Qwen/Qwen-72B-Chat-Int4", - "baichuan2-13b": "baichuan-inc/Baichuan2-13B-Chat", - "baichuan2-7b": "baichuan-inc/Baichuan2-7B-Chat", - - "baichuan-7b": "baichuan-inc/Baichuan-7B", - "baichuan-13b": "baichuan-inc/Baichuan-13B", + "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat", "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat", - - "aquila-7b": "BAAI/Aquila-7B", - "aquilachat-7b": "BAAI/AquilaChat-7B", + "baichuan2-7b-chat": "baichuan-inc/Baichuan2-7B-Chat", + "baichuan2-13b-chat": "baichuan-inc/Baichuan2-13B-Chat", "internlm-7b": "internlm/internlm-7b", "internlm-chat-7b": "internlm/internlm-chat-7b", + "internlm2-chat-7b": "internlm/internlm2-chat-7b", + "internlm2-chat-20b": "internlm/internlm2-chat-20b", + + "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat", + "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k", + + "Yi-34B-Chat": "https://huggingface.co/01-ai/Yi-34B-Chat", + + "agentlm-7b": "THUDM/agentlm-7b", + "agentlm-13b": "THUDM/agentlm-13b", + "agentlm-70b": "THUDM/agentlm-70b", "falcon-7b": "tiiuae/falcon-7b", - "falcon-40b": "tiiuae/falcon-40b", + "falcon-40b": "tiiuae/falcon-40,b", "falcon-rw-7b": "tiiuae/falcon-rw-7b", + "aquila-7b": "BAAI/Aquila-7B", + "aquilachat-7b": "BAAI/AquilaChat-7B", + "open_llama_13b": "openlm-research/open_llama_13b", + "vicuna-13b-v1.5": "lmsys/vicuna-13b-v1.5", + "koala": "young-geng/koala", + "mpt-7b": "mosaicml/mpt-7b", + "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter", + "mpt-30b": "mosaicml/mpt-30b", + "opt-66b": "facebook/opt-66b", + "opt-iml-max-30b": "facebook/opt-iml-max-30b", "gpt2": "gpt2", "gpt2-xl": "gpt2-xl", - "gpt-j-6b": "EleutherAI/gpt-j-6b", "gpt4all-j": "nomic-ai/gpt4all-j", "gpt-neox-20b": "EleutherAI/gpt-neox-20b", @@ -231,63 +218,50 @@ MODEL_PATH = { "oasst-sft-4-pythia-12b-epoch-3.5": 
"OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", "dolly-v2-12b": "databricks/dolly-v2-12b", "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b", - - "Llama-2-13b-hf": "meta-llama/Llama-2-13b-hf", - "Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf", - "open_llama_13b": "openlm-research/open_llama_13b", - "vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3", - "koala": "young-geng/koala", - - "mpt-7b": "mosaicml/mpt-7b", - "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter", - "mpt-30b": "mosaicml/mpt-30b", - "opt-66b": "facebook/opt-66b", - "opt-iml-max-30b": "facebook/opt-iml-max-30b", - - "agentlm-7b": "THUDM/agentlm-7b", - "agentlm-13b": "THUDM/agentlm-13b", - "agentlm-70b": "THUDM/agentlm-70b", - - "Yi-34B-Chat": "01-ai/Yi-34B-Chat", }, - "reranker":{ - "bge-reranker-large":"BAAI/bge-reranker-large", - "bge-reranker-base":"BAAI/bge-reranker-base", - #TODO 增加在线reranker,如cohere + "reranker": { + "bge-reranker-large": "BAAI/bge-reranker-large", + "bge-reranker-base": "BAAI/bge-reranker-base", } } - # 通常情况下不需要更改以下内容 # nltk 模型存储路径 NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data") +# 使用VLLM可能导致模型推理能力下降,无法完成Agent任务 VLLM_MODEL_DICT = { - "aquila-7b": "BAAI/Aquila-7B", - "aquilachat-7b": "BAAI/AquilaChat-7B", - - "baichuan-7b": "baichuan-inc/Baichuan-7B", - "baichuan-13b": "baichuan-inc/Baichuan-13B", - "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat", - "chatglm2-6b": "THUDM/chatglm2-6b", "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k", "chatglm3-6b": "THUDM/chatglm3-6b", "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k", + "Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf", + "Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf", + "Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf", + + "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat", + "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat", + "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat", + "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat", + + "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat", + "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat", + "baichuan2-7b-chat": "baichuan-inc/Baichuan-7B-Chat", + "baichuan2-13b-chat": "baichuan-inc/Baichuan-13B-Chat", + "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat", "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k", - # 注意:bloom系列的tokenizer与model是分离的,因此虽然vllm支持,但与fschat框架不兼容 - # "bloom": "bigscience/bloom", - # "bloomz": "bigscience/bloomz", - # "bloomz-560m": "bigscience/bloomz-560m", - # "bloomz-7b1": "bigscience/bloomz-7b1", - # "bloomz-1b7": "bigscience/bloomz-1b7", - "internlm-7b": "internlm/internlm-7b", "internlm-chat-7b": "internlm/internlm-chat-7b", + "internlm2-chat-7b": "internlm/Models/internlm2-chat-7b", + "internlm2-chat-20b": "internlm/Models/internlm2-chat-20b", + + "aquila-7b": "BAAI/Aquila-7B", + "aquilachat-7b": "BAAI/AquilaChat-7B", + "falcon-7b": "tiiuae/falcon-7b", "falcon-40b": "tiiuae/falcon-40b", "falcon-rw-7b": "tiiuae/falcon-rw-7b", @@ -300,8 +274,6 @@ VLLM_MODEL_DICT = { "oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", "dolly-v2-12b": "databricks/dolly-v2-12b", "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b", - "Llama-2-13b-hf": "meta-llama/Llama-2-13b-hf", - "Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf", "open_llama_13b": "openlm-research/open_llama_13b", "vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3", "koala": "young-geng/koala", @@ -311,32 +283,8 @@ VLLM_MODEL_DICT = { "opt-66b": "facebook/opt-66b", "opt-iml-max-30b": "facebook/opt-iml-max-30b", - "Qwen-1_8B": "Qwen/Qwen-1_8B", - "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat", - 
"Qwen-1_8B-Chat-Int8": "Qwen/Qwen-1_8B-Chat-Int8", - "Qwen-1_8B-Chat-Int4": "Qwen/Qwen-1_8B-Chat-Int4", - - "Qwen-7B": "Qwen/Qwen-7B", - "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat", - - "Qwen-14B": "Qwen/Qwen-14B", - "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat", - "Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8", - "Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4", - - "Qwen-72B": "Qwen/Qwen-72B", - "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat", - "Qwen-72B-Chat-Int8": "Qwen/Qwen-72B-Chat-Int8", - "Qwen-72B-Chat-Int4": "Qwen/Qwen-72B-Chat-Int4", - - "agentlm-7b": "THUDM/agentlm-7b", - "agentlm-13b": "THUDM/agentlm-13b", - "agentlm-70b": "THUDM/agentlm-70b", - } -# 你认为支持Agent能力的模型,可以在这里添加,添加后不会出现可视化界面的警告 -# 经过我们测试,原生支持Agent的模型仅有以下几个 SUPPORT_AGENT_MODEL = [ "azure-api", "openai-api", @@ -344,4 +292,6 @@ SUPPORT_AGENT_MODEL = [ "Qwen", "chatglm3", "xinghuo-api", + "internlm2-chat-7b", + "internlm2-chat-20b" ] diff --git a/server/agent/tools/weather_check.py b/server/agent/tools/weather_check.py index 7e55c7cb..8e9f3c6b 100644 --- a/server/agent/tools/weather_check.py +++ b/server/agent/tools/weather_check.py @@ -20,6 +20,6 @@ def weather(location: str, api_key: str): def weathercheck(location: str): - return weather(location, "S8vrB4U_-c5mvAMiK") + return weather(location, "your keys") class WeatherInput(BaseModel): - location: str = Field(description="City name,include city and county,like '厦门'") + location: str = Field(description="City name,include city and county") diff --git a/server/model_workers/gemini.py b/server/model_workers/gemini.py index 46130212..0cd8e159 100644 --- a/server/model_workers/gemini.py +++ b/server/model_workers/gemini.py @@ -18,7 +18,7 @@ class GeminiWorker(ApiModelWorker): **kwargs, ): kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr) - kwargs.setdefault("context_len", 4096) #TODO 16K模型需要改成16384 + kwargs.setdefault("context_len", 4096) super().__init__(**kwargs) def create_gemini_messages(self,messages) -> json: @@ -47,10 +47,10 @@ class GeminiWorker(ApiModelWorker): params.load_config(self.model_names[0]) data = self.create_gemini_messages(messages=params.messages) generationConfig=dict( - temperature = params.temperature, - topK = 1, - topP = 1, - maxOutputTokens = 4096, + temperature=params.temperature, + topK=1, + topP=1, + maxOutputTokens=4096, stopSequences=[] )