Langchain-Chatchat/configs/model_config.py.example
import os
# Name of the default LLM model
DEFAULT_LLM_MODEL = "chatglm3-6b"
# Name of the default embedding model
DEFAULT_EMBEDDING_MODEL = "bge-large-zh-v1.5"
# Name of the AgentLM model (optional: if set, the Chain entered via the Agent is locked to this model; if unset, LLM_MODELS[0] is used)
Agent_MODEL = None
# Number of history dialogue turns to keep
HISTORY_LEN = 3
# Maximum context length supported by the LLM. If None, the model's default maximum length is used; otherwise the user-specified value applies.
MAX_TOKENS = None
# Common LLM chat parameters
TEMPERATURE = 0.7
# TOP_P = 0.95  # ChatOpenAI does not support this parameter yet
SUPPORT_AGENT_MODELS = [
    "chatglm3-6b",
    "openai-api",
    "Qwen-14B-Chat",
    "Qwen-7B-Chat",
    "qwen-turbo",
]
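
# A minimal sketch (an assumption, not part of the original file) of how a
# caller might gate Agent features on the list above. The helper name
# `supports_agent` is hypothetical, purely for illustration.
def supports_agent(model_name: str = DEFAULT_LLM_MODEL) -> bool:
    """Return True if the model is known to work in Agent mode."""
    return model_name in SUPPORT_AGENT_MODELS

# Example: supports_agent("chatglm3-6b") -> True
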
LLM_MODEL_CONFIG = {
    # The preprocess (intent recognition) model does not need user-visible
    # output; the backend only needs its result internally.
    "preprocess_model": {
        DEFAULT_LLM_MODEL: {
            "temperature": 0.05,
            "max_tokens": 4096,
            "history_len": 100,
            "prompt_name": "default",
            "callbacks": False
        },
    },
    "llm_model": {
        DEFAULT_LLM_MODEL: {
            "temperature": 0.9,
            "max_tokens": 4096,
            "history_len": 10,
            "prompt_name": "default",
            "callbacks": True
        },
    },
    "action_model": {
        DEFAULT_LLM_MODEL: {
            "temperature": 0.01,
            "max_tokens": 4096,
            "prompt_name": "ChatGLM3",
            "callbacks": True
        },
    },
    "postprocess_model": {
        DEFAULT_LLM_MODEL: {
            "temperature": 0.01,
            "max_tokens": 4096,
            "prompt_name": "default",
            "callbacks": True
        }
    },
    "image_model": {
        "sd-turbo": {
            "size": "256*256",
        }
    },
    "multimodal_model": {
        "qwen-vl": {}
    },
}
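
# A minimal sketch (an assumption, not part of the original file) showing how
# per-role generation parameters could be looked up from LLM_MODEL_CONFIG.
# The helper name `get_model_params` is hypothetical.
def get_model_params(role: str, model_name: str = DEFAULT_LLM_MODEL) -> dict:
    """Return the configured parameters for one role/model pair, or {}."""
    return LLM_MODEL_CONFIG.get(role, {}).get(model_name, {})

# Example: temperature used by the action (tool-calling) model.
# get_model_params("action_model")["temperature"]  -> 0.01
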
# Model services can be started via loom/xinference/oneapi/fastchat; then configure their URL and KEY here.
MODEL_PLATFORMS = [
    {
        "platform_name": "openai-api",
        "platform_type": "openai",
        "llm_models": [
            "gpt-3.5-turbo",
        ],
        "embed_models": [],
        "image_models": [],
        "multimodal_models": [],
        "api_base_url": "https://api.openai.com/v1",
        "api_key": "sk-",
        "api_proxy": "",
    },
    {
        "platform_name": "xinference",
        "platform_type": "xinference",
        "llm_models": [
            "chatglm3-6b",
        ],
        "embed_models": [
            "bge-large-zh-v1.5",
        ],
        "image_models": [
            "sd-turbo",
        ],
        "multimodal_models": [
            "qwen-vl",
        ],
        "api_base_url": "http://127.0.0.1:9997/v1",
        "api_key": "EMPTY",
    },
    {
        "platform_name": "oneapi",
        "platform_type": "oneapi",
        "llm_models": [
            "qwen-turbo",
            "qwen-plus",
            "chatglm_turbo",
            "chatglm_std",
        ],
        "embed_models": [],
        "image_models": [],
        "multimodal_models": [],
        "api_base_url": "http://127.0.0.1:3000/v1",
        "api_key": "sk-xxx",
    },
    {
        "platform_name": "loom",
        "platform_type": "loom",
        "llm_models": [
            "chatglm3-6b",
        ],
        "embed_models": [],
        "image_models": [],
        "multimodal_models": [],
        "api_base_url": "http://127.0.0.1:7860/v1",
        "api_key": "EMPTY",
    },
]
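
# A minimal sketch (an assumption, not part of the original file) of how a
# platform entry could be resolved for a given model name. The helper name
# `find_platform_for_model` is hypothetical.
def find_platform_for_model(model_name: str):
    """Return the first platform whose llm_models list contains the model."""
    for platform in MODEL_PLATFORMS:
        if model_name in platform.get("llm_models", []):
            return platform
    return None

# Example: find_platform_for_model("chatglm3-6b")["api_base_url"]
# -> "http://127.0.0.1:9997/v1" (the xinference entry above)
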
LOOM_CONFIG = os.path.join(os.path.dirname(os.path.abspath(__file__)), "loom.yaml")
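
# A minimal sketch (an assumption, not part of the original file) of how the
# loom service might read LOOM_CONFIG; the actual loader lives outside this
# file. Shown commented out because it needs PyYAML as an extra dependency.
# import yaml
# with open(LOOM_CONFIG, encoding="utf-8") as f:
#     loom_settings = yaml.safe_load(f)  # hypothetical variable name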