* 支持了agentlm

* 支持了agentlm和相关提示词

* 修改了Agent的一些功能,加入了Embed方面的一个优化

* 修改了部分Agent的工具

* 增加一些Langchain的自带工具

* 修复一些兼容性的bug

* 恢复知识库

* 恢复知识库

* 1

* 修复Azure问题

* 修复zilliz的问题

* 修复Agent历史记录问题,优化知识库问答搜索的逻辑

* 修复讯飞大模型token问题

* 修复讯飞大模型token问题和配置文件

---------

Co-authored-by: zR <zRzRzRzRzRzRzR>
This commit is contained in:
zR 2023-10-27 13:14:48 +08:00 committed by GitHub
parent 24d1e28a07
commit dea468ab0f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 46 additions and 35 deletions

View File

@ -108,6 +108,10 @@ LLM_DEVICE = "auto"
# 历史对话轮数 # 历史对话轮数
HISTORY_LEN = 3 HISTORY_LEN = 3
# 大模型最长支持的长度,如果不填写,则使用模型默认的最大长度,如果填写,则为用户设定的最大长度
MAX_TOKENS = None
# LLM通用对话参数 # LLM通用对话参数
TEMPERATURE = 0.7 TEMPERATURE = 0.7
# TOP_P = 0.95 # ChatOpenAI暂不支持该参数 # TOP_P = 0.95 # ChatOpenAI暂不支持该参数
@ -132,7 +136,7 @@ ONLINE_LLM_MODEL = {
"APPID": "", "APPID": "",
"APISecret": "", "APISecret": "",
"api_key": "", "api_key": "",
"is_v2": False, "version": "v1.5", # 你使用的讯飞星火大模型版本,可选包括 "v3.0", "v1.5", "v2.0"
"provider": "XingHuoWorker", "provider": "XingHuoWorker",
}, },
# 百度千帆 API申请方式请参考 https://cloud.baidu.com/doc/WENXINWORKSHOP/s/4lilb2lpf # 百度千帆 API申请方式请参考 https://cloud.baidu.com/doc/WENXINWORKSHOP/s/4lilb2lpf

View File

@ -11,7 +11,7 @@ from langchain.schema.language_model import BaseLanguageModel
from typing import List, Any, Optional from typing import List, Any, Optional
from langchain.prompts import PromptTemplate from langchain.prompts import PromptTemplate
from server.chat.knowledge_base_chat import knowledge_base_chat from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import asyncio import asyncio
from server.agent import model_container from server.agent import model_container
@ -23,7 +23,7 @@ async def search_knowledge_base_iter(database: str, query: str) -> str:
temperature=0.01, temperature=0.01,
history=[], history=[],
top_k=VECTOR_SEARCH_TOP_K, top_k=VECTOR_SEARCH_TOP_K,
max_tokens=None, max_tokens=MAX_TOKENS,
prompt_name="default", prompt_name="default",
score_threshold=SCORE_THRESHOLD, score_threshold=SCORE_THRESHOLD,
stream=False) stream=False)

View File

@ -19,7 +19,7 @@ import json
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from server.chat.knowledge_base_chat import knowledge_base_chat from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import asyncio import asyncio
from server.agent import model_container from server.agent import model_container
@ -32,7 +32,7 @@ async def search_knowledge_base_iter(database: str, query: str):
temperature=0.01, temperature=0.01,
history=[], history=[],
top_k=VECTOR_SEARCH_TOP_K, top_k=VECTOR_SEARCH_TOP_K,
max_tokens=None, max_tokens=MAX_TOKENS,
prompt_name="knowledge_base_chat", prompt_name="knowledge_base_chat",
score_threshold=SCORE_THRESHOLD, score_threshold=SCORE_THRESHOLD,
stream=False) stream=False)

View File

@ -1,6 +1,6 @@
import json import json
from server.chat.search_engine_chat import search_engine_chat from server.chat.search_engine_chat import search_engine_chat
from configs import VECTOR_SEARCH_TOP_K from configs import VECTOR_SEARCH_TOP_K, MAX_TOKENS
import asyncio import asyncio
from server.agent import model_container from server.agent import model_container
@ -11,7 +11,7 @@ async def search_engine_iter(query: str):
temperature=0.01, # Agent 搜索互联网的时候温度设置为0.01 temperature=0.01, # Agent 搜索互联网的时候温度设置为0.01
history=[], history=[],
top_k = VECTOR_SEARCH_TOP_K, top_k = VECTOR_SEARCH_TOP_K,
max_tokens= None, # Agent 搜索互联网的时候max_tokens设置为None max_tokens= MAX_TOKENS,
prompt_name = "default", prompt_name = "default",
stream=False) stream=False)

View File

@ -1,5 +1,5 @@
from server.chat.knowledge_base_chat import knowledge_base_chat from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import json import json
import asyncio import asyncio
from server.agent import model_container from server.agent import model_container
@ -11,6 +11,7 @@ async def search_knowledge_base_iter(database: str, query: str) -> str:
temperature=0.01, temperature=0.01,
history=[], history=[],
top_k=VECTOR_SEARCH_TOP_K, top_k=VECTOR_SEARCH_TOP_K,
max_tokens=MAX_TOKENS,
prompt_name="knowledge_base_chat", prompt_name="knowledge_base_chat",
score_threshold=SCORE_THRESHOLD, score_threshold=SCORE_THRESHOLD,
stream=False) stream=False)

View File

@ -52,7 +52,7 @@ class Ws_Param(object):
return url return url
def gen_params(appid, domain,question, temperature): def gen_params(appid, domain, question, temperature, max_token):
""" """
通过appid和用户的提问来生成请求参数 通过appid和用户的提问来生成请求参数
""" """
@ -65,7 +65,7 @@ def gen_params(appid, domain,question, temperature):
"chat": { "chat": {
"domain": domain, "domain": domain,
"random_threshold": 0.5, "random_threshold": 0.5,
"max_tokens": None, "max_tokens": max_token,
"auditing": "default", "auditing": "default",
"temperature": temperature, "temperature": temperature,
} }

View File

@ -9,11 +9,10 @@ from server.utils import iter_over_async, asyncio
from typing import List, Dict from typing import List, Dict
async def request(appid, api_key, api_secret, Spark_url,domain, question, temperature): async def request(appid, api_key, api_secret, Spark_url, domain, question, temperature, max_token):
# print("星火:")
wsParam = SparkApi.Ws_Param(appid, api_key, api_secret, Spark_url) wsParam = SparkApi.Ws_Param(appid, api_key, api_secret, Spark_url)
wsUrl = wsParam.create_url() wsUrl = wsParam.create_url()
data = SparkApi.gen_params(appid, domain, question, temperature) data = SparkApi.gen_params(appid, domain, question, temperature, max_token)
async with websockets.connect(wsUrl) as ws: async with websockets.connect(wsUrl) as ws:
await ws.send(json.dumps(data, ensure_ascii=False)) await ws.send(json.dumps(data, ensure_ascii=False))
finish = False finish = False
@ -45,21 +44,28 @@ class XingHuoWorker(ApiModelWorker):
# TODO: 当前每次对话都要重新连接websocket确认是否可以保持连接 # TODO: 当前每次对话都要重新连接websocket确认是否可以保持连接
params.load_config(self.model_names[0]) params.load_config(self.model_names[0])
if params.is_v2: version_mapping = {
domain = "generalv2" # v2.0版本 "v1.5": {"domain": "general", "url": "ws://spark-api.xf-yun.com/v1.1/chat","max_tokens": 2048},
Spark_url = "ws://spark-api.xf-yun.com/v2.1/chat" # v2.0环境的地址 "v2.0": {"domain": "generalv2", "url": "ws://spark-api.xf-yun.com/v2.1/chat","max_tokens": 4096},
else: "v3.0": {"domain": "generalv3", "url": "ws://spark-api.xf-yun.com/v3.1/chat","max_tokens": 8192},
domain = "general" # v1.5版本 }
Spark_url = "ws://spark-api.xf-yun.com/v1.1/chat" # v1.5环境的地址
def get_version_details(version_key):
return version_mapping.get(version_key, {"domain": None, "url": None})
# 使用方法:
details = get_version_details(params.version)
domain = details["domain"]
Spark_url = details["url"]
text = "" text = ""
try: try:
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
except: except:
loop = asyncio.new_event_loop() loop = asyncio.new_event_loop()
params.max_tokens = min(details["max_tokens"], params.max_tokens)
for chunk in iter_over_async( for chunk in iter_over_async(
request(params.APPID, params.api_key, params.APISecret, Spark_url, domain, params.messages, params.temperature), request(params.APPID, params.api_key, params.APISecret, Spark_url, domain, params.messages,
params.temperature, params.max_tokens),
loop=loop, loop=loop,
): ):
if chunk: if chunk: