## 🛠 New Features
- Support the Baichuan online model (@hzg0601 @liunux4odoo in #1623)
- Support Azure OpenAI and other LangChain built-in models such as Claude (@zRzRzRzRzRzRzR in #1808)
- Major Agent updates: more tools, revised prompts, and knowledge base retrieval (@zRzRzRzRzRzRzR in #1626 #1666 #1785)
- Lengthen the chat history for 32k models (@zRzRzRzRzRzRzR in #1629 #1630)
- `*_chat` endpoints now accept a `max_tokens` parameter (@liunux4odoo in #1744); see the sketch after this list
- Decouple the API and WebUI into separate back end and front end (@liunux4odoo in #1772)
- Support the Zilliz vector store (@zRzRzRzRzRzRzR in #1785)
- Support the Metaphor search engine (@liunux4odoo in #1792)
- Support p-tuning models (@hzg0601 in #1810)
- Update and improve the documentation and Wiki (@imClumsyPanda @zRzRzRzRzRzRzR @glide-the in #1680 #1811)

## 🐞 Bug Fixes
- Fix `bge-*` models producing match scores greater than 1 (@zRzRzRzRzRzRzR in #1652)
- Fix handling of an empty system proxy (@glide-the in #1654)
- Fix `d == self.d assert error` when rebuilding a knowledge base (@liunux4odoo in #1766)
- Fix incorrect conversation history messages (@liunux4odoo in #1801)
- Fix a bug where OpenAI models could not be called (@zRzRzRzRzRzRzR in #1808)
- Fix chat errors on Windows when `BIND_HOST=0.0.0.0` (@hzg0601 in #1810)
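The `max_tokens` change can be exercised directly against the API server. A minimal sketch, assuming the default API port 7861 and the `/chat/chat` route; the exact payload fields may differ in your revision:

```python
import requests

# Cap the reply length via the newly supported max_tokens parameter.
resp = requests.post(
    "http://127.0.0.1:7861/chat/chat",  # assumed default API server address
    json={
        "query": "用一句话介绍这个项目",
        "history": [],
        "stream": False,
        "max_tokens": 256,
    },
)
print(resp.text)
```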
from server.model_workers.base import ApiModelWorker
from fastchat import conversation as conv
import sys
import json
from typing import List, Literal


class ChatGLMWorker(ApiModelWorker):
    BASE_URL = "https://open.bigmodel.cn/api/paas/v3/model-api"
    SUPPORT_MODELS = ["chatglm_pro", "chatglm_std", "chatglm_lite"]

    def __init__(
        self,
        *,
        model_names: List[str] = ["zhipu-api"],
        version: Literal["chatglm_pro", "chatglm_std", "chatglm_lite"] = "chatglm_std",
        controller_addr: str,
        worker_addr: str,
        **kwargs,
    ):
        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
        kwargs.setdefault("context_len", 32768)  # default to a 32k context window
        super().__init__(**kwargs)
        self.version = version

        # This is the conversation template for the ChatGLM API; other APIs
        # need their own custom conv_template.
        self.conv = conv.Conversation(
            name=self.model_names[0],
            system_message="你是一个聪明的助手,请根据用户的提示来完成任务",
            messages=[],
            roles=["Human", "Assistant"],
            sep="\n###",
            stop_str="###",
        )

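    # fastchat model workers stream results as null-byte-terminated JSON
    # chunks; generate_stream_gate re-encodes each SSE "add" event from the
    # ZhipuAI API into that framing.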
    def generate_stream_gate(self, params):
        # TODO: maintain the request_id
        import zhipuai

        super().generate_stream_gate(params)
        zhipuai.api_key = self.get_config().get("api_key")

        response = zhipuai.model_api.sse_invoke(
            model=self.version,
            prompt=[{"role": "user", "content": params["prompt"]}],
            temperature=params.get("temperature"),
            top_p=params.get("top_p"),
            incremental=False,
        )
        for e in response.events():
            if e.event == "add":
                yield json.dumps({"error_code": 0, "text": e.data}, ensure_ascii=False).encode() + b"\0"
            # TODO: more robust message handling
            # elif e.event == "finish":
            #     ...

    def get_embeddings(self, params):
        # TODO: support embeddings
        print("embedding")
        # print(params)


if __name__ == "__main__":
    import uvicorn
    from server.utils import MakeFastAPIOffline
    from fastchat.serve.model_worker import app

    worker = ChatGLMWorker(
        controller_addr="http://127.0.0.1:20001",
        worker_addr="http://127.0.0.1:21001",
    )
    # Hand this worker to fastchat's model_worker module so the FastAPI
    # app's routes dispatch to it.
    sys.modules["fastchat.serve.model_worker"].worker = worker
    MakeFastAPIOffline(app)
    uvicorn.run(app, port=21001)
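For a quick smoke test once the worker above is running, it can be queried over fastchat's standard worker route. A minimal sketch, assuming the usual `/worker_generate_stream` endpoint and payload fields, which come from common fastchat usage rather than this file:

```python
import json
import requests

# Stream a completion from the worker started above; fastchat workers emit
# null-byte-terminated JSON chunks.
resp = requests.post(
    "http://127.0.0.1:21001/worker_generate_stream",
    json={"prompt": "你好", "temperature": 0.9, "top_p": 0.7},
    stream=True,
)
for chunk in resp.iter_lines(delimiter=b"\0"):
    if chunk:
        print(json.loads(chunk.decode())["text"])
```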