## 🛠 New features
- Support Baichuan online models (@hzg0601 @liunux4odoo in #1623)
- Support Azure OpenAI, Claude, and other models shipped with Langchain (@zRzRzRzRzRzRzR in #1808)
- Major Agent updates: more tools, swappable prompts, and knowledge base retrieval (@zRzRzRzRzRzRzR in #1626 #1666 #1785)
- Longer chat history for 32k models (@zRzRzRzRzRzRzR in #1629 #1630)
- The *_chat endpoints support a max_tokens parameter (@liunux4odoo in #1744) (see the request sketch after this list)
- Decouple the API backend from the WebUI frontend (@liunux4odoo in #1772)
- Support the Zilliz vector store (@zRzRzRzRzRzRzR in #1785)
- Support the Metaphor search engine (@liunux4odoo in #1792)
- Support p-tuning models (@hzg0601 in #1810)
- Update and improve the documentation and Wiki (@imClumsyPanda @zRzRzRzRzRzRzR @glide-the in #1680 #1811)

## 🐞 Bug fixes
- Fix bge-* models returning similarity scores greater than 1 (@zRzRzRzRzRzRzR in #1652)
- Fix errors when the system proxy is empty (@glide-the in #1654)
- Fix `d == self.d assert error` when rebuilding a knowledge base (@liunux4odoo in #1766)
- Fix incorrect chat history messages (@liunux4odoo in #1801)
- Fix a bug where OpenAI models could not be called (@zRzRzRzRzRzRzR in #1808)
- Fix chat errors on Windows when BIND_HOST=0.0.0.0 (@hzg0601 in #1810)
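
As a quick illustration of the new `max_tokens` support in the `*_chat` endpoints (#1744), below is a minimal request sketch. The port, path, and field names are assumptions based on a default local deployment; check the server's generated OpenAPI docs for the exact schema.

```python
import requests

# Hedged sketch: assumes the API server listens on its default local port and
# that /chat/chat accepts these JSON fields; verify against your /docs page.
resp = requests.post(
    "http://127.0.0.1:7861/chat/chat",
    json={
        "query": "Hello",
        "history": [],
        "stream": False,
        "max_tokens": 512,  # cap the length of the completion (new in #1744)
    },
)
print(resp.text)
```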
from configs.basic_config import LOG_PATH
import fastchat.constants

# fastchat writes its logs to LOGDIR; point it at our log path before any
# fastchat server module is imported.
fastchat.constants.LOGDIR = LOG_PATH

from fastchat.serve.base_model_worker import BaseModelWorker
import uuid
import json
import sys
from pydantic import BaseModel
import asyncio
from typing import Dict, List


# Restore stdout/stderr, which fastchat redirects to its own loggers.
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__


class ApiModelOutMsg(BaseModel):
    error_code: int = 0
    text: str


class ApiModelWorker(BaseModelWorker):
    BASE_URL: str
    SUPPORT_MODELS: List[str]

    def __init__(
        self,
        model_names: List[str],
        controller_addr: str,
        worker_addr: str,
        context_len: int = 2048,
        **kwargs,
    ):
        kwargs.setdefault("worker_id", uuid.uuid4().hex[:8])
        # API workers have no local weights, so model_path stays empty.
        kwargs.setdefault("model_path", "")
        kwargs.setdefault("limit_worker_concurrency", 5)
        super().__init__(model_names=model_names,
                         controller_addr=controller_addr,
                         worker_addr=worker_addr,
                         **kwargs)
        self.context_len = context_len
        self.semaphore = asyncio.Semaphore(self.limit_worker_concurrency)
        # Register with the fastchat controller and start heartbeats.
        self.init_heart_beat()

    def count_token(self, params):
        # TODO: crude placeholder; counts characters rather than real tokens.
        prompt = params["prompt"]
        return {"count": len(str(prompt)), "error_code": 0}

    def generate_stream_gate(self, params):
        # Subclasses override this as a generator: call
        # super().generate_stream_gate(params) to update the request counter,
        # then yield null-terminated JSON bytes.
        self.call_ct += 1

    def generate_gate(self, params):
        # Drain the stream; the final null-terminated JSON chunk is the reply.
        chunk = None
        for chunk in self.generate_stream_gate(params):
            pass
        if chunk is None:
            raise RuntimeError("generate_stream_gate yielded no output")
        return json.loads(chunk[:-1].decode())

    def get_embeddings(self, params):
        # Not implemented for API-based workers.
        print("embedding")

    # helper methods
    def get_config(self):
        # Look up this worker's entry in the model worker configuration.
        from server.utils import get_model_worker_config
        return get_model_worker_config(self.model_names[0])

    def prompt_to_messages(self, prompt: str) -> List[Dict]:
        '''
        Split a conversation prompt string back into a list of messages,
        e.g. with roles ("user", "assistant") and separator <sep>, the prompt
        "<system><sep>user: hi<sep>assistant: hello<sep>" yields
        [{"role": "user", "content": "hi"},
         {"role": "assistant", "content": "hello"}].
        '''
        result = []
        user_role = self.conv.roles[0]
        ai_role = self.conv.roles[1]
        user_start = user_role + ":"
        ai_start = ai_role + ":"
        # Drop the leading system segment and the trailing incomplete segment;
        # only the middle items are complete role-prefixed messages.
        for msg in prompt.split(self.conv.sep)[1:-1]:
            if msg.startswith(user_start):
                if content := msg[len(user_start):].strip():
                    result.append({"role": user_role, "content": content})
            elif msg.startswith(ai_start):
                if content := msg[len(ai_start):].strip():
                    result.append({"role": ai_role, "content": content})
            else:
                raise RuntimeError(f"unknown role in msg: {msg}")
        return result
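
# --- Usage sketch (hypothetical, not part of the original file) -------------
# EchoWorker and the "echo-api" model name are made-up illustrations of the
# subclass contract implied above: override generate_stream_gate, call
# super() to bump the request counter, then yield null-terminated JSON bytes
# for generate_gate to drain. A real worker would call a remote model API,
# and constructing one requires a reachable fastchat controller, since
# __init__ registers the worker and starts heartbeats.
class EchoWorker(ApiModelWorker):
    BASE_URL = ""
    SUPPORT_MODELS = ["echo-api"]

    def generate_stream_gate(self, params):
        super().generate_stream_gate(params)
        # Echo the prompt back; a real worker would call its remote API here.
        text = str(params.get("prompt", ""))
        yield json.dumps({"error_code": 0, "text": text}).encode() + b"\0"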