mirror of
https://github.com/RYDE-WORK/Langchain-Chatchat.git
synced 2026-01-23 07:13:27 +08:00
## 🛠 新增功能 - 支持百川在线模型 (@hzg0601 @liunux4odoo in #1623) - 支持 Azure OpenAI 与 claude 等 Langchain 自带模型 (@zRzRzRzRzRzRzR in #1808) - Agent 功能大量更新,支持更多的工具、更换提示词、检索知识库 (@zRzRzRzRzRzRzR in #1626 #1666 #1785) - 加长 32k 模型的历史记录 (@zRzRzRzRzRzRzR in #1629 #1630) - *_chat 接口支持 max_tokens 参数 (@liunux4odoo in #1744) - 实现 API 和 WebUI 的前后端分离 (@liunux4odoo in #1772) - 支持 zlilliz 向量库 (@zRzRzRzRzRzRzR in #1785) - 支持 metaphor 搜索引擎 (@liunux4odoo in #1792) - 支持 p-tuning 模型 (@hzg0601 in #1810) - 更新完善文档和 Wiki (@imClumsyPanda @zRzRzRzRzRzRzR @glide-the in #1680 #1811) ## 🐞 问题修复 - 修复 bge-* 模型匹配超过 1 的问题 (@zRzRzRzRzRzRzR in #1652) - 修复系统代理为空的问题 (@glide-the in #1654) - 修复重建知识库时 `d == self.d assert error` (@liunux4odoo in #1766) - 修复对话历史消息错误 (@liunux4odoo in #1801) - 修复 OpenAI 无法调用的 bug (@zRzRzRzRzRzRzR in #1808) - 修复 windows下 BIND_HOST=0.0.0.0 时对话出错的问题 (@hzg0601 in #1810)
72 lines
2.1 KiB
Python
72 lines
2.1 KiB
Python
import requests
|
||
import json
|
||
import sys
|
||
from pathlib import Path
|
||
|
||
# Make the project root (three levels up from this test file) importable so
# that the project-local `configs` and `server` packages resolve below.
root_path = Path(__file__).parent.parent.parent
sys.path.append(str(root_path))
||
from configs.server_config import FSCHAT_MODEL_WORKERS
|
||
from configs.model_config import LLM_MODEL
|
||
from server.utils import api_address, get_model_worker_config
|
||
|
||
from pprint import pprint
|
||
import random
|
||
from typing import List
|
||
|
||
|
||
def get_configured_models() -> List[str]:
    """Return every model name configured in FSCHAT_MODEL_WORKERS,
    excluding the "default" placeholder entry."""
    return [name for name in FSCHAT_MODEL_WORKERS if name != "default"]
|
||
|
||
|
||
# Base URL shared by every request in this module, resolved once at import
# time from server.utils.api_address().
api_base_url = api_address()
|
||
|
||
|
||
def get_running_models(api="/llm_model/list_models", timeout=60):
    """Fetch the list of model names from the API server.

    NOTE(review): despite the function name, the default endpoint is
    ``list_models`` (configured models), not ``list_running_models`` —
    confirm this is intentional.

    Args:
        api: endpoint path appended to the module-level ``api_base_url``.
        timeout: seconds to wait for the HTTP response; added (with a
            backward-compatible default) so a dead server cannot hang the
            whole test run indefinitely.

    Returns:
        The ``data`` list from the JSON response, or ``[]`` on any
        non-200 status.
    """
    url = api_base_url + api
    r = requests.post(url, timeout=timeout)
    if r.status_code == 200:
        return r.json()["data"]
    return []
|
||
|
||
|
||
def test_running_models(api="/llm_model/list_running_models"):
    """Verify the list-running-models endpoint returns a non-empty list.

    Asserts a 200 response whose JSON body contains a non-empty ``data``
    list of currently running models.
    """
    url = api_base_url + api
    r = requests.post(url)
    assert r.status_code == 200
    print("\n获取当前正在运行的模型列表:")
    # Parse the JSON body once instead of re-parsing it on every access.
    body = r.json()
    pprint(body)
    data = body["data"]
    assert isinstance(data, list)
    assert len(data) > 0
|
||
|
||
|
||
# Using stop_model is not recommended: with the current implementation, a
# stopped model can only be started again manually.
# def test_stop_model(api="/llm_model/stop"):
#     url = api_base_url + api
#     r = requests.post(url, json={""})
|
||
|
||
|
||
def test_change_model(api="/llm_model/change_model"):
    """Switch one randomly chosen running local model to a configured but
    not-yet-running model, then verify the switch took effect.

    Preconditions asserted along the way: at least one model is running,
    at least one configured model is not running, and at least one running
    model is local (not an online API).
    """
    url = api_base_url + api

    running_models = get_running_models()
    assert len(running_models) > 0

    model_workers = get_configured_models()

    # Candidate targets: configured but not currently running.
    # (Fixed typo: "availabel" -> "available".)
    available_new_models = list(set(model_workers) - set(running_models))
    assert len(available_new_models) > 0
    print(available_new_models)

    # Only local models can be switched; online-API models have no local worker.
    local_models = [x for x in running_models if not get_model_worker_config(x).get("online_api")]
    # Guard before random.choice, which would otherwise raise a bare
    # IndexError on an empty sequence.
    assert len(local_models) > 0

    model_name = random.choice(local_models)
    new_model_name = random.choice(available_new_models)
    print(f"\n尝试将模型从 {model_name} 切换到 {new_model_name}")
    r = requests.post(url, json={"model_name": model_name, "new_model_name": new_model_name})
    assert r.status_code == 200

    running_models = get_running_models()
    assert new_model_name in running_models