Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git (synced 2026-01-19 21:37:20 +08:00)
Remove local fschat config; upgrade pydantic to 2
This commit is contained in:
parent 777b7c3499
commit 175db6710e
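The commit message mentions the pydantic 1 to 2 upgrade. As background, here is a minimal sketch of the kind of API change that migration usually involves; the model below is hypothetical and not taken from this repository.

# Hypothetical model illustrating common pydantic v1 -> v2 changes.
from pydantic import BaseModel, ConfigDict, field_validator


class EmbedRequest(BaseModel):
    # v1: `class Config: extra = "forbid"`
    model_config = ConfigDict(extra="forbid")

    texts: list[str]
    to_query: bool = False

    # v1: @validator("texts")
    @field_validator("texts")
    @classmethod
    def texts_not_empty(cls, v):
        if not v:
            raise ValueError("texts must not be empty")
        return v


req = EmbedRequest(texts=["hello"])
print(req.model_dump())  # v1: req.dict()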
@@ -1,2 +1,2 @@
-from .glm3_agent import create_structured_glm3_chat_agent
+# from .glm3_agent import create_structured_glm3_chat_agent
 from .qwen_agent import create_structured_qwen_chat_agent
@@ -8,8 +8,7 @@ from langchain_core.messages import SystemMessage
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.tools import BaseTool
 
-from server.agent.agent_factory import (create_structured_glm3_chat_agent,
-                                        create_structured_qwen_chat_agent)
+from server.agent.agent_factory import ( create_structured_qwen_chat_agent)
 
 
 def agents_registry(
@@ -24,7 +23,8 @@ def agents_registry(
     # Write any optimized method here.
     if "glm3" in llm.model_name.lower():
         # An optimized method of langchain Agent that uses the glm3 series model
-        agent = create_structured_glm3_chat_agent(llm=llm, tools=tools)
+        # agent = create_structured_glm3_chat_agent(llm=llm, tools=tools)
+        pass
     elif "qwen" in llm.model_name.lower():
         agent = create_structured_qwen_chat_agent(llm=llm, tools=tools)
     else:
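With the glm3 branch reduced to pass, agent is only assigned when the model name matches the qwen branch. Below is a minimal standalone sketch of the resulting dispatch; create_structured_qwen_chat_agent is the factory imported in the hunk above, while the helper name pick_agent and the error handling are illustrative assumptions, not part of the repository.

# Sketch of the dispatch this hunk leaves behind.
from server.agent.agent_factory import create_structured_qwen_chat_agent


def pick_agent(llm, tools):
    agent = None
    name = llm.model_name.lower()
    if "glm3" in name:
        # glm3 support is commented out in this commit; nothing is built here
        pass
    elif "qwen" in name:
        agent = create_structured_qwen_chat_agent(llm=llm, tools=tools)
    if agent is None:
        raise ValueError(f"no agent factory available for model {llm.model_name!r}")
    return agent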
@@ -1,6 +1,6 @@
 from langchain.docstore.document import Document
 from configs import EMBEDDING_MODEL, logger
-from server.model_workers.base import ApiEmbeddingsParams
+# from server.model_workers.base import ApiEmbeddingsParams
 from server.utils import BaseResponse, get_model_worker_config, list_embed_models, list_online_embed_models
 from fastapi import Body
 from fastapi.concurrency import run_in_threadpool
@@ -30,8 +30,8 @@ def embed_texts(
             embed_model = config.get("embed_model")
             worker = worker_class()
             if worker_class.can_embedding():
-                params = ApiEmbeddingsParams(texts=texts, to_query=to_query, embed_model=embed_model)
-                resp = worker.do_embeddings(params)
+                # params = ApiEmbeddingsParams(texts=texts, to_query=to_query)
+                resp = worker.do_embeddings(None)
                 return BaseResponse(**resp)
 
             return BaseResponse(code=500, msg=f"指定的模型 {embed_model} 不支持 Embeddings 功能。")
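For reference, run_in_threadpool (imported in the first embeddings hunk) is how a synchronous embed_texts is typically bridged into an async FastAPI handler. The sketch below assumes embed_texts takes texts, embed_model and to_query keywords, mirroring the variables visible in this diff; the route path, defaults and the local stub are assumptions.

# Sketch: wrap a blocking embed_texts for an async endpoint with run_in_threadpool.
from typing import List

from fastapi import Body, FastAPI
from fastapi.concurrency import run_in_threadpool

app = FastAPI()


def embed_texts(texts: List[str], embed_model: str, to_query: bool = False) -> dict:
    # stand-in stub for the synchronous function patched in the hunk above
    return {"code": 200, "data": [[0.0] * 3 for _ in texts]}


@app.post("/other/embed_texts")
async def embed_texts_endpoint(
        texts: List[str] = Body(..., embed=True),
        embed_model: str = Body("bge-large-zh", embed=True),  # stand-in for EMBEDDING_MODEL
        to_query: bool = Body(False, embed=True),
) -> dict:
    # run the blocking call in a worker thread so the event loop is not blocked
    return await run_in_threadpool(
        embed_texts, texts=texts, embed_model=embed_model, to_query=to_query
    )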
@@ -147,7 +147,6 @@ def parse_args() -> argparse.ArgumentParser:
 def dump_server_info(after_start=False, args=None):
     import platform
     import langchain
-    import fastchat
     from server.utils import api_address, webui_address
 
     print("\n")
@@ -155,7 +154,7 @@ def dump_server_info(after_start=False, args=None):
     print(f"操作系统:{platform.platform()}.")
     print(f"python版本:{sys.version}")
     print(f"项目版本:{VERSION}")
-    print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat.__version__}")
+    print(f"langchain版本:{langchain.__version__}")
     print("\n")
 
     print(f"当前使用的分词器:{TEXT_SPLITTER_NAME}")
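The last two hunks remove the unconditional fastchat import and the version line that used it. Below is a sketch of an optional-import variant that keeps the version line when fastchat happens to be installed; it is an alternative for illustration, not what this commit does.

# Sketch: print the fastchat version only if the package is importable,
# instead of importing it unconditionally as the removed lines did.
import langchain

try:
    import fastchat
    fastchat_version = fastchat.__version__
except ImportError:
    fastchat_version = "not installed"

print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat_version}")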