Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git, synced 2026-02-07 23:43:30 +08:00
Integrate openai plugins
This commit is contained in: parent f1b71e5629, commit cce2b55719
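
This commit threads three new request parameters (endpoint_host, endpoint_host_key, endpoint_host_proxy) through the file_chat API and the knowledge-base summary APIs (recreate_summary_vector_store, summary_file_to_vector_store, summary_doc_ids_to_vector_store) and forwards them to every get_ChatOpenAI call, so that each request can direct its LLM traffic to a caller-specified OpenAI-compatible endpoint, key, and proxy.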
@@ -99,6 +99,9 @@ async def file_chat(query: str = Body(..., description="用户输入", examples=
                                          "content": "虎头虎脑"}]]
                                      ),
                     stream: bool = Body(False, description="流式输出"),
+                    endpoint_host: str = Body(False, description="接入点地址"),
+                    endpoint_host_key: str = Body(False, description="接入点key"),
+                    endpoint_host_proxy: str = Body(False, description="接入点代理地址"),
                     model_name: str = Body(None, description="LLM 模型名称。"),
                     temperature: float = Body(0.01, description="LLM 采样温度", ge=0.0, le=1.0),
                     max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量,默认None代表模型最大值"),
@@ -116,6 +119,9 @@ async def file_chat(query: str = Body(..., description="用户输入", examples=
         max_tokens = None

     model = get_ChatOpenAI(
+        endpoint_host=endpoint_host,
+        endpoint_host_key=endpoint_host_key,
+        endpoint_host_proxy=endpoint_host_proxy,
         model_name=model_name,
         temperature=temperature,
         max_tokens=max_tokens,
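The matching change to get_ChatOpenAI in server/utils.py is not shown in this diff. A minimal sketch of what the extended helper might look like, assuming the three values map onto ChatOpenAI's openai_api_base, openai_api_key, and openai_proxy settings; the parameter handling below is an assumption, not the repository's actual implementation:

# Hypothetical sketch only; the real server/utils.py change is not part of this diff.
from langchain_openai.chat_models import ChatOpenAI


def get_ChatOpenAI(model_name: str,
                   temperature: float = 0.01,
                   max_tokens: int = None,
                   endpoint_host: str = None,        # assumed: OpenAI-compatible base URL
                   endpoint_host_key: str = None,    # assumed: API key for that endpoint
                   endpoint_host_proxy: str = None,  # assumed: HTTP(S) proxy for that endpoint
                   **kwargs) -> ChatOpenAI:
    params = dict(model_name=model_name,
                  temperature=temperature,
                  max_tokens=max_tokens,
                  **kwargs)
    if endpoint_host:
        # Route this request to the caller-supplied endpoint instead of the
        # server's configured default model endpoint.
        params["openai_api_base"] = endpoint_host
        params["openai_api_key"] = endpoint_host_key
        params["openai_proxy"] = endpoint_host_proxy
    return ChatOpenAI(**params)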
@@ -12,12 +12,16 @@ from server.knowledge_base.kb_summary.summary_chunk import SummaryAdapter
 from server.utils import wrap_done, get_ChatOpenAI, BaseResponse
 from server.knowledge_base.model.kb_document_model import DocumentWithVSId

+
 def recreate_summary_vector_store(
         knowledge_base_name: str = Body(..., examples=["samples"]),
         allow_empty_kb: bool = Body(True),
         vs_type: str = Body(DEFAULT_VS_TYPE),
         embed_model: str = Body(EMBEDDING_MODEL),
         file_description: str = Body(''),
+        endpoint_host: str = Body(False, description="接入点地址"),
+        endpoint_host_key: str = Body(False, description="接入点key"),
+        endpoint_host_proxy: str = Body(False, description="接入点代理地址"),
         model_name: str = Body(None, description="LLM 模型名称。"),
         temperature: float = Body(0.01, description="LLM 采样温度", ge=0.0, le=1.0),
         max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量,默认None代表模型最大值"),
@@ -25,6 +29,9 @@ def recreate_summary_vector_store(
     """
     重建单个知识库文件摘要
     :param max_tokens:
+    :param endpoint_host:
+    :param endpoint_host_key:
+    :param endpoint_host_proxy:
     :param model_name:
     :param temperature:
     :param file_description:
@@ -47,11 +54,17 @@ def recreate_summary_vector_store(
         kb_summary.create_kb_summary()

         llm = get_ChatOpenAI(
+            endpoint_host=endpoint_host,
+            endpoint_host_key=endpoint_host_key,
+            endpoint_host_proxy=endpoint_host_proxy,
             model_name=model_name,
             temperature=temperature,
             max_tokens=max_tokens,
         )
         reduce_llm = get_ChatOpenAI(
+            endpoint_host=endpoint_host,
+            endpoint_host_key=endpoint_host_key,
+            endpoint_host_proxy=endpoint_host_proxy,
             model_name=model_name,
             temperature=temperature,
             max_tokens=max_tokens,
@@ -99,12 +112,18 @@ def summary_file_to_vector_store(
         vs_type: str = Body(DEFAULT_VS_TYPE),
         embed_model: str = Body(EMBEDDING_MODEL),
         file_description: str = Body(''),
+        endpoint_host: str = Body(False, description="接入点地址"),
+        endpoint_host_key: str = Body(False, description="接入点key"),
+        endpoint_host_proxy: str = Body(False, description="接入点代理地址"),
         model_name: str = Body(None, description="LLM 模型名称。"),
         temperature: float = Body(0.01, description="LLM 采样温度", ge=0.0, le=1.0),
         max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量,默认None代表模型最大值"),
 ):
     """
     单个知识库根据文件名称摘要
+    :param endpoint_host:
+    :param endpoint_host_key:
+    :param endpoint_host_proxy:
     :param model_name:
     :param max_tokens:
     :param temperature:
@@ -127,11 +146,17 @@ def summary_file_to_vector_store(
         kb_summary.create_kb_summary()

         llm = get_ChatOpenAI(
+            endpoint_host=endpoint_host,
+            endpoint_host_key=endpoint_host_key,
+            endpoint_host_proxy=endpoint_host_proxy,
             model_name=model_name,
             temperature=temperature,
             max_tokens=max_tokens,
         )
         reduce_llm = get_ChatOpenAI(
+            endpoint_host=endpoint_host,
+            endpoint_host_key=endpoint_host_key,
+            endpoint_host_proxy=endpoint_host_proxy,
             model_name=model_name,
             temperature=temperature,
             max_tokens=max_tokens,
@@ -171,6 +196,9 @@ def summary_doc_ids_to_vector_store(
         vs_type: str = Body(DEFAULT_VS_TYPE),
         embed_model: str = Body(EMBEDDING_MODEL),
         file_description: str = Body(''),
+        endpoint_host: str = Body(False, description="接入点地址"),
+        endpoint_host_key: str = Body(False, description="接入点key"),
+        endpoint_host_proxy: str = Body(False, description="接入点代理地址"),
         model_name: str = Body(None, description="LLM 模型名称。"),
         temperature: float = Body(0.01, description="LLM 采样温度", ge=0.0, le=1.0),
         max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量,默认None代表模型最大值"),
@@ -178,6 +206,9 @@ def summary_doc_ids_to_vector_store(
     """
     单个知识库根据doc_ids摘要
     :param knowledge_base_name:
+    :param endpoint_host:
+    :param endpoint_host_key:
+    :param endpoint_host_proxy:
     :param doc_ids:
     :param model_name:
     :param max_tokens:
@@ -192,11 +223,17 @@ def summary_doc_ids_to_vector_store(
         return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}", data={})
     else:
         llm = get_ChatOpenAI(
+            endpoint_host=endpoint_host,
+            endpoint_host_key=endpoint_host_key,
+            endpoint_host_proxy=endpoint_host_proxy,
             model_name=model_name,
             temperature=temperature,
             max_tokens=max_tokens,
         )
         reduce_llm = get_ChatOpenAI(
+            endpoint_host=endpoint_host,
+            endpoint_host_key=endpoint_host_key,
+            endpoint_host_proxy=endpoint_host_proxy,
             model_name=model_name,
             temperature=temperature,
             max_tokens=max_tokens,
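From a client's point of view, the new fields are ordinary request-body parameters accepted by each of the endpoints touched above. An illustrative call against the file_chat route follows; the server address, route, and field values are assumptions for demonstration, not contents of this diff:

# Illustrative only; address, route, and values below are assumed, not taken from the diff.
import requests

resp = requests.post(
    "http://127.0.0.1:7861/chat/file_chat",               # assumed default API server/route
    json={
        "query": "你好",
        "knowledge_id": "samples",                         # hypothetical temp knowledge id
        "stream": False,
        # Fields introduced by this commit:
        "endpoint_host": "https://api.example.com/v1",     # OpenAI-compatible endpoint
        "endpoint_host_key": "sk-...",                     # key for that endpoint
        "endpoint_host_proxy": "http://127.0.0.1:7890",    # optional proxy
        "model_name": "gpt-3.5-turbo",
        "temperature": 0.01,
    },
)
print(resp.json())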
@@ -8,7 +8,6 @@ from pprint import pprint
 from langchain.agents import AgentExecutor
 from langchain_openai.chat_models import ChatOpenAI
 # from langchain.chat_models.openai import ChatOpenAI
-from server.utils import get_ChatOpenAI
 from server.agent.tools_factory.tools_registry import all_tools
 from server.agent.agent_factory.qwen_agent import create_structured_qwen_chat_agent
 from server.callback_handler.agent_callback_handler import AgentExecutorAsyncIteratorCallbackHandler