mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git (synced 2026-02-05 22:33:24 +08:00)
Updated the dependencies and code changes required for langchain 0.1.x
This commit is contained in:
parent d144ff47c9
commit 5df19d907b
@@ -5,8 +5,10 @@ torch>=2.1.2
 torchvision>=0.16.2
 torchaudio>=2.1.2
-langchain>=0.0.352
-langchain-experimental>=0.0.47
+langchain>=0.1.0
+langchain_openai>=0.0.2
+langchain-community>=1.0.0
 pydantic==1.10.13
 fschat==0.2.35
 openai==1.9.0
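A quick way to confirm that an environment matches the new pins above is to query the installed versions directly. A minimal sketch (not part of the commit), standard library only; note that pip treats the `langchain_openai` spelling in requirements as the `langchain-openai` distribution:

```python
# Sketch: print installed versions of the packages this commit pins or adds.
from importlib.metadata import version, PackageNotFoundError

for pkg in ("langchain", "langchain-openai", "langchain-community", "pydantic", "fschat", "openai"):
    try:
        print(f"{pkg}: {version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")
```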
@@ -1,8 +1,8 @@
 """
 This file is a modified version for ChatGLM3-6B the original glm3_agent.py file from the langchain repo.
 """
-from __future__ import annotations
-
+
+from __future__ import annotations
 import json
 import logging
 from typing import Any, List, Sequence, Tuple, Optional, Union
@@ -21,9 +21,7 @@ from langchain.agents.agent import AgentExecutor
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.base import BaseTool
-from langchain_core.callbacks import Callbacks
 
-HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"
 
 logger = logging.getLogger(__name__)
@@ -148,7 +146,6 @@ class StructuredGLM3ChatAgent(Agent):
         formatted_tools = formatted_tools.replace("'", "\\'").replace("{", "{{").replace("}", "}}")
         template = prompt.format(tool_names=tool_names,
                                  tools=formatted_tools,
-                                 history="None",
                                  input="{input}",
                                  agent_scratchpad="{agent_scratchpad}")
@@ -169,7 +166,6 @@ class StructuredGLM3ChatAgent(Agent):
             prompt: str = None,
             callbacks: List[BaseCallbackHandler] = [],
             output_parser: Optional[AgentOutputParser] = None,
-            human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
             input_variables: Optional[List[str]] = None,
             memory_prompts: Optional[List[BasePromptTemplate]] = None,
             **kwargs: Any,
@@ -229,6 +225,5 @@ def initialize_glm3_agent(
         callbacks=callbacks,
         memory=memory,
         tags=tags_,
-        intermediate_steps=[],
         **kwargs,
     )
server/agent/agent_instruct.md (new file, 32 lines)
@@ -0,0 +1,32 @@
+# What tools should be used
+
+# search_internet
+
+Use this tool when the user needs to search the internet. These are usually questions you do not know the answer to, and they share recognizable traits,
+
+for example:
+
++ Search the internet for xxx for me
++ I want to know the latest news
+
+Or the user clearly intends to obtain factual information.
+
+The returned field is as follows:
+
+```
+search_internet
+```
+
+# search_local_knowledge
+
+Use this tool when the user wants to retrieve local knowledge, typically domain-specific questions beyond your own capabilities, or a task the user has explicitly specified.
+
+For example:
+
++ Tell me the xxx information about xxx
++ What is the xxx of xxx in xxx
+
+The returned field is as follows:
+
+```
+search_local_knowledge
+```
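The new agent_instruct.md is a prompt fragment rather than code. A hypothetical sketch of how such a file could be folded into the action model's system prompt; the path default and the helper function name are illustrative, not part of the commit:

```python
# Hypothetical helper: read the tool-routing instructions and prepend them to a system prompt.
from pathlib import Path

def build_system_prompt(instruct_path: str = "server/agent/agent_instruct.md") -> str:
    path = Path(instruct_path)
    instructions = path.read_text(encoding="utf-8") if path.exists() else ""
    return (
        f"{instructions}\n\n"
        "Answer the user's question, and call the matching tool when one of the rules above applies."
    )

if __name__ == "__main__":
    print(build_system_prompt())
```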
@@ -11,7 +11,7 @@ def calculate(a: float, b: float, operator: str) -> float:
         if b != 0:
             return a / b
         else:
-            return float('inf')  # guard against division by zero
+            return float('inf')
     elif operator == "^":
         return a ** b
     else:
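For clarity on the hunk above: the division branch deliberately returns infinity instead of raising on a zero divisor. A compact restatement with a couple of checks; this is a sketch, not the project's full calculate implementation:

```python
# Sketch of the behavior shown in the hunk: zero divisors map to infinity, "^" means power.
def calculate(a: float, b: float, operator: str) -> float:
    if operator == "/":
        return a / b if b != 0 else float("inf")
    if operator == "^":
        return a ** b
    raise ValueError(f"unsupported operator: {operator!r}")

assert calculate(1, 0, "/") == float("inf")
assert calculate(2, 10, "^") == 1024
```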
@@ -1,5 +1,4 @@
-# Wrapper around LangChain's built-in YouTube search tool
-from langchain.tools import YouTubeSearchTool
+from langchain_community.tools import YouTubeSearchTool
 from pydantic import BaseModel, Field
 def search_youtube(query: str):
     tool = YouTubeSearchTool()
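In langchain 0.1.x the tool class ships with the langchain-community package, as the hunk above reflects. A minimal usage sketch, assuming the `youtube_search` pip package the tool depends on is installed:

```python
# Sketch: same wrapper behavior with the migrated import path.
from langchain_community.tools import YouTubeSearchTool

tool = YouTubeSearchTool()
# The tool accepts "query,max_results"; this asks for up to two links.
print(tool.run("chatglm3 demo,2"))
```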
@@ -1,6 +1,6 @@
 # LangChain's Shell tool
 from pydantic import BaseModel, Field
-from langchain.tools import ShellTool
+from langchain_community.tools import ShellTool
 def shell(query: str):
     tool = ShellTool()
     return tool.run(tool_input=query)
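Same migration pattern for the shell wrapper; a minimal sketch mirroring the project's shell() helper above (the echo command is just an example):

```python
# Sketch: ShellTool now comes from langchain_community; it executes the given command string.
from langchain_community.tools import ShellTool

tool = ShellTool()
print(tool.run(tool_input="echo langchain 0.1.x migration"))
```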
@@ -5,7 +5,8 @@ from typing import List, Union, AsyncIterable, Dict
 from fastapi import Body
 from fastapi.responses import StreamingResponse
 
-from langchain.agents import initialize_agent, AgentType
+from langchain.agents import initialize_agent, AgentType, create_structured_chat_agent, AgentExecutor
+from langchain_core.messages import HumanMessage, AIMessage
 from langchain_core.output_parsers import StrOutputParser
 from langchain.chains import LLMChain
 from langchain.prompts.chat import ChatPromptTemplate
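The widened import line brings in the langchain 0.1.x building blocks: create_structured_chat_agent produces the agent runnable and AgentExecutor drives the tool loop. A self-contained sketch of that wiring, with a hand-rolled prompt and placeholder model name and key rather than this project's prompts["action_model"] and model config; it needs an OpenAI-compatible endpoint to actually answer:

```python
# Sketch of the structured-chat agent pattern enabled by the new imports.
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain.prompts.chat import ChatPromptTemplate
from langchain_community.tools import ShellTool
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo", openai_api_key="EMPTY")  # placeholder model and key
tools = [ShellTool()]
# The prompt must expose {tools}, {tool_names}, {input} and {agent_scratchpad}.
prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer the question, using these tools when helpful:\n{tools}\nTool names: {tool_names}"),
    ("human", "{input}\n\n{agent_scratchpad}"),
])

agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# executor.invoke({"input": "print the current working directory"})
```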
@@ -83,7 +84,6 @@ def create_models_chains(history, history_len, prompts, models, tools, callbacks
             llm=models["action_model"],
             tools=tools,
             prompt=prompts["action_model"],
-            memory=memory,
             callbacks=callbacks,
             verbose=True,
         )
@@ -92,7 +92,6 @@ def create_models_chains(history, history_len, prompts, models, tools, callbacks
             llm=models["action_model"],
             tools=tools,
             prompt=prompts["action_model"],
-            memory=memory,
             callbacks=callbacks,
             verbose=True,
         )
@@ -102,7 +101,6 @@ def create_models_chains(history, history_len, prompts, models, tools, callbacks
             tools=tools,
             callbacks=callbacks,
             agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
-            memory=memory,
             verbose=True,
         )
@@ -111,7 +109,7 @@ def create_models_chains(history, history_len, prompts, models, tools, callbacks
         #     chain
         # )
         # full_chain = ({"topic": classifier_chain, "input": lambda x: x["input"]} | branch)
-        full_chain = ({"input": lambda x: x["input"]} | agent_executor)
+        full_chain = ({"input": lambda x: x["input"], } | agent_executor)
     else:
         chain.llm.callbacks = callbacks
         full_chain = ({"input": lambda x: x["input"]} | chain)
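The ({"input": lambda x: x["input"]} | agent_executor) expression above is LCEL composition: a plain dict of callables piped into a runnable is coerced into a RunnableParallel whose output feeds that runnable. A tiny self-contained sketch of the same shape, with a lambda standing in for the agent executor:

```python
# Sketch: dict-of-callables | runnable builds a mapping step in front of the runnable.
from langchain_core.runnables import RunnableLambda

shout = RunnableLambda(lambda d: d["input"].upper())
full_chain = {"input": lambda x: x["input"]} | shout
print(full_chain.invoke({"input": "hello agent"}))  # -> HELLO AGENT
```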
@@ -146,14 +144,9 @@ async def chat(query: str = Body(..., description="用户输入", examples=["恼
 
     callback = AgentExecutorAsyncIteratorCallbackHandler()
     callbacks = [callback]
 
-    # Select models from the config
     models, prompts = create_models_from_config(callbacks=[], configs=model_config, stream=stream)
 
-    # Select tools from the config
     tools = [tool for tool in all_tools if tool.name in tool_config]
     tools = [t.copy(update={"callbacks": callbacks}) for t in tools]
-    # Build the full chain
     full_chain = create_models_chains(prompts=prompts,
                                       models=models,
                                       conversation_id=conversation_id,
@@ -163,6 +156,7 @@ async def chat(query: str = Body(..., description="用户输入", examples=["恼
                                       history_len=history_len,
                                       metadata=metadata)
     task = asyncio.create_task(wrap_done(full_chain.ainvoke({"input": query}), callback.done))
 
     async for chunk in callback.aiter():
         data = json.loads(chunk)
         data["message_id"] = message_id
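The wrap_done / callback.aiter() pattern above runs the chain as a background task and streams chunks out of an async-iterator callback handler. A self-contained sketch of that control flow, with a plain queue standing in for AgentExecutorAsyncIteratorCallbackHandler; the names here are illustrative only:

```python
# Sketch: a producer task pushes chunks, a done-event signals completion, the consumer drains the queue.
import asyncio

async def fake_chain(queue: asyncio.Queue) -> None:
    for token in ("thought", "tool call", "final answer"):
        await queue.put(token)
        await asyncio.sleep(0)

async def wrap_done(awaitable, done: asyncio.Event) -> None:
    try:
        await awaitable
    finally:
        done.set()  # tell the consumer the producer has finished

async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    done = asyncio.Event()
    task = asyncio.create_task(wrap_done(fake_chain(queue), done))
    while not (done.is_set() and queue.empty()):
        try:
            print(await asyncio.wait_for(queue.get(), timeout=0.1))
        except asyncio.TimeoutError:
            continue
    await task

asyncio.run(main())
```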
@@ -17,8 +17,9 @@ from langchain.text_splitter import TextSplitter
 from pathlib import Path
 from server.utils import run_in_thread_pool, get_model_worker_config
 import json
-from typing import List, Union,Dict, Tuple, Generator
+from typing import List, Union, Dict, Tuple, Generator
 import chardet
+from langchain_community.document_loaders import JSONLoader
 
 
 def validate_kb_name(knowledge_base_id: str) -> bool:
@@ -122,15 +123,13 @@ def _new_json_dumps(obj, **kwargs):
     kwargs["ensure_ascii"] = False
     return _origin_json_dumps(obj, **kwargs)
 
 
 if json.dumps is not _new_json_dumps:
     _origin_json_dumps = json.dumps
     json.dumps = _new_json_dumps
 
 
-class JSONLinesLoader(langchain.document_loaders.JSONLoader):
-    '''
-    Line-oriented JSON loader; requires the .jsonl file extension.
-    '''
+class JSONLinesLoader(JSONLoader):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._json_lines = True
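With the base class now imported from langchain-community, the subclass only has to switch the loader into JSON Lines mode. A hypothetical usage sketch; the file path and jq_schema are illustrative, and JSONLoader needs the `jq` package installed before the commented lines will run:

```python
# Sketch: the loader reads one JSON object per line and extracts a text field.
from langchain_community.document_loaders import JSONLoader

class JSONLinesLoader(JSONLoader):
    """Line-oriented JSON loader; expects a .jsonl file."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._json_lines = True

# loader = JSONLinesLoader("samples/records.jsonl", jq_schema=".text")
# docs = loader.load()  # one Document per input line
```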
@@ -9,8 +9,8 @@ from configs import (LLM_MODEL_CONFIG, LLM_DEVICE, EMBEDDING_DEVICE,
                      FSCHAT_MODEL_WORKERS, HTTPX_DEFAULT_TIMEOUT)
 import os
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from langchain.chat_models import ChatOpenAI
-from langchain.llms import OpenAI
+from langchain_openai.chat_models import ChatOpenAI
+from langchain_community.llms import OpenAI
 import httpx
 from typing import (
     TYPE_CHECKING,
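The two rewritten imports are the langchain 0.1.x homes for the chat and completion model wrappers. A minimal sketch of constructing them against a local OpenAI-compatible endpoint; the base URL, key, and model name below are placeholders, not this project's config values:

```python
# Sketch: chat and completion wrappers from their new packages, pointed at a local server.
from langchain_openai.chat_models import ChatOpenAI
from langchain_community.llms import OpenAI

chat_model = ChatOpenAI(
    model="chatglm3-6b",
    openai_api_base="http://127.0.0.1:20000/v1",  # placeholder fschat-style endpoint
    openai_api_key="EMPTY",
)
completion_model = OpenAI(
    model="chatglm3-6b",
    openai_api_base="http://127.0.0.1:20000/v1",
    openai_api_key="EMPTY",
)
# print(chat_model.invoke("Hello").content)
```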
update_requirements.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+python -m pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+while read requirement; do
+    python -m pip install --upgrade "$requirement" -i https://pypi.tuna.tsinghua.edu.cn/simple
+done < requirements.txt