Merge pull request #2783 from chatchat-space/dev

Fix some dependency issues
zR 2024-01-24 17:04:37 +08:00 committed by GitHub
commit d4ab1a69f6
8 changed files with 88 additions and 84 deletions

View File

@@ -224,6 +224,7 @@ MODEL_PATH = {
         "dolly-v2-12b": "databricks/dolly-v2-12b",
         "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
     },
     "reranker": {
         "bge-reranker-large": "BAAI/bge-reranker-large",
         "bge-reranker-base": "BAAI/bge-reranker-base",

View File

@@ -40,8 +40,6 @@ FSCHAT_MODEL_WORKERS = {
     "device": LLM_DEVICE,
     # False or 'vllm': the inference acceleration framework to use; if vllm hits HuggingFace connectivity issues, see doc/FAQ
     # vllm support for some models is still immature, so it is disabled by default for now
-    # fschat==0.2.33 has a bug; to run from source, patch fastchat.server.vllm_worker
-    # at line 103, change the stop=list(stop) argument of sampling_params = SamplingParams to stop=[i for i in stop if i != ""]
     "infer_turbo": False,
     # parameters to configure when model_worker loads a model across multiple GPUs
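The two deleted comment lines described a workaround that the fschat==0.2.35 pin in this PR makes unnecessary. For context, a sketch of what that fschat 0.2.33 patch amounted to (the surrounding variable values here are made up; only the stop-list filter itself comes from the deleted comment):

```python
# Sketch of the obsolete fschat 0.2.33 workaround in fastchat.server.vllm_worker:
# the stop set could contain empty strings that broke vllm's stop handling,
# so it had to be filtered before building SamplingParams.
stop = {"", "</s>", "Observation:"}  # example; normally derived from the request

# before the patch: sampling_params = SamplingParams(..., stop=list(stop))
stop = [i for i in stop if i != ""]  # after: drop empty strings first
```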

@@ -1 +1 @@
-Subproject commit 2f24adb218f23eab00d7fcd7ccf5072f2f35cb3c
+Subproject commit 28f664aa08f8191a70339c9ecbe7a89b35a1032a

View File

@@ -1,61 +1,66 @@
-torch~=2.1.2
+torch==2.1.2
-torchvision~=0.16.2
+torchvision==0.16.2
-torchaudio~=2.1.2
+torchaudio==2.1.2
-xformers~=0.0.23.post1
+xformers==0.0.23.post1
-transformers~=4.36.2
+transformers==4.36.2
-sentence_transformers~=2.2.2
+sentence_transformers==2.2.2
 langchain==0.0.354
 langchain-experimental==0.0.47
 pydantic==1.10.13
-fschat~=0.2.35
+fschat==0.2.35
-openai~=1.9.0
+openai==1.9.0
-fastapi~=0.109.0
+fastapi==0.109.0
-sse_starlette~=1.8.2
+sse_starlette==1.8.2
-nltk~=3.8.1
+nltk==3.8.1
-uvicorn~=0.24.0.post1
+uvicorn==0.24.0.post1
-starlette~=0.32.0
+starlette==0.35.0
-unstructured[all-docs]~=0.12.0
+unstructured[all-docs] # ==0.11.8
-python-magic-bin; sys_platform ~= 'win32'
+python-magic-bin; sys_platform == 'win32'
-SQLAlchemy~=2.0.25
+SQLAlchemy==2.0.25
-faiss-cpu~=1.7.4
+faiss-cpu==1.7.4
-accelerate~=0.24.1
+accelerate==0.24.1
-spacy~=3.7.2
+spacy==3.7.2
-PyMuPDF~=1.23.16
+PyMuPDF==1.23.16
-rapidocr_onnxruntime~=1.3.8
+rapidocr_onnxruntime==1.3.8
-requests~=2.31.0
+requests==2.31.0
-pathlib~=1.0.1
+pathlib==1.0.1
-pytest~=7.4.3
+pytest==7.4.3
-numexpr~=2.8.6
+numexpr==2.8.6
-strsimpy~=0.2.1
+strsimpy==0.2.1
-markdownify~=0.11.6
+markdownify==0.11.6
-tiktoken~=0.5.2
+tiktoken==0.5.2
-tqdm~=4.66.1
+tqdm==4.66.1
-websockets~=12.0
+websockets==12.0
-numpy~=1.24.4
+numpy==1.24.4
-pandas~=2.0.3
+pandas==2.0.3
-einops~=0.7.0
+einops==0.7.0
-transformers_stream_generator~=0.0.4
+transformers_stream_generator==0.0.4
-vllm~=0.2.7; sys_platform ~= "linux"
+vllm==0.2.7; sys_platform == "linux"
-jq~=1.6.0
-beautifulsoup4~=4.12.2
-pysrt~=1.1.2
-dashscope~=1.13.6 # qwen
-# volcengine~=1.0.119 # fangzhou
+llama-index==0.9.35
+
+#jq==1.6.0
+# beautifulsoup4==4.12.2
+# pysrt==1.1.2
+# dashscope==1.13.6 # qwen
+# volcengine==1.0.119 # fangzhou
 # uncomment libs if you want to use corresponding vector store
-# pymilvus~=2.3.4
+# pymilvus==2.3.4
-# psycopg2~=2.9.9
+# psycopg2==2.9.9
-# pgvector~=0.2.4
+# pgvector==0.2.4
-# flash-attn~=2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat
+#flash-attn==2.4.2 # For Orion-14B-Chat and Qwen-14B-Chat
-#rapidocr_paddle[gpu]~=1.3.0.post5 # gpu accelleration for ocr of pdf and image files
+#autoawq==0.1.8 # For Int4
+#rapidocr_paddle[gpu]==1.3.11 # gpu accelleration for ocr of pdf and image files
+
-arxiv~=2.1.0
+arxiv==2.1.0
-youtube-search~=2.1.2
+youtube-search==2.1.2
-duckduckgo-search~=3.9.9
+duckduckgo-search==3.9.9
-metaphor-python~=0.1.23
+metaphor-python==0.1.23
-streamlit~=1.30.0
+streamlit==1.30.0
-streamlit-option-menu~=0.3.12
+streamlit-option-menu==0.3.12
-streamlit-antd-components~=0.3.1
+streamlit-antd-components==0.3.1
-streamlit-chatbox~=1.1.11
+streamlit-chatbox==1.1.11
-streamlit-modal~=0.1.0
+streamlit-modal==0.1.0
-streamlit-aggrid~=0.3.4.post3
+streamlit-aggrid==0.3.4.post3
-httpx~=0.26.0
+httpx==0.26.0
-watchdog~=3.0.0
+watchdog==3.0.0
+jwt==1.3.1
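The bulk of this file's change is swapping PEP 440 compatible-release specifiers (~=) for exact pins (==), plus fixing environment markers such as sys_platform, where ~= has no sensible meaning for a platform string. A quick sketch of the difference, using the packaging library purely to illustrate the semantics (it is not a dependency of this repo):

```python
# Illustrative only: how ~= and == differ under PEP 440, checked with the
# 'packaging' library (not part of this repo's requirements).
from packaging.specifiers import SpecifierSet

print("2.1.3" in SpecifierSet("~=2.1.2"))  # True: ~=2.1.2 means >=2.1.2, <2.2
print("2.1.3" in SpecifierSet("==2.1.2"))  # False: == pins exactly one version

# Environment markers (PEP 508): ~= only has version semantics, so applying
# it to a platform string fails when the marker is evaluated:
#   python-magic-bin; sys_platform == 'win32'   # correct, as fixed in this diff
#   python-magic-bin; sys_platform ~= 'win32'   # breaks marker evaluation
```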

View File

@@ -9,11 +9,11 @@ langchain-experimental==0.0.47
 pydantic==1.10.13
 fschat==0.2.35
 openai~=1.9.0
-fastapi~=0.108.0
+fastapi~=0.109.0
 sse_starlette==1.8.2
 nltk>=3.8.1
 uvicorn>=0.24.0.post1
-starlette~=0.32.0
+starlette~=0.35.0
 unstructured[all-docs]==0.11.0
 python-magic-bin; sys_platform == 'win32'
 SQLAlchemy==2.0.19
@@ -37,19 +37,21 @@ einops>=0.7.0
 transformers_stream_generator==0.0.4
 vllm==0.2.7; sys_platform == "linux"
 httpx==0.26.0
-llama-index
-jq==1.6.0
-beautifulsoup4~=4.12.2
-pysrt~=1.1.2
-dashscope==1.13.6
-arxiv~=2.1.0
-youtube-search~=2.1.2
-duckduckgo-search~=3.9.9
-metaphor-python~=0.1.23
+llama-index==0.9.35
+
+# jq==1.6.0
+# beautifulsoup4~=4.12.2
+# pysrt~=1.1.2
+# dashscope==1.13.6
+# arxiv~=2.1.0
+# youtube-search~=2.1.2
+# duckduckgo-search~=3.9.9
+# metaphor-python~=0.1.23
 # volcengine>=1.0.119
 # pymilvus>=2.3.4
 # psycopg2==2.9.9
 # pgvector>=0.2.4
-# flash-attn>=2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat
-# rapidocr_paddle[gpu]>=1.3.0.post5
+#flash-attn==2.4.2 # For Orion-14B-Chat and Qwen-14B-Chat
+#autoawq==0.1.8 # For Int4
+#rapidocr_paddle[gpu]==1.3.11 # gpu accelleration for ocr of pdf and image files

View File

@@ -7,9 +7,9 @@ fastapi~=0.109.0
 sse_starlette~=1.8.2
 nltk~=3.8.1
 uvicorn~=0.24.0.post1
-starlette~=0.32.0
+starlette~=0.35.0
 unstructured[all-docs]~=0.12.0
-python-magic-bin; sys_platform ~= 'win32'
+python-magic-bin; sys_platform == 'win32'
 SQLAlchemy~=2.0.25
 faiss-cpu~=1.7.4
 accelerate~=0.24.1
@@ -19,6 +19,8 @@ rapidocr_onnxruntime~=1.3.8
 requests~=2.31.0
 pathlib~=1.0.1
 pytest~=7.4.3
+llama-index==0.9.35
 dashscope==1.13.6
 arxiv~=2.1.0
 youtube-search~=2.1.2
@@ -28,5 +30,4 @@ watchdog~=3.0.0
 # volcengine>=1.0.119
 # pymilvus>=2.3.4
 # psycopg2==2.9.9
 # pgvector>=0.2.4
-# flash-attn>=2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat

View File

@@ -6,7 +6,7 @@ from datetime import datetime
 import os
 import re
 import time
-from configs import (TEMPERATURE, HISTORY_LEN, PROMPT_TEMPLATES,
+from configs import (TEMPERATURE, HISTORY_LEN, PROMPT_TEMPLATES, LLM_MODELS,
                      DEFAULT_KNOWLEDGE_BASE, DEFAULT_SEARCH_ENGINE, SUPPORT_AGENT_MODEL)
 from server.knowledge_base.utils import LOADER_DICT
 import uuid
@@ -164,12 +164,12 @@ def dialogue_page(api: ApiRequest, is_lite: bool = False):
         available_models = []
         config_models = api.list_config_models()
         if not is_lite:
-            for k, v in config_models.get("local", {}).items():  # list models configured with a valid local path
+            for k, v in config_models.get("local", {}).items():
                 if (v.get("model_path_exists")
                         and k not in running_models):
                     available_models.append(k)
-            for k, v in config_models.get("online", {}).items():  # list models in ONLINE_MODELS that are accessed directly
+            for k, v in config_models.get("online", {}).items():
-                if not v.get("provider") and k not in running_models:
+                if not v.get("provider") and k not in running_models and k in LLM_MODELS:
                     available_models.append(k)
         llm_models = running_models + available_models
         cur_llm_model = st.session_state.get("cur_llm_model", default_model)
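The functional change above is the added `k in LLM_MODELS` guard: an online model is now offered in the dropdown only if it has no provider, is not already running, and is also listed in LLM_MODELS. A self-contained sketch of that filter with hypothetical config data:

```python
# Hypothetical data illustrating the new online-model filter in dialogue_page:
# offer a model only if it has no "provider", is not already running,
# and (new in this commit) also appears in LLM_MODELS.
LLM_MODELS = ["chatglm3-6b", "zhipu-api"]
running_models = ["chatglm3-6b"]
online = {
    "zhipu-api": {},                       # passes all three checks
    "minimax-api": {},                     # filtered out: not in LLM_MODELS
    "openai-api": {"provider": "OpenAI"},  # filtered out: has a provider
}

available_models = [
    k for k, v in online.items()
    if not v.get("provider") and k not in running_models and k in LLM_MODELS
]
print(available_models)  # ['zhipu-api']
```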

View File

@@ -437,9 +437,6 @@ class ApiRequest:
             "prompt_name": prompt_name,
         }
-        # print(f"received input message:")
-        # pprint(data)
-
         response = self.post(
             "/chat/file_chat",
             json=data,
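This hunk only removes leftover debug printing before the request is sent. For orientation, a hedged sketch of an equivalent raw call to this endpoint with httpx (the base URL and the query field are assumptions; only the /chat/file_chat path and the prompt_name field appear in the diff):

```python
# Minimal sketch of posting to /chat/file_chat directly with httpx.
# The base URL and "query" field are assumed for illustration; only the
# endpoint path and "prompt_name" are taken from the diff above.
import httpx

data = {
    "query": "Summarize the uploaded file.",  # assumed request field
    "prompt_name": "default",
}
with httpx.Client(base_url="http://127.0.0.1:7861") as client:
    resp = client.post("/chat/file_chat", json=data)
    print(resp.status_code)
```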