调整依赖;优化webui模型列表;更新wiki版本

This commit is contained in:
liunux4odoo 2024-01-23 13:11:15 +08:00
parent 60d5f7d439
commit 7248163b07
5 changed files with 23 additions and 17 deletions

@@ -1 +1 @@
Subproject commit 2f24adb218f23eab00d7fcd7ccf5072f2f35cb3c
Subproject commit 28f664aa08f8191a70339c9ecbe7a89b35a1032a

View File

@@ -36,10 +36,12 @@ pandas==2.0.3
einops==0.7.0
transformers_stream_generator==0.0.4
vllm==0.2.7; sys_platform == "linux"
jq==1.6.0
beautifulsoup4==4.12.2
pysrt==1.1.2
dashscope==1.13.6 # qwen
llama-index==0.9.35
# jq==1.6.0
# beautifulsoup4==4.12.2
# pysrt==1.1.2
# dashscope==1.13.6 # qwen
# volcengine==1.0.119 # fangzhou
# uncomment libs if you want to use corresponding vector store
# pymilvus==2.3.4
@@ -47,6 +49,7 @@ dashscope==1.13.6 # qwen
# pgvector==0.2.4
# flash-attn==2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat
# rapidocr_paddle[gpu]==1.3.0.post5 # gpu accelleration for ocr of pdf and image files
arxiv==2.1.0
youtube-search==2.1.2
duckduckgo-search==3.9.9

View File

@@ -37,15 +37,16 @@ einops>=0.7.0
transformers_stream_generator==0.0.4
vllm==0.2.7; sys_platform == "linux"
httpx==0.26.0
llama-index
jq==1.6.0
beautifulsoup4~=4.12.2
pysrt~=1.1.2
dashscope==1.13.6
arxiv~=2.1.0
youtube-search~=2.1.2
duckduckgo-search~=3.9.9
metaphor-python~=0.1.23
llama-index==0.9.35
# jq==1.6.0
# beautifulsoup4~=4.12.2
# pysrt~=1.1.2
# dashscope==1.13.6
# arxiv~=2.1.0
# youtube-search~=2.1.2
# duckduckgo-search~=3.9.9
# metaphor-python~=0.1.23
# volcengine>=1.0.119
# pymilvus>=2.3.4

View File

@@ -19,6 +19,8 @@ rapidocr_onnxruntime~=1.3.8
requests~=2.31.0
pathlib~=1.0.1
pytest~=7.4.3
llama-index==0.9.35
dashscope==1.13.6
arxiv~=2.1.0
youtube-search~=2.1.2

View File

@@ -6,7 +6,7 @@ from datetime import datetime
import os
import re
import time
from configs import (TEMPERATURE, HISTORY_LEN, PROMPT_TEMPLATES,
from configs import (TEMPERATURE, HISTORY_LEN, PROMPT_TEMPLATES, LLM_MODELS,
DEFAULT_KNOWLEDGE_BASE, DEFAULT_SEARCH_ENGINE, SUPPORT_AGENT_MODEL)
from server.knowledge_base.utils import LOADER_DICT
import uuid
@@ -168,8 +168,8 @@ def dialogue_page(api: ApiRequest, is_lite: bool = False):
if (v.get("model_path_exists")
and k not in running_models):
available_models.append(k)
for k, v in config_models.get("online", {}).items(): # 列出ONLINE_MODELS中直接访问的模型
if not v.get("provider") and k not in running_models:
for k, v in config_models.get("online", {}).items(): # 列出ONLINE_MODELS中直接访问且在LLM_MODELS中配置的模型
if not v.get("provider") and k not in running_models and k in LLM_MODELS:
available_models.append(k)
llm_models = running_models + available_models
cur_llm_model = st.session_state.get("cur_llm_model", default_model)