Merge pull request #2782 from zRzRzRzRzRzRzR/dev

Dev
This commit is contained in:
zR 2024-01-24 17:04:10 +08:00 committed by GitHub
commit f50400a204
6 changed files with 10 additions and 14 deletions

View File

@@ -40,8 +40,6 @@ FSCHAT_MODEL_WORKERS = {
         "device": LLM_DEVICE,
         # False or 'vllm': which inference acceleration framework to use; if vllm hits HuggingFace communication issues, see doc/FAQ
         # vllm's support for some models is still immature, so it is disabled by default for now
-        # fschat==0.2.33 has a bug; to use vllm, modify fastchat.server.vllm_worker in the source:
-        # at line 103, change the SamplingParams argument stop=list(stop) to stop=[i for i in stop if i != ""]
         "infer_turbo": False,
         # parameters required when model_worker loads a model across multiple GPUs
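The two removed lines documented a manual patch to fastchat.server.vllm_worker. A minimal standalone sketch of what that patch does, with illustrative names only (not the fastchat code itself): filtering empty strings out of the stop list before it reaches vllm's SamplingParams.

# Sketch of the stop-token fix described in the removed comment: fschat
# 0.2.33 passed stop=list(stop) to vllm's SamplingParams, which may contain
# empty strings; the suggested patch filters them out first.
def clean_stop(stop):
    """Drop empty strings before building SamplingParams(stop=...)."""
    return [s for s in stop if s != ""]

print(clean_stop(["</s>", "", "Observation:"]))  # ['</s>', 'Observation:']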

View File

@@ -38,7 +38,7 @@ transformers_stream_generator==0.0.4
 vllm==0.2.7; sys_platform == "linux"
 llama-index==0.9.35
-# jq==1.6.0
+#jq==1.6.0
 # beautifulsoup4==4.12.2
 # pysrt==1.1.2
 # dashscope==1.13.6 # qwen
@@ -47,8 +47,9 @@ llama-index==0.9.35
 # pymilvus==2.3.4
 # psycopg2==2.9.9
 # pgvector==0.2.4
-# flash-attn==2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat
-# rapidocr_paddle[gpu]==1.3.0.post5 # gpu acceleration for ocr of pdf and image files
+#flash-attn==2.4.2 # For Orion-14B-Chat and Qwen-14B-Chat
+#autoawq==0.1.8 # For Int4
+#rapidocr_paddle[gpu]==1.3.11 # gpu acceleration for ocr of pdf and image files
 arxiv==2.1.0
 youtube-search==2.1.2
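The sys_platform guard on the vllm pin above is a standard PEP 508 environment marker. To check how such a marker evaluates on a given machine, the packaging library exposes the same marker logic pip uses:

# Evaluate the PEP 508 environment marker that guards the vllm pin above.
from packaging.markers import Marker

marker = Marker('sys_platform == "linux"')
print(marker.evaluate())  # True on Linux, False on macOS/Windows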

View File

@@ -52,5 +52,6 @@ llama-index==0.9.35
 # pymilvus>=2.3.4
 # psycopg2==2.9.9
 # pgvector>=0.2.4
-# flash-attn>=2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat
-# rapidocr_paddle[gpu]>=1.3.0.post5
+#flash-attn==2.4.2 # For Orion-14B-Chat and Qwen-14B-Chat
+#autoawq==0.1.8 # For Int4
+#rapidocr_paddle[gpu]==1.3.11 # gpu acceleration for ocr of pdf and image files

View File

@@ -30,5 +30,4 @@ watchdog~=3.0.0
 # volcengine>=1.0.119
 # pymilvus>=2.3.4
 # psycopg2==2.9.9
-# pgvector>=0.2.4
-# flash-attn>=2.4.3 # For Orion-14B-Chat and Qwen-14B-Chat
+# pgvector>=0.2.4

View File

@@ -164,11 +164,11 @@ def dialogue_page(api: ApiRequest, is_lite: bool = False):
     available_models = []
     config_models = api.list_config_models()
     if not is_lite:
-        for k, v in config_models.get("local", {}).items():  # list models configured with a valid local path
+        for k, v in config_models.get("local", {}).items():
             if (v.get("model_path_exists")
                     and k not in running_models):
                 available_models.append(k)
-        for k, v in config_models.get("online", {}).items():  # list ONLINE_MODELS entries that are directly accessible and configured in LLM_MODELS
+        for k, v in config_models.get("online", {}).items():
             if not v.get("provider") and k not in running_models and k in LLM_MODELS:
                 available_models.append(k)
     llm_models = running_models + available_models
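Restated as a self-contained sketch, the selection logic above (the config_models shape mirrors what api.list_config_models() returns; the sample values are illustrative):

# Self-contained sketch of the model-selection logic in the hunk above.
LLM_MODELS = ["chatglm3-6b", "zhipu-api"]   # illustrative configuration
running_models = ["chatglm3-6b"]
config_models = {
    "local": {"chatglm3-6b": {"model_path_exists": True},
              "qwen-14b-chat": {"model_path_exists": False}},
    "online": {"zhipu-api": {},                      # no provider: direct access
               "minimax-api": {"provider": "MiniMaxWorker"}},
}

available_models = []
# local models with a valid path that are not already running
for k, v in config_models.get("local", {}).items():
    if v.get("model_path_exists") and k not in running_models:
        available_models.append(k)
# online models without a provider, configured in LLM_MODELS, not running
for k, v in config_models.get("online", {}).items():
    if not v.get("provider") and k not in running_models and k in LLM_MODELS:
        available_models.append(k)

llm_models = running_models + available_models
print(llm_models)  # ['chatglm3-6b', 'zhipu-api']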

View File

@@ -437,9 +437,6 @@ class ApiRequest:
             "prompt_name": prompt_name,
         }
-        # print(f"received input message:")
-        # pprint(data)
         response = self.post(
             "/chat/file_chat",
             json=data,
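For orientation, roughly what this call amounts to as a plain HTTP request. This is a sketch only: the base URL, knowledge_id, and field values are illustrative assumptions, and the real ApiRequest manages its own httpx client and streaming.

# Rough standalone equivalent of the /chat/file_chat request above.
import httpx

data = {
    "query": "Summarize the uploaded file.",
    "knowledge_id": "tmp-kb-123",   # hypothetical temporary KB id
    "prompt_name": "default",
}
with httpx.Client(base_url="http://127.0.0.1:7861") as client:
    resp = client.post("/chat/file_chat", json=data)
    print(resp.status_code)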