Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git
Update startup.py
Fix the bug where vllm_worker fails to start with vllm==0.2.2
This commit is contained in:
parent 717355bf1e
commit abb32722fe
@@ -137,7 +137,9 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
             args.quantization = None
             args.max_log_len = None
             args.tokenizer_revision = None
 
+            # new parameter required by vllm 0.2.2
+            args.max_paddings = 256
 
             if args.model_path:
                 args.model = args.model_path
             if args.num_gpus > 1:
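A minimal, self-contained sketch (not the repository's code) of how the new engine argument could be set defensively so the worker also starts against older vllm releases. The `args` namespace below merely stands in for the parsed worker arguments built in create_model_worker_app; the 0.2.2 requirement for max_paddings is taken from the commit comment, while the version guard and the use of `packaging` are assumptions, not part of the patch:

from argparse import Namespace

from packaging import version  # assumption: packaging is installed alongside vllm
import vllm

# Stand-in for the parsed worker arguments used in create_model_worker_app.
args = Namespace(quantization=None, max_log_len=None, tokenizer_revision=None)

# Per the commit comment, vllm 0.2.2 expects a max_paddings value on the
# engine arguments; guarding by version keeps older vllm installs working.
if version.parse(vllm.__version__) >= version.parse("0.2.2"):
    args.max_paddings = 256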
@@ -161,7 +163,7 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
                         conv_template = args.conv_template,
                         )
             sys.modules["fastchat.serve.vllm_worker"].engine = engine
-            # sys.modules["fastchat.serve.vllm_worker"].worker = worker
+            sys.modules["fastchat.serve.vllm_worker"].worker = worker
             sys.modules["fastchat.serve.vllm_worker"].logger.setLevel(log_level)
 
         else:
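The second hunk re-enables the previously commented-out worker assignment. Below is a short sketch of that module-level injection pattern, assuming fastchat and vllm are installed; the helper name register_vllm_worker is hypothetical, and only the attribute assignments mirror the patched code:

import sys

import fastchat.serve.vllm_worker  # ensures the module is present in sys.modules


def register_vllm_worker(engine, worker, log_level: str = "INFO") -> None:
    # fastchat's vllm_worker module reads these module-level globals when it
    # serves requests, so both `engine` and `worker` must be attached before
    # the FastAPI app is returned.
    mod = sys.modules["fastchat.serve.vllm_worker"]
    mod.engine = engine
    mod.worker = worker  # the assignment this commit re-enables
    mod.logger.setLevel(log_level)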