Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git (synced 2026-01-28 17:53:33 +08:00)
Fix the self.device_map parameter initialization logic
Enable the LLamaLLM loader
This commit is contained in:
parent 49e47231af
commit 62ce5f0775
@@ -1,4 +1,4 @@
 from .chatglm_llm import ChatGLM
-# from .llama_llm import LLamaLLM
+from .llama_llm import LLamaLLM
 from .moss_llm import MOSSLLM
@@ -130,13 +130,13 @@ class LoaderCheckPoint:
             # A custom device_map can be passed in to control how the model is deployed across the GPUs
             if self.device_map is None:
                 if 'chatglm' in model_name.lower():
-                    device_map = self.chatglm_auto_configure_device_map(num_gpus)
+                    self.device_map = self.chatglm_auto_configure_device_map(num_gpus)
                 elif 'moss' in model_name.lower():
-                    device_map = self.moss_auto_configure_device_map(num_gpus, model_name)
+                    self.device_map = self.moss_auto_configure_device_map(num_gpus, model_name)
                 else:
-                    device_map = self.chatglm_auto_configure_device_map(num_gpus)
+                    self.device_map = self.chatglm_auto_configure_device_map(num_gpus)

-            model = dispatch_model(model, device_map=device_map)
+            model = dispatch_model(model, device_map=self.device_map)
         else:
             print(
                 "Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been "