diff --git a/configs/loom.yaml.example b/configs/loom.yaml.example
index aedd7a6c..b3d9d1cb 100644
--- a/configs/loom.yaml.example
+++ b/configs/loom.yaml.example
@@ -9,6 +9,10 @@
 publish_server:
   host: "127.0.0.1"
   port: 8001
+subscribe_server:
+  host: "127.0.0.1"
+  port: 8002
+
 openai_plugins_folder:
   - "openai_plugins"
 openai_plugins_load_folder:
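The new `subscribe_server` block mirrors the existing `publish_server` entry, so consumers can read both endpoints the same way. Below is a minimal sketch of doing so, assuming PyYAML and a `configs/loom.yaml` written from this example; the `load_server_endpoints` helper is illustrative, not part of the repository:

```python
import yaml  # PyYAML, assumed available


def load_server_endpoints(path: str = "configs/loom.yaml"):
    """Read the publish/subscribe endpoints from a config based on loom.yaml.example."""
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    publish = cfg["publish_server"]      # {"host": "127.0.0.1", "port": 8001}
    subscribe = cfg["subscribe_server"]  # newly added: {"host": "127.0.0.1", "port": 8002}
    return publish, subscribe


if __name__ == "__main__":
    pub, sub = load_server_endpoints()
    print(f"publish: {pub['host']}:{pub['port']}  subscribe: {sub['host']}:{sub['port']}")
```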
@@ -20,323 +24,3 @@ plugins:
     name: "openai"
   - zhipuai:
     name: "zhipuai"
-
-  - imitater:
-    name: "imitater"
-    logdir: "logs"
-    worker_name: "qwen-worker1"
-    run_openai_api:
-      host: "127.0.0.1"
-      port: 30000
-    imitate_model_workers:
-      - qwen-worker1:
-          model:
-            name: "Qwen-1_8B-Chat"
-            chat_model_path: "Qwen-1_8B"
-            chat_model_device: "0"
-            chat_template_path: "openai_plugins/imitater/templates/qwen.jinja"
-            generation_config_path: "openai_plugins/imitater/generation_config/qwen"
-            agent_type: "react"
-          embedding:
-            name: "bge-large-zh"
-            embed_model_path: "BAAI/bge-large-zh"
-            embed_model_device: "0"
-            embed_batch_size: 16
-
-  - fastchat:
-    name: "fastchat"
-    logdir: "logs"
-    # Device to run the LLM on. "auto" detects automatically; it can also be set manually to "cuda", "mps", or "cpu".
-    llm_device: "auto"
-    model_names:
-      - "chatglm3-6b"
-    run_controller:
-      host: "127.0.0.1"
-      port: 20001
-      dispatch_method: "shortest_queue"
-    run_openai_api:
-      host: "127.0.0.1"
-      port: 20000
-    fschat_model_workers:
-      - default:
-          host: "127.0.0.1"
-          port: 20002
-          device: "auto"
-          infer_turbo: False
-
-          # Parameters needed when model_worker loads a model across multiple GPUs
-          # "gpus": None,  # GPUs to use, as a string such as "0,1"; if this has no effect, set CUDA_VISIBLE_DEVICES="0,1" instead
-          # "num_gpus": 1,  # number of GPUs to use
-          # "max_gpu_memory": "20GiB",  # maximum VRAM to use per GPU
-
-          # Less commonly used model_worker parameters; configure as needed
-          # "load_8bit": False,  # enable 8-bit quantization
-          # "cpu_offloading": None,
-          # "gptq_ckpt": None,
-          # "gptq_wbits": 16,
-          # "gptq_groupsize": -1,
-          # "gptq_act_order": False,
-          # "awq_ckpt": None,
-          # "awq_wbits": 16,
-          # "awq_groupsize": -1,
-          # "model_names": LLM_MODELS,
-          # "conv_template": None,
-          # "limit_worker_concurrency": 5,
-          # "stream_interval": 2,
-          # "no_register": False,
-          # "embed_in_truncate": False,
-
-          # vllm_worker parameters; note that vLLM requires a GPU and has only been tested on Linux
-
-          # tokenizer = model_path  # set here if the tokenizer differs from model_path
-          # 'max_model_len': 1024
-          # 'max_parallel_loading_workers': 1
-          # 'max_context_len_to_capture': 1024
-          # 'enforce_eager': False
-          # 'tokenizer_mode': 'auto'
-          # 'trust_remote_code': True
-          # 'download_dir': None
-          # 'load_format': 'auto'
-          # 'dtype': 'auto'
-          # 'seed': 0
-          # 'worker_use_ray': False
-          # 'pipeline_parallel_size': 1
-          # 'tensor_parallel_size': 1
-          # 'block_size': 16
-          # 'swap_space': 4  # GiB
-          # 'gpu_memory_utilization': 0.90
-          # 'max_num_batched_tokens': 2560
-          # 'max_num_seqs': 256
-          # 'disable_log_stats': False
-          # 'conv_template': 'qwen-7b-chat'
-          # 'limit_worker_concurrency': 5
-          # 'no_register': False
-          # 'num_gpus': 1
-          # 'engine_use_ray': False
-          # 'disable_log_requests': False
-
-      - chatglm3-6b:
-          host: "127.0.0.1"
-          device: "cuda"
-          port: 20009
-
-      - internlm2-chat-7b:
-          host: "127.0.0.1"
-          device: "cuda"
-          port: 20009
-      - Qwen-1_8B:
-          host: "127.0.0.1"
-          port: 20008
-
-      # The following entries need no changes; set which models to launch in model_config
-      - zhipu-api:
-          port: 21001
-
-      - minimax-api:
-          port: 21002
-
-      - xinghuo-api:
-          port: 21003
-
-      - qianfan-api:
-          port: 21004
-
-      - fangzhou-api:
-          port: 21005
-
-      - qwen-api:
-          port: 21006
-
-      - baichuan-api:
-          port: 21007
-
-      - azure-api:
-          port: 21008
-
-      - tiangong-api:
-          port: 21009
-    online_llm_model:
-      # Online models. Assign a different port to each online API in server_config.
-
-      - "openai-api":
-          "model_name": "gpt-3.5-turbo"
-          "api_base_url": "https://api.openai.com/v1"
-          "api_key": ""
-          "openai_proxy": ""
-
-      # To register and obtain an API key, visit http://open.bigmodel.cn
-      - "zhipu-api":
-          "api_key": ""
-          "version": "chatglm_turbo"  # options include "chatglm_turbo"
-          "provider": "ChatGLMWorker"
-
-      # To register and obtain an API key, visit https://api.minimax.chat/
-      - "minimax-api":
-          "group_id": ""
-          "api_key": ""
-          "is_pro": False
-          "provider": "MiniMaxWorker"
-
-      # To register and obtain an API key, visit https://xinghuo.xfyun.cn/
-      - "xinghuo-api":
-          "APPID": ""
-          "APISecret": ""
-          "api_key": ""
-          "version": "v1.5"  # the iFlytek Spark model version you use; options include "v3.0", "v1.5", "v2.0"
-          "provider": "XingHuoWorker"
-
-      # Baidu Qianfan API; see https://cloud.baidu.com/doc/WENXINWORKSHOP/s/4lilb2lpf for how to apply
-      - "qianfan-api":
-          "version": "ERNIE-Bot"  # case-sensitive; currently supports "ERNIE-Bot" and "ERNIE-Bot-turbo", see the official docs for more
-          "version_url": ""  # alternatively, leave version empty and fill in the API URL of the model deployment you applied for on Qianfan
-          "api_key": ""
-          "secret_key": ""
-          "provider": "QianFanWorker"
-
-      # Volcano Ark API; see the docs at https://www.volcengine.com/docs/82379
-      - "fangzhou-api":
-          "version": "chatglm-6b-model"  # currently supports "chatglm-6b-model"; see the Ark section of the supported-model list in the docs for more
-          "version_url": ""  # alternatively, leave version empty and fill in the API URL of the model deployment you applied for on Ark
-          "api_key": ""
-          "secret_key": ""
-          "provider": "FangZhouWorker"
-
-      # Alibaba Cloud Tongyi Qianwen API; see https://help.aliyun.com/zh/dashscope/developer-reference/api-details
-      - "qwen-api":
-          "version": "qwen-turbo"  # options include "qwen-turbo", "qwen-plus"
-          "api_key": ""  # create one on the DashScope API-KEY management page in the Alibaba Cloud console
-          "provider": "QwenWorker"
-
-      # Baichuan API; see https://www.baichuan-ai.com/home#api-enter for how to apply
-      - "baichuan-api":
-          "version": "Baichuan2-53B"  # currently supports "Baichuan2-53B"; see the official docs
-          "api_key": ""
-          "secret_key": ""
-          "provider": "BaiChuanWorker"
-
-      # Azure API
-      - "azure-api":
-          "deployment_name": ""  # name of the deployment
-          "resource_name": ""  # fill in only the resource_name part of https://{resource_name}.openai.azure.com/openai/, nothing else
-          "api_version": ""  # the API version, not the model version
-          "api_key": ""
-          "provider": "AzureWorker"
-
-      # Kunlun Tiangong API, https://model-platform.tiangong.cn/
-      - "tiangong-api":
-          "version": "SkyChat-MegaVerse"
-          "api_key": ""
-          "secret_key": ""
-          "provider": "TianGongWorker"
-    "llm_model":
-
-      "chatglm2-6b": "THUDM/chatglm2-6b"
-      "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k"
-      "chatglm3-6b": "THUDM/chatglm3-6b"
-      "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k"
-
-      "Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf"
-      "Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf"
-      "Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf"
-
-      "Qwen-1_8B-Chat": "/media/checkpoint/Qwen-1_8B-Chat"
-      "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat"
-      "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat"
-      "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat"
-
-      "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat"
-      "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat"
-      "baichuan2-7b-chat": "baichuan-inc/Baichuan2-7B-Chat"
-      "baichuan2-13b-chat": "baichuan-inc/Baichuan2-13B-Chat"
-
-      "internlm-7b": "internlm/internlm-7b"
-      "internlm-chat-7b": "internlm/internlm-chat-7b"
-      "internlm2-chat-7b": "internlm/internlm2-chat-7b"
-      "internlm2-chat-20b": "internlm/internlm2-chat-20b"
-
-      "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat"
-      "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k"
-
-      "Yi-34B-Chat": "01-ai/Yi-34B-Chat"
-
-      "agentlm-7b": "THUDM/agentlm-7b"
-      "agentlm-13b": "THUDM/agentlm-13b"
-      "agentlm-70b": "THUDM/agentlm-70b"
-
-      "falcon-7b": "tiiuae/falcon-7b"
-      "falcon-40b": "tiiuae/falcon-40b"
-      "falcon-rw-7b": "tiiuae/falcon-rw-7b"
-
-      "aquila-7b": "BAAI/Aquila-7B"
-      "aquilachat-7b": "BAAI/AquilaChat-7B"
-      "open_llama_13b": "openlm-research/open_llama_13b"
-      "vicuna-13b-v1.5": "lmsys/vicuna-13b-v1.5"
-      "koala": "young-geng/koala"
-      "mpt-7b": "mosaicml/mpt-7b"
-      "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter"
-      "mpt-30b": "mosaicml/mpt-30b"
-      "opt-66b": "facebook/opt-66b"
-      "opt-iml-max-30b": "facebook/opt-iml-max-30b"
-      "gpt2": "gpt2"
-      "gpt2-xl": "gpt2-xl"
-      "gpt-j-6b": "EleutherAI/gpt-j-6b"
-      "gpt4all-j": "nomic-ai/gpt4all-j"
-      "gpt-neox-20b": "EleutherAI/gpt-neox-20b"
-      "pythia-12b": "EleutherAI/pythia-12b"
-      "oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
-      "dolly-v2-12b": "databricks/dolly-v2-12b"
-      "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b"
-
-    vllm_model_dict:
-      "chatglm2-6b": "THUDM/chatglm2-6b"
-      "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k"
-      "chatglm3-6b": "THUDM/chatglm3-6b"
-      "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k"
-
-      "Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf"
-      "Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf"
-      "Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf"
-
-      "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat"
-      "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat"
-      "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat"
-      "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat"
-
-      "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat"
-      "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat"
-      "baichuan2-7b-chat": "baichuan-inc/Baichuan2-7B-Chat"
-      "baichuan2-13b-chat": "baichuan-inc/Baichuan2-13B-Chat"
-
-      "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat"
-      "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k"
-
-      "internlm-7b": "internlm/internlm-7b"
-      "internlm-chat-7b": "internlm/internlm-chat-7b"
-      "internlm2-chat-7b": "internlm/internlm2-chat-7b"
-      "internlm2-chat-20b": "internlm/internlm2-chat-20b"
-
-      "aquila-7b": "BAAI/Aquila-7B"
-      "aquilachat-7b": "BAAI/AquilaChat-7B"
-
-      "falcon-7b": "tiiuae/falcon-7b"
-      "falcon-40b": "tiiuae/falcon-40b"
-      "falcon-rw-7b": "tiiuae/falcon-rw-7b"
-      "gpt2": "gpt2"
-      "gpt2-xl": "gpt2-xl"
-      "gpt-j-6b": "EleutherAI/gpt-j-6b"
-      "gpt4all-j": "nomic-ai/gpt4all-j"
-      "gpt-neox-20b": "EleutherAI/gpt-neox-20b"
-      "pythia-12b": "EleutherAI/pythia-12b"
-      "oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
-      "dolly-v2-12b": "databricks/dolly-v2-12b"
-      "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b"
-      "open_llama_13b": "openlm-research/open_llama_13b"
-      "vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3"
-      "koala": "young-geng/koala"
-      "mpt-7b": "mosaicml/mpt-7b"
-      "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter"
-      "mpt-30b": "mosaicml/mpt-30b"
-      "opt-66b": "facebook/opt-66b"
-      "opt-iml-max-30b": "facebook/opt-iml-max-30b"
-
diff --git a/configs/openai-plugins-list.json b/configs/openai-plugins-list.json
index 779da2fd..9c37e1fb 100644
--- a/configs/openai-plugins-list.json
+++ b/configs/openai-plugins-list.json
@@ -1,6 +1,6 @@
 {
   "openai_plugins": [
-    "fastchat", "openai", "imitater", "zhipuai"
+    "imitater"
   ]
 }
diff --git a/openai_plugins/openai/app.py b/openai_plugins/openai/app.py
deleted file mode 100644
index e51443d8..00000000
--- a/openai_plugins/openai/app.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from loom_core.openai_plugins.core.adapter import ProcessesInfo
-from loom_core.openai_plugins.core.application import ApplicationAdapter
-
-import os
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-
-class OpenAIApplicationAdapter(ApplicationAdapter):
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self.processesInfo = None
-        self._cfg = cfg
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return type(self).__name__
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "application_name": "openai",
-            "application_version": "0.0.1",
-            "application_description": "openai application",
-            "application_author": "openai"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # handle the case where one or both are None
-            _state_dict = state_dict or _state_dict or {}
-        return cls(cfg=cfg, state_dict=_state_dict)
-
-    def init_processes(self, processesInfo: ProcessesInfo):
-        self.processesInfo = processesInfo
-
-    def start(self):
-        pass
-
-    def stop(self):
-        pass
class name.""" - return self.__name__ - - @classmethod - def from_config(cls, cfg=None): - _state_dict = { - "application_name": "openai", - "application_version": "0.0.1", - "application_description": "openai application", - "application_author": "openai" - } - state_dict = cfg.get("state_dict", {}) - if state_dict is not None and _state_dict is not None: - _state_dict = {**state_dict, **_state_dict} - else: - # 处理其中一个或两者都为 None 的情况 - _state_dict = state_dict or _state_dict or {} - return cls(cfg=cfg, state_dict=_state_dict) - - def init_processes(self, processesInfo: ProcessesInfo): - - self.processesInfo = processesInfo - - def start(self): - pass - - def stop(self): - pass diff --git a/openai_plugins/openai/controller.py b/openai_plugins/openai/controller.py deleted file mode 100644 index c5b67fd6..00000000 --- a/openai_plugins/openai/controller.py +++ /dev/null @@ -1,47 +0,0 @@ -from loom_core.openai_plugins.core.control import ControlAdapter - -import os -import sys -import logging - -logger = logging.getLogger(__name__) -# 为了能使用插件中的函数,需要将当前目录加入到sys.path中 -root_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(root_dir) - - -class OpenAIControlAdapter(ControlAdapter): - - def __init__(self, cfg=None, state_dict: dict = None): - self._cfg = cfg - super().__init__(state_dict=state_dict) - - def class_name(self) -> str: - """Get class name.""" - return self.__name__ - - def start_model(self, new_model_name): - pass - - def stop_model(self, model_name: str): - pass - - def replace_model(self, model_name: str, new_model_name: str): - pass - - @classmethod - def from_config(cls, cfg=None): - _state_dict = { - "controller_name": "openai", - "controller_version": "0.0.1", - "controller_description": "openai controller", - "controller_author": "openai" - } - state_dict = cfg.get("state_dict", {}) - if state_dict is not None and _state_dict is not None: - _state_dict = {**state_dict, **_state_dict} - else: - # 处理其中一个或两者都为 None 的情况 - _state_dict = state_dict or _state_dict or {} - - return cls(cfg=cfg, state_dict=_state_dict) diff --git a/openai_plugins/openai/install.py b/openai_plugins/openai/install.py deleted file mode 100644 index 8e3edbad..00000000 --- a/openai_plugins/openai/install.py +++ /dev/null @@ -1,104 +0,0 @@ -import logging -import sys -import os -import subprocess -import threading -import re -import locale - -logger = logging.getLogger(__name__) -# 为了能使用插件中的函数,需要将当前目录加入到sys.path中 -root_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(root_dir) - -# Perform install -processed_install = set() -pip_list = None - - -def handle_stream(stream, prefix): - stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace') - for msg in stream: - if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg): - if msg.startswith('100%'): - print('\r' + msg, end="", file=sys.stderr), - else: - print('\r' + msg[:-1], end="", file=sys.stderr), - else: - if prefix == '[!]': - print(prefix, msg, end="", file=sys.stderr) - else: - print(prefix, msg, end="") - - -def get_installed_packages(): - global pip_list - - if pip_list is None: - try: - result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True) - pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()]) - except subprocess.CalledProcessError as e: - print(f"[ComfyUI-Manager] Failed to retrieve the information of installed pip packages.") - return set() - - return pip_list - - -def 
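`is_installed` above strips the version specifier from a requirement line before comparing the bare name against the `pip list` output. A small self-check of that parsing; `requirement_base_name` is our name for the extracted logic, not a function in the plugin:

```python
import re


def requirement_base_name(requirement: str) -> str:
    """Return the bare package name of a requirement line, e.g. 'openai>=1.7.1' -> 'openai'."""
    match = re.search(r'([^<>!=]+)([<>!=]=?)', requirement)
    name = match.group(1) if match else requirement
    return name.strip().lower()


assert requirement_base_name("openai>=1.7.1") == "openai"
assert requirement_base_name("torch==2.1.0") == "torch"
assert requirement_base_name("numpy") == "numpy"
```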
diff --git a/openai_plugins/openai/openai_plugins.json b/openai_plugins/openai/openai_plugins.json
deleted file mode 100644
index 2292ce4b..00000000
--- a/openai_plugins/openai/openai_plugins.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "plugins_name": "openai",
-  "endpoint_host": "https://api.openai.com/v1",
-  "install_file": "install.py",
-  "application_file": "app.py",
-  "application_class": "OpenAIApplicationAdapter",
-  "endpoint_controller_file": "controller.py",
-  "endpoint_controller_class": "OpenAIControlAdapter",
-  "profile_endpoint_file": "profile_endpoint.py",
-  "profile_endpoint_class": "OpenAIProfileEndpointAdapter"
-}
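The manifest maps each plugin role (install, application, controller, profile endpoint) to a file and a class name. The sketch below shows one plausible way such an entry can be resolved with `importlib`; it is a simplified stand-in, since the actual loader lives in `loom_core` and may work differently:

```python
import importlib.util
import json
import os


def load_adapter_class(plugin_dir: str, file_key: str, class_key: str):
    """Resolve one manifest entry (e.g. application_file/application_class) to a class."""
    with open(os.path.join(plugin_dir, "openai_plugins.json"), encoding="utf-8") as f:
        manifest = json.load(f)
    module_path = os.path.join(plugin_dir, manifest[file_key])
    spec = importlib.util.spec_from_file_location(manifest[class_key], module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, manifest[class_key])


# e.g. load_adapter_class("openai_plugins/openai", "application_file", "application_class")
```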
model_name="gpt-4-1106-preview", - model_description="gpt-4-1106-preview", - providers=["model", "embedding"], - model_extra_info="{}")) - # list_worker.append(LLMWorkerInfo(worker_id="text-embedding-ada-002", - # model_name="text-embedding-ada-002", - # model_description="text-embedding-ada-002", - # providers=["embedding"], - # model_extra_info="{}")) - return list_worker - - def list_llm_models(self) -> List[LLMWorkerInfo]: - """获取已配置模型列表""" - list_worker = [] - list_worker.append(LLMWorkerInfo(worker_id="gpt-3.5-turbo", - model_name="gpt-3.5-turbo", - model_description="gpt-3.5-turbo", - providers=["model", "embedding"], - model_extra_info="{}")) - list_worker.append(LLMWorkerInfo(worker_id="gpt-4", - model_name="gpt-4", - model_description="gpt-4", - providers=["model", "embedding"], - model_extra_info="{}")) - list_worker.append(LLMWorkerInfo(worker_id="gpt-4-1106-preview", - model_name="gpt-4-1106-preview", - model_description="gpt-4-1106-preview", - providers=["model", "embedding"], - model_extra_info="{}")) - # list_worker.append(LLMWorkerInfo(worker_id="text-embedding-ada-002", - # model_name="text-embedding-ada-002", - # model_description="text-embedding-ada-002", - # providers=["embedding"], - # model_extra_info="{}")) - return list_worker - - def get_model_config(self, model_name) -> LLMWorkerInfo: - - ''' - 获取LLM模型配置项(合并后的) - ''' - - info_obj = LLMWorkerInfo(worker_id=model_name, - model_name=model_name, - model_description="", - providers=["model", "embedding"], - model_extra_info="{}") - - return info_obj - - @classmethod - def from_config(cls, cfg=None): - _state_dict = { - "profile_name": "openai", - "profile_version": "0.0.1", - "profile_description": "openai profile endpoint", - "profile_author": "openai" - } - state_dict = cfg.get("state_dict", {}) - if state_dict is not None and _state_dict is not None: - _state_dict = {**state_dict, **_state_dict} - else: - # 处理其中一个或两者都为 None 的情况 - _state_dict = state_dict or _state_dict or {} - - return cls(cfg=cfg, state_dict=_state_dict) diff --git a/openai_plugins/openai/requirements.txt b/openai_plugins/openai/requirements.txt deleted file mode 100644 index c2fd4d73..00000000 --- a/openai_plugins/openai/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -openai>=1.7.1 \ No newline at end of file