diff --git a/openai_plugins/imitater/app.py b/openai_plugins/imitater/app.py
deleted file mode 100644
index 50cfb561..00000000
--- a/openai_plugins/imitater/app.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import multiprocessing as mp
-from multiprocessing import Process
-from typing import List
-
-from loom_core.constants import LOOM_LOG_BACKUP_COUNT, LOOM_LOG_MAX_BYTES
-from loom_core.openai_plugins.core.adapter import ProcessesInfo
-from loom_core.openai_plugins.core.application import ApplicationAdapter
-
-import os
-import sys
-import logging
-import logging.config
-
-from loom_core.openai_plugins.deploy.utils import get_timestamp_ms, get_config_dict, get_log_file
-from omegaconf import OmegaConf
-
-logger = logging.getLogger(__name__)
-# Add the current directory to sys.path so functions from this plugin can be imported.
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-import imitater_process_dict
-from imitater_config import ImitaterCfg
-from imitater_wrapper import run_imitater
-
-
-class ImitaterApplicationAdapter(ApplicationAdapter):
-    model_worker_started: mp.Event = None
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self.processesInfo = None
-        self._cfg = ImitaterCfg(cfg=cfg)
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return self.__class__.__name__
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "application_name": "Imitate",
-            "application_version": "0.0.1",
-            "application_description": "Imitate application",
-            "application_author": "Imitate"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # Handle the case where one or both are None.
-            _state_dict = state_dict or _state_dict or {}
-        return cls(cfg=cfg, state_dict=_state_dict)
-
-    def init_processes(self, processesInfo: ProcessesInfo):
-
-        self.processesInfo = processesInfo
-
-        logging_conf = get_config_dict(
-            processesInfo.log_level,
-            get_log_file(log_path=self._cfg.get_cfg().get("logdir"), sub_dir=f"local_{get_timestamp_ms()}"),
-            LOOM_LOG_BACKUP_COUNT,
-            LOOM_LOG_MAX_BYTES,
-        )
-        logging.config.dictConfig(logging_conf)  # type: ignore
-        worker_name = self._cfg.get_cfg().get("worker_name", [])
-        imitater_process_dict.mp_manager = mp.Manager()
-
-        # Use the "spawn" start method to prevent CUDA re-init errors in child processes.
-        mp.set_start_method(method="spawn", force=True)
-
-        self.model_worker_started = imitater_process_dict.mp_manager.Event()
-
-        process = Process(
-            target=run_imitater,
-            name=f"model_worker - {worker_name}",
-            kwargs=dict(cfg=self._cfg,
-                        worker_name=worker_name,
-                        started_event=self.model_worker_started,
-                        logging_conf=logging_conf),
-            daemon=True,
-        )
-        imitater_process_dict.processes[worker_name] = process
-
-    def start(self):
-
-        for n, p in imitater_process_dict.processes.items():
-            p.start()
-            p.name = f"{p.name} ({p.pid})"
-
-        # Wait for the model worker to finish starting.
-        # self.model_worker_started.wait()
-
-    def stop(self):
-        imitater_process_dict.stop()
-
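Note: init_processes above creates the worker with the "spawn" start method and hands it a Manager Event, but the wait on that event is commented out in start(). A minimal sketch of the intended readiness handshake, with illustrative names that are not part of the plugin:

    import multiprocessing as mp

    def worker(started_event):
        # ... load the model here ...
        started_event.set()  # signal readiness to the parent

    if __name__ == "__main__":
        mp.set_start_method("spawn", force=True)  # avoid CUDA re-init errors in the child
        manager = mp.Manager()
        started = manager.Event()
        p = mp.Process(target=worker, kwargs=dict(started_event=started), daemon=True)
        p.start()
        started.wait(timeout=300)  # block until the worker reports ready
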
diff --git a/openai_plugins/imitater/controller.py b/openai_plugins/imitater/controller.py
deleted file mode 100644
index 81077fef..00000000
--- a/openai_plugins/imitater/controller.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import time
-from multiprocessing import Process
-
-from loom_core.openai_plugins.core.control import ControlAdapter
-
-import os
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-# Add the current directory to sys.path so functions from this plugin can be imported.
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-import imitater_process_dict
-
-from imitater_wrapper import run_imitater
-from imitater_config import ImitaterCfg
-
-
-class ImitaterControlAdapter(ControlAdapter):
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self._cfg = ImitaterCfg(cfg=cfg)
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return self.__class__.__name__
-
-    def start_model(self, new_model_name):
-
-        imitater_process_dict.stop()
-
-        logger.info(f"Preparing to start new process: {new_model_name}")
-        e = imitater_process_dict.mp_manager.Event()
-        process = Process(
-            target=run_imitater,
-            name=f"model_worker - {new_model_name}",
-            kwargs=dict(cfg=self._cfg,
-                        worker_name=new_model_name,
-                        started_event=e),
-            daemon=True,
-        )
-        process.start()
-        process.name = f"{process.name} ({process.pid})"
-        imitater_process_dict.processes[new_model_name] = process
-        # e.wait()
-        logger.info(f"Started new process: {new_model_name}")
-
-    def stop_model(self, model_name: str):
-
-        if model_name in imitater_process_dict.processes:
-            process = imitater_process_dict.processes.pop(model_name)
-            time.sleep(1)
-            process.kill()
-            logger.info(f"Stopped process: {model_name}")
-        else:
-            logger.error(f"Process not found: {model_name}")
-            raise Exception(f"Process not found: {model_name}")
-
-    def replace_model(self, model_name: str, new_model_name: str):
-        pass
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "controller_name": "Imitate",
-            "controller_version": "0.0.1",
-            "controller_description": "Imitate controller",
-            "controller_author": "Imitate"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # Handle the case where one or both are None.
-            _state_dict = state_dict or _state_dict or {}
-
-        return cls(cfg=cfg, state_dict=_state_dict)
diff --git a/openai_plugins/imitater/generation_config/internlm2/generation_config.json b/openai_plugins/imitater/generation_config/internlm2/generation_config.json
deleted file mode 100644
index b15eff06..00000000
--- a/openai_plugins/imitater/generation_config/internlm2/generation_config.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "bos_token_id": 1,
-  "eos_token_id": [2, 92542],
-  "pad_token_id": 2,
-  "max_new_tokens": 1024,
-  "do_sample": true,
-  "transformers_version": "4.33.2"
-}
\ No newline at end of file
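Note: stop_model above sleeps for one second and then kills the worker outright. A gentler shutdown, sketched here with the same multiprocessing API, would send SIGTERM first and escalate to SIGKILL only if the process refuses to exit:

    from multiprocessing import Process

    def stop_process(process: Process, grace_seconds: float = 5.0) -> None:
        process.terminate()                # SIGTERM: let the worker clean up
        process.join(timeout=grace_seconds)
        if process.is_alive():
            process.kill()                 # SIGKILL as a last resort
            process.join()
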
diff --git a/openai_plugins/imitater/generation_config/qwen/generation_config.json b/openai_plugins/imitater/generation_config/qwen/generation_config.json
deleted file mode 100644
index 5d665484..00000000
--- a/openai_plugins/imitater/generation_config/qwen/generation_config.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "eos_token_id": [151643, 151645],
-  "pad_token_id": 151643,
-  "max_new_tokens": 1024,
-  "do_sample": true,
-  "top_k": 0,
-  "top_p": 0.8,
-  "transformers_version": "4.34.0"
-}
\ No newline at end of file
diff --git a/openai_plugins/imitater/imitater_config.py b/openai_plugins/imitater/imitater_config.py
deleted file mode 100644
index 99d8cade..00000000
--- a/openai_plugins/imitater/imitater_config.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Default timeout (in seconds) for httpx requests. If loading a model or chatting
-# is slow and timeout errors occur, increase this value.
-from typing import List, TypedDict
-
-HTTPX_DEFAULT_TIMEOUT = 300.0
-log_verbose = True
-
-
-class ImitaterModel(TypedDict):
-    name: str
-    chat_model_path: str
-    chat_model_device: str
-    chat_template_path: str
-    generation_config_path: str
-    agent_type: str
-
-
-class ImitaterEmbedding(TypedDict):
-    name: str
-    embed_model_path: str
-    embed_model_device: str
-    embed_batch_size: int
-
-
-class ImitaterWorker(TypedDict):
-    name: str
-    model: ImitaterModel
-    embedding: ImitaterEmbedding
-
-
-class ImitaterCfg:
-    def __init__(self, cfg: dict = None):
-        if cfg is None:
-            raise RuntimeError("ImitaterCfg cfg is None.")
-        self._cfg = cfg
-
-    def get_cfg(self):
-        return self._cfg
-
-    def get_run_openai_api_cfg(self):
-        return self._cfg.get("run_openai_api", {})
-
-    def get_imitate_model_workers_by_name(self, name: str) -> ImitaterWorker:
-
-        imitate_model_workers_cfg = self._cfg.get("imitate_model_workers", None)
-        if imitate_model_workers_cfg is None:
-            raise RuntimeError("imitate_model_workers_cfg is None.")
-
-        get = lambda model_name: imitate_model_workers_cfg[
-            self.get_imitate_model_workers_index_by_name(model_name)
-        ].get(model_name, {})
-        imitate = get(name)
-        # Build an ImitaterWorker from the raw config entry.
-        worker_cfg = ImitaterWorker(name=name,
-                                    model=ImitaterModel(**imitate.get("model", {})),
-                                    embedding=ImitaterEmbedding(**imitate.get("embedding", {}))
-                                    )
-        return worker_cfg
-
-    def get_imitate_model_workers_names(self) -> List[str]:
-
-        imitate_model_workers_cfg = self._cfg.get("imitate_model_workers", None)
-        if imitate_model_workers_cfg is None:
-            raise RuntimeError("imitate_model_workers_cfg is None.")
-        worker_name_cfg = []
-        for cfg in imitate_model_workers_cfg:
-            for key, imitate_model_workers in cfg.items():
-                worker_name_cfg.append(key)
-        return worker_name_cfg
-
-    def get_imitate_model_workers_index_by_name(self, name) -> int:
-
-        imitate_model_workers_cfg = self._cfg.get("imitate_model_workers", None)
-        if imitate_model_workers_cfg is None:
-            raise RuntimeError("imitate_model_workers_cfg is None.")
-
-        for cfg in imitate_model_workers_cfg:
-            for key, imitate_model_workers in cfg.items():
-                if key == name:
-                    return imitate_model_workers_cfg.index(cfg)
-        return -1
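Note: ImitaterCfg expects "imitate_model_workers" to be a list of single-key dicts, one key per worker name, which is why the accessors above use a nested loop and an index lookup. A hypothetical config dict illustrating the shape these methods walk (all paths and names are made up):

    cfg = {
        "run_openai_api": {"port": 30000},
        "imitate_model_workers": [
            {
                "qwen-chat": {  # worker name, used as the lookup key
                    "model": {
                        "name": "qwen-chat",
                        "chat_model_path": "/models/qwen-chat",
                        "chat_model_device": "cuda:0",
                        "chat_template_path": "templates/qwen.jinja",
                        "generation_config_path": "generation_config/qwen",
                        "agent_type": "default",
                    },
                    "embedding": {
                        "name": "embed",
                        "embed_model_path": "/models/embed",
                        "embed_model_device": "cuda:0",
                        "embed_batch_size": 32,
                    },
                }
            },
        ],
    }
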
diff --git a/openai_plugins/imitater/imitater_process_dict.py b/openai_plugins/imitater/imitater_process_dict.py
deleted file mode 100644
index ac264128..00000000
--- a/openai_plugins/imitater/imitater_process_dict.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from multiprocessing import Process
-from typing import Dict
-import logging
-
-logger = logging.getLogger(__name__)
-mp_manager = None
-processes: Dict[str, Process] = {}
-
-
-def stop():
-    for n, process in processes.items():
-        logger.warning("Sending SIGKILL to %s", process)
-        try:
-            process.kill()
-        except Exception:
-            logger.info("Failed to kill process %s", process, exc_info=True)
-
-    for n, p in processes.items():
-        logger.info("Process status: %s", p)
-
-    # Clear the registry in place; "del processes" would fail on the module global.
-    processes.clear()
diff --git a/openai_plugins/imitater/imitater_wrapper.py b/openai_plugins/imitater/imitater_wrapper.py
deleted file mode 100644
index 6db0bf4c..00000000
--- a/openai_plugins/imitater/imitater_wrapper.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import multiprocessing
-from typing import List, Optional, Dict
-
-from fastapi import FastAPI
-import sys
-
-import multiprocessing as mp
-import uvicorn
-import os
-import logging
-import logging.config
-import asyncio
-import signal
-import inspect
-
-logger = logging.getLogger(__name__)
-# Add the current directory to sys.path so functions from fastchat_wrapper.py can be used.
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-from imitater_config import ImitaterCfg
-
-"""
-Keep these as module-level functions to avoid "Can't pickle function" errors
-when they are handed to a spawned process.
-"""
-
-
-def _start_imitater(
-        started_event: mp.Event = None
-):
-    from imitater.service.app import launch_app
-    # Ignore keyboard interrupts; the parent process handles shutdown.
-    signal.signal(signal.SIGINT, lambda *_: None)
-    launch_app()
-
-
-def run_imitater(
-        cfg: ImitaterCfg,
-        worker_name: str,
-        started_event: mp.Event = None,
-        logging_conf: Optional[dict] = None):
-    # Ignore keyboard interrupts; the parent process handles shutdown.
-    signal.signal(signal.SIGINT, lambda *_: None)
-
-    logging.config.dictConfig(logging_conf)  # type: ignore
-    import os
-    worker_cfg = cfg.get_imitate_model_workers_by_name(worker_name)
-
-    os.environ["AGENT_TYPE"] = worker_cfg.get("model").get("agent_type")
-    os.environ["CHAT_MODEL_PATH"] = worker_cfg.get("model").get("chat_model_path")
-    os.environ["CHAT_MODEL_DEVICE"] = worker_cfg.get("model").get("chat_model_device")
-    os.environ["CHAT_TEMPLATE_PATH"] = worker_cfg.get("model").get("chat_template_path")
-    os.environ["GENERATION_CONFIG_PATH"] = worker_cfg.get("model").get("generation_config_path")
-
-    os.environ["EMBED_MODEL_PATH"] = worker_cfg.get("embedding").get("embed_model_path")
-    os.environ["EMBED_MODEL_DEVICE"] = worker_cfg.get("embedding").get("embed_model_device")
-    os.environ["EMBED_BATCH_SIZE"] = str(worker_cfg.get("embedding").get("embed_batch_size"))
-
-    os.environ["SERVICE_PORT"] = str(cfg.get_run_openai_api_cfg().get("port", 30000))
-
-    _start_imitater(started_event=started_event)
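Note: run_imitater above passes the worker configuration to the imitater service through environment variables set before launch_app() runs, and environment variables also survive the spawn boundary between parent and child. A reduced sketch of that handoff (the variable value is hypothetical):

    import os
    from multiprocessing import get_context

    def child():
        # The spawned child sees the environment captured at process creation.
        print("worker sees CHAT_MODEL_PATH =", os.environ.get("CHAT_MODEL_PATH"))

    if __name__ == "__main__":
        os.environ["CHAT_MODEL_PATH"] = "/models/example"
        ctx = get_context("spawn")
        p = ctx.Process(target=child)
        p.start()
        p.join()
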
diff --git a/openai_plugins/imitater/install.py b/openai_plugins/imitater/install.py
deleted file mode 100644
index 8e3edbad..00000000
--- a/openai_plugins/imitater/install.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import logging
-import sys
-import os
-import subprocess
-import threading
-import re
-import locale
-
-logger = logging.getLogger(__name__)
-# Add the current directory to sys.path so functions from this plugin can be imported.
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-# Perform install
-processed_install = set()
-pip_list = None
-
-
-def handle_stream(stream, prefix):
-    stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace')
-    for msg in stream:
-        if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg):
-            # Rewrite tqdm-style progress lines in place instead of printing new lines.
-            if msg.startswith('100%'):
-                print('\r' + msg, end="", file=sys.stderr)
-            else:
-                print('\r' + msg[:-1], end="", file=sys.stderr)
-        else:
-            if prefix == '[!]':
-                print(prefix, msg, end="", file=sys.stderr)
-            else:
-                print(prefix, msg, end="")
-
-
-def get_installed_packages():
-    global pip_list
-
-    if pip_list is None:
-        try:
-            result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True)
-            pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()])
-        except subprocess.CalledProcessError:
-            print("[openai_plugins] Failed to retrieve the list of installed pip packages.")
-            return set()
-
-    return pip_list
-
-
-def is_installed(name):
-    name = name.strip()
-
-    if name.startswith('#'):
-        return True
-
-    pattern = r'([^<>!=]+)([<>!=]=?)'
-    match = re.search(pattern, name)
-
-    if match:
-        name = match.group(1)
-
-    return name.lower() in get_installed_packages()
-
-
-def process_wrap(cmd_str, cwd_path, handler=None):
-    process = subprocess.Popen(cmd_str, cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
-                               bufsize=1)
-
-    if handler is None:
-        handler = handle_stream
-
-    # Pump stdout and stderr on separate threads so neither pipe blocks.
-    stdout_thread = threading.Thread(target=handler, args=(process.stdout, ""))
-    stderr_thread = threading.Thread(target=handler, args=(process.stderr, "[!]"))
-
-    stdout_thread.start()
-    stderr_thread.start()
-
-    stdout_thread.join()
-    stderr_thread.join()
-
-    return process.wait()
-
-
-def install():
-    try:
-        requirements_path = os.path.join(root_dir, 'requirements.txt')
-
-        this_exit_code = 0
-
-        if os.path.exists(requirements_path):
-            with open(requirements_path, 'r', encoding="UTF-8") as file:
-                for line in file:
-                    package_name = line.strip()
-                    if package_name and not is_installed(package_name):
-                        install_cmd = [sys.executable, "-m", "pip", "install", package_name]
-                        this_exit_code += process_wrap(install_cmd, root_dir)
-
-        if this_exit_code != 0:
-            logger.info("[openai_plugins] Installing plugin requirements failed.")
-
-    except Exception:
-        logger.error("[openai_plugins] Installing plugin requirements failed.", exc_info=True)
-
-
-if __name__ == "__main__":
-    install()
diff --git a/openai_plugins/imitater/openai_plugins.json b/openai_plugins/imitater/openai_plugins.json
deleted file mode 100644
index 9486b430..00000000
--- a/openai_plugins/imitater/openai_plugins.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "plugins_name": "imitater",
-  "endpoint_host": "http://127.0.0.1:30000/v1",
-  "install_file": "install.py",
-  "application_file": "app.py",
-  "application_class": "ImitaterApplicationAdapter",
-  "endpoint_controller_file": "controller.py",
-  "endpoint_controller_class": "ImitaterControlAdapter",
-  "profile_endpoint_file": "profile_endpoint.py",
-  "profile_endpoint_class": "ImitaterProfileEndpointAdapter"
-}
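Note: is_installed above strips version specifiers with a regex and compares names against the output of "pip list", so a pin like "foo>=1.2" passes whenever any version of foo is present. A stdlib sketch (an alternative, not what this installer does) that avoids the subprocess entirely:

    from importlib import metadata

    def is_installed(requirement: str) -> bool:
        # Compare by distribution name only, mirroring the regex above.
        name = requirement.split(";")[0]
        for sep in ("<", ">", "!", "=", "~"):
            name = name.split(sep)[0]
        try:
            metadata.distribution(name.strip())
            return True
        except metadata.PackageNotFoundError:
            return False
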
diff --git a/openai_plugins/imitater/profile_endpoint.py b/openai_plugins/imitater/profile_endpoint.py
deleted file mode 100644
index 0a872fef..00000000
--- a/openai_plugins/imitater/profile_endpoint.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import json
-from typing import List
-
-from loom_core.openai_plugins.core.adapter import LLMWorkerInfo
-from loom_core.openai_plugins.core.profile_endpoint.core import ProfileEndpointAdapter
-
-import os
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-# Add the current directory to sys.path so functions from this plugin can be imported.
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-from imitater_config import ImitaterCfg
-import imitater_process_dict
-
-
-class ImitaterProfileEndpointAdapter(ProfileEndpointAdapter):
-    """Adapter for the profile endpoint."""
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self._cfg = ImitaterCfg(cfg=cfg)
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return self.__class__.__name__
-
-    def list_running_models(self) -> List[LLMWorkerInfo]:
-        """List running models and their configuration."""
-        list_worker = []
-
-        for worker_name, process in imitater_process_dict.processes.items():
-            list_worker.append(self.get_model_config(worker_name))
-        return list_worker
-
-    def list_llm_models(self) -> List[LLMWorkerInfo]:
-        """List the configured models."""
-        list_worker = []
-        workers_names = self._cfg.get_imitate_model_workers_names()
-        for worker_name in workers_names:
-            list_worker.append(self.get_model_config(worker_name))
-
-        return list_worker
-
-    def get_model_config(self, model_name) -> LLMWorkerInfo:
-        '''
-        Get the merged configuration of an LLM model.
-        '''
-        worker_cfg = self._cfg.get_imitate_model_workers_by_name(model_name)
-
-        info_obj = LLMWorkerInfo(worker_id=model_name,
-                                 model_name=model_name,
-                                 model_description="",
-                                 providers=["model", "embedding"],
-                                 model_extra_info=json.dumps(dict(worker_cfg), ensure_ascii=False, indent=4))
-        return info_obj
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "profile_name": "Imitate",
-            "profile_version": "0.0.1",
-            "profile_description": "Imitate profile endpoint",
-            "profile_author": "Imitate"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # Handle the case where one or both are None.
-            _state_dict = state_dict or _state_dict or {}
-
-        return cls(cfg=cfg, state_dict=_state_dict)
diff --git a/openai_plugins/imitater/requirements.txt b/openai_plugins/imitater/requirements.txt
deleted file mode 100644
index e69de29b..00000000
diff --git a/openai_plugins/imitater/templates/baichuan.jinja b/openai_plugins/imitater/templates/baichuan.jinja
deleted file mode 100644
index ae65f8be..00000000
--- a/openai_plugins/imitater/templates/baichuan.jinja
+++ /dev/null
@@ -1,13 +0,0 @@
-{%- if not add_generation_prompt is defined -%}
-    {%- set add_generation_prompt = false -%}
-{%- endif -%}
-{%- for message in messages -%}
-    {%- if message['role'] == 'user' -%}
-        {{ '' + message['content']}}
-    {%- elif message['role'] == 'assistant' -%}
-        {{ '' + message['content']}}
-    {%- endif -%}
-{%- endfor -%}
-{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%}
-    {{ '' }}
-{%- endif -%}
diff --git a/openai_plugins/imitater/templates/default.jinja b/openai_plugins/imitater/templates/default.jinja
deleted file mode 100644
index 043f7e15..00000000
--- a/openai_plugins/imitater/templates/default.jinja
+++ /dev/null
@@ -1,15 +0,0 @@
-{%- if not add_generation_prompt is defined -%}
-    {%- set add_generation_prompt = false -%}
-{%- endif -%}
-{%- for message in messages -%}
-    {%- if message['role'] == 'system' -%}
-        {{ message['content'] }}
-    {%- elif message['role'] == 'user' -%}
-        {{ 'Human: ' + message['content'] + '\\n' }}
-    {%- elif message['role'] == 'assistant' -%}
-        {{ 'Assistant: ' + message['content'] + '\\n' }}
-    {%- endif -%}
-{%- endfor -%}
-{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%}
-    {{ 'Assistant: ' }}
-{%- endif -%}
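Note: the .jinja templates in this plugin follow the transformers chat-template convention: they receive "messages" and "add_generation_prompt" and emit a flat prompt string. A quick way to exercise one outside the service, as a sketch using plain jinja2 and assuming the template file is still on disk:

    from jinja2 import Template

    with open("openai_plugins/imitater/templates/default.jinja", encoding="utf-8") as f:
        template = Template(f.read())

    prompt = template.render(
        messages=[{"role": "user", "content": "Hello!"}],
        add_generation_prompt=True,
    )
    print(prompt)
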
diff --git a/openai_plugins/imitater/templates/internlm2.jinja b/openai_plugins/imitater/templates/internlm2.jinja
deleted file mode 100644
index 96eb8a8d..00000000
--- a/openai_plugins/imitater/templates/internlm2.jinja
+++ /dev/null
@@ -1,20 +0,0 @@
-{%- if not add_generation_prompt is defined -%}
-    {%- set add_generation_prompt = false -%}
-{%- endif -%}
-{%- if messages[0]['role'] == 'system' -%}
-    {%- set loop_messages = messages[1:] -%}
-    {%- set system_message = messages[0]['content'] -%}
-{%- else -%}
-    {%- set loop_messages = messages -%}
-    {%- set system_message = 'You are an AI assistant whose name is InternLM (书生·浦语).\\n- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\\n- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.' -%}
-{%- endif -%}
-{{ '' + '[UNUSED_TOKEN_146]' + 'system' + '\\n' + system_message + '[UNUSED_TOKEN_145]' + '\\n' }}
-{%- for message in loop_messages -%}
-    {{ '[UNUSED_TOKEN_146]' + message['role'] + '\\n' + message['content']}}
-    {%- if (loop.last and add_generation_prompt) or not loop.last -%}
-        {{ '[UNUSED_TOKEN_145]' + '\\n' }}
-    {%- endif -%}
-{%- endfor -%}
-{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%}
-    {{ '[UNUSED_TOKEN_146]' + 'assistant' + '\\n' }}
-{%- endif -%}
diff --git a/openai_plugins/imitater/templates/qwen.jinja b/openai_plugins/imitater/templates/qwen.jinja
deleted file mode 100644
index 1ac25602..00000000
--- a/openai_plugins/imitater/templates/qwen.jinja
+++ /dev/null
@@ -1,20 +0,0 @@
-{%- if not add_generation_prompt is defined -%}
-    {%- set add_generation_prompt = false -%}
-{%- endif -%}
-{%- if messages[0]['role'] == 'system' -%}
-    {%- set loop_messages = messages[1:] -%}
-    {%- set system_message = messages[0]['content'] -%}
-{%- else -%}
-    {%- set loop_messages = messages -%}
-    {%- set system_message = 'You are a helpful assistant.' -%}
-{%- endif -%}
-{{ '<|im_start|>' + 'system' + '\n' + system_message + '<|im_end|>' + '\n' }}
-{%- for message in loop_messages -%}
-    {{ '<|im_start|>' + message['role'] + '\n' + message['content']}}
-    {%- if (loop.last and add_generation_prompt) or not loop.last -%}
-        {{ '<|im_end|>' + '\n' }}
-    {%- endif -%}
-{%- endfor -%}
-{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%}
-    {{ '<|im_start|>' + 'assistant' + '\n' }}
-{%- endif -%}
diff --git a/openai_plugins/imitater/templates/yi.jinja b/openai_plugins/imitater/templates/yi.jinja
deleted file mode 100644
index a78e4cc0..00000000
--- a/openai_plugins/imitater/templates/yi.jinja
+++ /dev/null
@@ -1,12 +0,0 @@
-{%- if not add_generation_prompt is defined -%}
-    {%- set add_generation_prompt = false -%}
-{%- endif -%}
-{%- for message in messages -%}
-    {{ '<|im_start|>' + message['role'] + '\\n' + message['content']}}
-    {%- if (loop.last and add_generation_prompt) or not loop.last -%}
-        {{ '<|im_end|>' + '\\n' }}
-    {%- endif -%}
-{%- endfor -%}
-{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%}
-    {{ '<|im_start|>' + 'assistant' + '\\n' }}
-{%- endif -%}
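Note: for reference, the qwen.jinja template above renders messages into ChatML. With no system message supplied, one user turn, and add_generation_prompt set to true, it produces:

    <|im_start|>system
    You are a helpful assistant.<|im_end|>
    <|im_start|>user
    Hello!<|im_end|>
    <|im_start|>assistant
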
diff --git a/openai_plugins/zhipuai/app.py b/openai_plugins/zhipuai/app.py
deleted file mode 100644
index e8d4678a..00000000
--- a/openai_plugins/zhipuai/app.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from loom_core.openai_plugins.core.adapter import ProcessesInfo
-from loom_core.openai_plugins.core.application import ApplicationAdapter
-
-import os
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-
-class ZhipuAIApplicationAdapter(ApplicationAdapter):
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self.processesInfo = None
-        self._cfg = cfg
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return self.__class__.__name__
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "application_name": "zhipuai",
-            "application_version": "0.0.1",
-            "application_description": "zhipuai application",
-            "application_author": "zhipuai"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # Handle the case where one or both are None.
-            _state_dict = state_dict or _state_dict or {}
-        return cls(cfg=cfg, state_dict=_state_dict)
-
-    def init_processes(self, processesInfo: ProcessesInfo):
-
-        self.processesInfo = processesInfo
-
-    def start(self):
-        pass
-
-    def stop(self):
-        pass
diff --git a/openai_plugins/zhipuai/controller.py b/openai_plugins/zhipuai/controller.py
deleted file mode 100644
index 6b0f3f35..00000000
--- a/openai_plugins/zhipuai/controller.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from loom_core.openai_plugins.core.control import ControlAdapter
-
-import os
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-
-class ZhipuAIControlAdapter(ControlAdapter):
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self._cfg = cfg
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return self.__class__.__name__
-
-    def start_model(self, new_model_name):
-        pass
-
-    def stop_model(self, model_name: str):
-        pass
-
-    def replace_model(self, model_name: str, new_model_name: str):
-        pass
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "controller_name": "zhipuai",
-            "controller_version": "0.0.1",
-            "controller_description": "zhipuai controller",
-            "controller_author": "zhipuai"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # Handle the case where one or both are None.
-            _state_dict = state_dict or _state_dict or {}
-
-        return cls(cfg=cfg, state_dict=_state_dict)
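Note: every from_config in these plugins merges the caller's state_dict with the plugin defaults as {**state_dict, **_state_dict}, so on key collisions the plugin defaults win. If caller overrides were meant to take precedence, the spread order would have to be reversed:

    defaults = {"controller_name": "zhipuai", "controller_version": "0.0.1"}
    overrides = {"controller_version": "0.0.2"}

    merged = {**overrides, **defaults}      # as written above: defaults win ("0.0.1")
    merged_alt = {**defaults, **overrides}  # reversed: overrides win ("0.0.2")
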
diff --git a/openai_plugins/zhipuai/install.py b/openai_plugins/zhipuai/install.py
deleted file mode 100644
index a7b8cf57..00000000
--- a/openai_plugins/zhipuai/install.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import logging
-import sys
-import os
-import subprocess
-import threading
-import re
-import locale
-
-logger = logging.getLogger(__name__)
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-processed_install = set()
-pip_list = None
-
-
-def handle_stream(stream, prefix):
-    stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace')
-    for msg in stream:
-        if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg):
-            # Rewrite tqdm-style progress lines in place instead of printing new lines.
-            if msg.startswith('100%'):
-                print('\r' + msg, end="", file=sys.stderr)
-            else:
-                print('\r' + msg[:-1], end="", file=sys.stderr)
-        else:
-            if prefix == '[!]':
-                print(prefix, msg, end="", file=sys.stderr)
-            else:
-                print(prefix, msg, end="")
-
-
-def get_installed_packages():
-    global pip_list
-
-    if pip_list is None:
-        try:
-            result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True)
-            pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()])
-        except subprocess.CalledProcessError:
-            print("[openai_plugins] Failed to retrieve the list of installed pip packages.")
-            return set()
-
-    return pip_list
-
-
-def is_installed(name):
-    name = name.strip()
-
-    if name.startswith('#'):
-        return True
-
-    pattern = r'([^<>!=]+)([<>!=]=?)'
-    match = re.search(pattern, name)
-
-    if match:
-        name = match.group(1)
-
-    return name.lower() in get_installed_packages()
-
-
-def process_wrap(cmd_str, cwd_path, handler=None):
-    process = subprocess.Popen(cmd_str, cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
-                               bufsize=1)
-
-    if handler is None:
-        handler = handle_stream
-
-    # Pump stdout and stderr on separate threads so neither pipe blocks.
-    stdout_thread = threading.Thread(target=handler, args=(process.stdout, ""))
-    stderr_thread = threading.Thread(target=handler, args=(process.stderr, "[!]"))
-
-    stdout_thread.start()
-    stderr_thread.start()
-
-    stdout_thread.join()
-    stderr_thread.join()
-
-    return process.wait()
-
-
-def install():
-    try:
-        requirements_path = os.path.join(root_dir, 'requirements.txt')
-
-        this_exit_code = 0
-
-        if os.path.exists(requirements_path):
-            with open(requirements_path, 'r', encoding="UTF-8") as file:
-                for line in file:
-                    package_name = line.strip()
-                    if package_name and not is_installed(package_name):
-                        install_cmd = [sys.executable, "-m", "pip", "install", package_name]
-                        this_exit_code += process_wrap(install_cmd, root_dir)
-
-        if this_exit_code != 0:
-            logger.info("[openai_plugins] Installing plugin requirements failed.")
-
-    except Exception:
-        logger.error("[openai_plugins] Installing plugin requirements failed.", exc_info=True)
-
-
-if __name__ == "__main__":
-    install()
diff --git a/openai_plugins/zhipuai/openai_plugins.json b/openai_plugins/zhipuai/openai_plugins.json
deleted file mode 100644
index 24a0708a..00000000
--- a/openai_plugins/zhipuai/openai_plugins.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "plugins_name": "zhipuai",
-  "endpoint_host": "https://open.bigmodel.cn/api/paas/v4/",
-  "install_file": "install.py",
-  "application_file": "app.py",
-  "application_class": "ZhipuAIApplicationAdapter",
-  "endpoint_controller_file": "controller.py",
-  "endpoint_controller_class": "ZhipuAIControlAdapter",
-  "profile_endpoint_file": "profile_endpoint.py",
-  "profile_endpoint_class": "ZhipuAIProfileEndpointAdapter"
-}
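Note: openai_plugins.json maps each adapter role to a file and a class inside the plugin directory. The loader itself is not part of this diff; a hypothetical resolver for one manifest entry might look roughly like this:

    import importlib.util
    import json
    import os

    def load_adapter(plugin_dir, file_key, class_key):
        # e.g. file_key="application_file", class_key="application_class"
        with open(os.path.join(plugin_dir, "openai_plugins.json"), encoding="utf-8") as f:
            manifest = json.load(f)
        path = os.path.join(plugin_dir, manifest[file_key])
        spec = importlib.util.spec_from_file_location(manifest[class_key], path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return getattr(module, manifest[class_key])
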
diff --git a/openai_plugins/zhipuai/profile_endpoint.py b/openai_plugins/zhipuai/profile_endpoint.py
deleted file mode 100644
index 0fd8969b..00000000
--- a/openai_plugins/zhipuai/profile_endpoint.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from typing import List
-
-from loom_core.openai_plugins.core.adapter import LLMWorkerInfo
-from loom_core.openai_plugins.core.profile_endpoint.core import ProfileEndpointAdapter
-
-import os
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(root_dir)
-
-
-class ZhipuAIProfileEndpointAdapter(ProfileEndpointAdapter):
-    """Adapter for the profile endpoint."""
-
-    def __init__(self, cfg=None, state_dict: dict = None):
-        self._cfg = cfg
-        super().__init__(state_dict=state_dict)
-
-    def class_name(self) -> str:
-        """Get class name."""
-        return self.__class__.__name__
-
-    def list_running_models(self) -> List[LLMWorkerInfo]:
-        """List running models and their configuration."""
-        list_worker = []
-        list_worker.append(LLMWorkerInfo(worker_id="glm-4",
-                                         model_name="glm-4",
-                                         model_description="glm-4",
-                                         providers=["model", "embedding"],
-                                         model_extra_info="{}"))
-        list_worker.append(LLMWorkerInfo(worker_id="glm-3-turbo",
-                                         model_name="glm-3-turbo",
-                                         model_description="glm-3-turbo",
-                                         providers=["model", "embedding"],
-                                         model_extra_info="{}"))
-        # list_worker.append(LLMWorkerInfo(worker_id="embedding-2",
-        #                                  model_name="embedding-2",
-        #                                  model_description="embedding-2",
-        #                                  providers=["embedding"],
-        #                                  model_extra_info="{}"))
-        return list_worker
-
-    def list_llm_models(self) -> List[LLMWorkerInfo]:
-        """List the configured models."""
-        list_worker = []
-        list_worker.append(LLMWorkerInfo(worker_id="glm-4",
-                                         model_name="glm-4",
-                                         model_description="glm-4",
-                                         providers=["model", "embedding"],
-                                         model_extra_info="{}"))
-        list_worker.append(LLMWorkerInfo(worker_id="glm-3-turbo",
-                                         model_name="glm-3-turbo",
-                                         model_description="glm-3-turbo",
-                                         providers=["model", "embedding"],
-                                         model_extra_info="{}"))
-        # list_worker.append(LLMWorkerInfo(worker_id="embedding-2",
-        #                                  model_name="embedding-2",
-        #                                  model_description="embedding-2",
-        #                                  providers=["embedding"],
-        #                                  model_extra_info="{}"))
-        return list_worker
-
-    def get_model_config(self, model_name) -> LLMWorkerInfo:
-        '''
-        Get the merged configuration of an LLM model.
-        '''
-        info_obj = LLMWorkerInfo(worker_id=model_name,
-                                 model_name=model_name,
-                                 model_description="",
-                                 providers=["model", "embedding"],
-                                 model_extra_info="{}")
-
-        return info_obj
-
-    @classmethod
-    def from_config(cls, cfg=None):
-        _state_dict = {
-            "profile_name": "zhipuai",
-            "profile_version": "0.0.1",
-            "profile_description": "zhipuai profile endpoint",
-            "profile_author": "zhipuai"
-        }
-        state_dict = cfg.get("state_dict", {})
-        if state_dict is not None and _state_dict is not None:
-            _state_dict = {**state_dict, **_state_dict}
-        else:
-            # Handle the case where one or both are None.
-            _state_dict = state_dict or _state_dict or {}
-
-        return cls(cfg=cfg, state_dict=_state_dict)
diff --git a/openai_plugins/zhipuai/requirements.txt b/openai_plugins/zhipuai/requirements.txt
deleted file mode 100644
index bb53db5f..00000000
--- a/openai_plugins/zhipuai/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-zhipuai>=2.0.1
\ No newline at end of file
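Note: a minimal smoke test for the profile adapter above, assuming loom_core is importable, that from_config accepts a plain dict, and that LLMWorkerInfo exposes its fields as attributes:

    from profile_endpoint import ZhipuAIProfileEndpointAdapter

    adapter = ZhipuAIProfileEndpointAdapter.from_config(cfg={"state_dict": {}})
    for info in adapter.list_llm_models():
        print(info.worker_id, info.providers)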