ConfigModelWorkSpace implementation

glide-the 2024-06-11 12:47:11 +08:00
parent 5a60f5f149
commit cd01bb8601
5 changed files with 513 additions and 275 deletions

View File

@ -363,108 +363,140 @@ def _import_embedding_keyword_file() -> Any:
return EMBEDDING_KEYWORD_FILE
def _import_ConfigModel() -> Any:
basic_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = basic_config_load.get("load_mod")
ConfigModel = load_mod(basic_config_load.get("module"), "ConfigModel")
return ConfigModel
def _import_ConfigModelFactory() -> Any:
basic_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = basic_config_load.get("load_mod")
ConfigModelFactory = load_mod(basic_config_load.get("module"), "ConfigModelFactory")
return ConfigModelFactory
def _import_ConfigModelWorkSpace() -> Any:
basic_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = basic_config_load.get("load_mod")
ConfigModelWorkSpace = load_mod(basic_config_load.get("module"), "ConfigModelWorkSpace")
return ConfigModelWorkSpace
def _import_config_model_workspace() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return config_model_workspace
def _import_default_llm_model() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
DEFAULT_LLM_MODEL = load_mod(model_config_load.get("module"), "DEFAULT_LLM_MODEL")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return DEFAULT_LLM_MODEL
return config_model_workspace.get_config().DEFAULT_LLM_MODEL
def _import_default_embedding_model() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
DEFAULT_EMBEDDING_MODEL = load_mod(model_config_load.get("module"), "DEFAULT_EMBEDDING_MODEL")
return DEFAULT_EMBEDDING_MODEL
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return config_model_workspace.get_config().DEFAULT_EMBEDDING_MODEL
def _import_agent_model() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
Agent_MODEL = load_mod(model_config_load.get("module"), "Agent_MODEL")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return Agent_MODEL
return config_model_workspace.get_config().Agent_MODEL
def _import_history_len() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
HISTORY_LEN = load_mod(model_config_load.get("module"), "HISTORY_LEN")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return HISTORY_LEN
return config_model_workspace.get_config().HISTORY_LEN
def _import_max_tokens() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
MAX_TOKENS = load_mod(model_config_load.get("module"), "MAX_TOKENS")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return MAX_TOKENS
return config_model_workspace.get_config().MAX_TOKENS
def _import_temperature() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
TEMPERATURE = load_mod(model_config_load.get("module"), "TEMPERATURE")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return TEMPERATURE
return config_model_workspace.get_config().TEMPERATURE
def _import_support_agent_models() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
SUPPORT_AGENT_MODELS = load_mod(model_config_load.get("module"), "SUPPORT_AGENT_MODELS")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return SUPPORT_AGENT_MODELS
return config_model_workspace.get_config().SUPPORT_AGENT_MODELS
def _import_llm_model_config() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
LLM_MODEL_CONFIG = load_mod(model_config_load.get("module"), "LLM_MODEL_CONFIG")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return LLM_MODEL_CONFIG
return config_model_workspace.get_config().LLM_MODEL_CONFIG
def _import_model_platforms() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
MODEL_PLATFORMS = load_mod(model_config_load.get("module"), "MODEL_PLATFORMS")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return MODEL_PLATFORMS
return config_model_workspace.get_config().MODEL_PLATFORMS
def _import_model_providers_cfg_path() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
MODEL_PROVIDERS_CFG_PATH_CONFIG = load_mod(model_config_load.get("module"), "MODEL_PROVIDERS_CFG_PATH_CONFIG")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return MODEL_PROVIDERS_CFG_PATH_CONFIG
return config_model_workspace.get_config().MODEL_PROVIDERS_CFG_PATH_CONFIG
def _import_model_providers_cfg_host() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
MODEL_PROVIDERS_CFG_HOST = load_mod(model_config_load.get("module"), "MODEL_PROVIDERS_CFG_HOST")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return MODEL_PROVIDERS_CFG_HOST
return config_model_workspace.get_config().MODEL_PROVIDERS_CFG_HOST
def _import_model_providers_cfg_port() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
MODEL_PROVIDERS_CFG_PORT = load_mod(model_config_load.get("module"), "MODEL_PROVIDERS_CFG_PORT")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return MODEL_PROVIDERS_CFG_PORT
return config_model_workspace.get_config().MODEL_PROVIDERS_CFG_PORT
def _import_tool_config() -> Any:
model_config_load = CONFIG_IMPORTS.get("_model_config.py")
load_mod = model_config_load.get("load_mod")
TOOL_CONFIG = load_mod(model_config_load.get("module"), "TOOL_CONFIG")
config_model_workspace = load_mod(model_config_load.get("module"), "config_model_workspace")
return TOOL_CONFIG
return config_model_workspace.get_config().TOOL_CONFIG
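
Every _import_* helper above repeats one pattern: fetch the "_model_config.py" entry from the CONFIG_IMPORTS registry, take its load_mod callable, and resolve a single attribute; the new variants then return the value from config_model_workspace.get_config() instead of a module-level constant. load_mod itself is not part of this diff; a minimal sketch of such a loader, assuming the registry's "module" entry is an importable module name (hypothetical implementation):

import importlib
from typing import Any

def load_mod(module_name: str, attribute: str) -> Any:
    # Import the target module on demand and hand back one attribute from it.
    module = importlib.import_module(module_name)
    return getattr(module, attribute)
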
def _import_prompt_templates() -> Any:
@ -524,6 +556,14 @@ def __getattr__(name: str) -> Any:
return _import_ConfigBasicWorkSpace()
elif name == "config_basic_workspace":
return _import_config_basic_workspace()
elif name == "ConfigModel":
return _import_ConfigModel()
elif name == "ConfigModelFactory":
return _import_ConfigModelFactory()
elif name == "ConfigModelWorkSpace":
return _import_ConfigModelWorkSpace()
elif name == "config_model_workspace":
return _import_config_model_workspace()
elif name == "log_verbose":
return _import_log_verbose()
elif name == "CHATCHAT_ROOT":
@ -624,7 +664,6 @@ VERSION = "v0.3.0-preview"
__all__ = [
"VERSION",
"config_basic_workspace",
"log_verbose",
"CHATCHAT_ROOT",
"DATA_PATH",
@ -677,4 +716,12 @@ __all__ = [
"ConfigBasicFactory",
"ConfigBasicWorkSpace",
"config_basic_workspace",
"ConfigModel",
"ConfigModelFactory",
"ConfigModelWorkSpace",
"config_model_workspace",
]
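
The elif chain above wires these helpers into module-level __getattr__ (PEP 562), so _model_config.py is imported only when one of these names is first accessed. A hedged usage sketch, assuming the package is importable as chatchat.configs:

from chatchat import configs

# _model_config.py has not been imported yet; the first attribute access
# goes through configs.__getattr__, which dispatches to
# _import_config_model_workspace() and loads the module on demand.
workspace = configs.config_model_workspace

# Scalar settings now resolve through the workspace's current config:
print(configs.DEFAULT_LLM_MODEL)
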

View File

@ -6,7 +6,6 @@ import sys
import logging
from typing import Any, Optional
from chatchat.configs._core_config import CF
sys.path.append(str(Path(__file__).parent))
import _core_config as core_config
@ -128,6 +127,9 @@ class ConfigBasicWorkSpace(core_config.ConfigWorkSpace[ConfigBasicFactory, Confi
"""
config_factory_cls = ConfigBasicFactory
def __init__(self):
super().__init__()
def _build_config_factory(self, config_json: Any) -> ConfigBasicFactory:
_config_factory = self.config_factory_cls()
@ -145,9 +147,6 @@ class ConfigBasicWorkSpace(core_config.ConfigWorkSpace[ConfigBasicFactory, Confi
def get_type(cls) -> str:
return ConfigBasic.class_name()
def __init__(self):
super().__init__()
def get_config(self) -> ConfigBasic:
return self._config_factory.get_config()
@ -163,9 +162,5 @@ class ConfigBasicWorkSpace(core_config.ConfigWorkSpace[ConfigBasicFactory, Confi
self._config_factory.log_format(log_format)
self.store_config()
def clear(self):
logger.info("Clear workspace config.")
os.remove(self.workspace_config)
config_basic_workspace: ConfigBasicWorkSpace = ConfigBasicWorkSpace()
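
This hunk moves __init__ ahead of _build_config_factory and drops the clear() helper; the workspace keeps its factory plumbing and typed setters. A short usage sketch of the module-level singleton, assuming the ConfigBasic fields exercised by the tests later in this commit (LOG_FORMAT, LOG_PATH, MEDIA_PATH):

from chatchat.configs import config_basic_workspace

basic = config_basic_workspace.get_config()
print(basic.LOG_FORMAT, basic.LOG_PATH, basic.MEDIA_PATH)
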

View File

@ -1,38 +1,100 @@
import os
import logging
import sys
from pathlib import Path
from typing import Any, Optional, List, Dict
# Default LLM model name
DEFAULT_LLM_MODEL = "chatglm3-6b"
from dataclasses import dataclass
# Default embedding model name
DEFAULT_EMBEDDING_MODEL = "bge-large-zh-v1.5"
sys.path.append(str(Path(__file__).parent))
import _core_config as core_config
logger = logging.getLogger()
# Name of the Agent LLM (optional; if set, the chain entered by the Agent is locked to this model; if unset, LLM_MODELS[0] is used)
Agent_MODEL = None
class ConfigModel(core_config.Config):
DEFAULT_LLM_MODEL: Optional[str] = None
"""Default LLM model name"""
DEFAULT_EMBEDDING_MODEL: Optional[str] = None
"""Default embedding model name"""
Agent_MODEL: Optional[str] = None
"""Name of the Agent LLM (optional; if set, the chain entered by the Agent is locked to this model; if unset, LLM_MODELS[0] is used)"""
HISTORY_LEN: Optional[int] = None
"""Number of history dialogue turns"""
MAX_TOKENS: Optional[int] = None
"""Maximum length supported by the model; if unset, the model's own default maximum is used; if set, this user-specified maximum applies"""
TEMPERATURE: Optional[float] = None
"""General LLM conversation parameter"""
SUPPORT_AGENT_MODELS: Optional[List[str]] = None
"""Models that support Agent use"""
LLM_MODEL_CONFIG: Optional[Dict[str, Dict[str, Any]]] = None
"""LLM model configuration, including initialization parameters for the different modalities"""
MODEL_PLATFORMS: Optional[List[Dict[str, Any]]] = None
"""Model platform configuration"""
MODEL_PROVIDERS_CFG_PATH_CONFIG: Optional[str] = None
"""Path to the model providers configuration file"""
MODEL_PROVIDERS_CFG_HOST: Optional[str] = None
"""Host of the model providers service"""
MODEL_PROVIDERS_CFG_PORT: Optional[int] = None
"""Port of the model providers service"""
TOOL_CONFIG: Optional[Dict[str, Any]] = None
"""Tool configuration"""
# Number of history dialogue turns
HISTORY_LEN = 3
@classmethod
def class_name(cls) -> str:
return cls.__name__
# Maximum length supported by the model; if unset, the model's own default maximum is used; if set, this user-specified maximum applies
MAX_TOKENS = None
def __str__(self):
return self.to_json()
# General LLM conversation parameter
TEMPERATURE = 0.7
# TOP_P = 0.95  # ChatOpenAI does not support this parameter yet
SUPPORT_AGENT_MODELS = [
@dataclass
class ConfigModelFactory(core_config.ConfigFactory[ConfigModel]):
"""ConfigModel工厂类"""
def __init__(self):
# Default LLM model name
self.DEFAULT_LLM_MODEL = "chatglm3-6b"
# Default embedding model name
self.DEFAULT_EMBEDDING_MODEL = "bge-large-zh-v1.5"
# Name of the Agent LLM (optional; if set, the chain entered by the Agent is locked to this model; if unset, LLM_MODELS[0] is used)
self.Agent_MODEL = None
# Number of history dialogue turns
self.HISTORY_LEN = 3
# Maximum length supported by the model; if unset, the model's own default maximum is used; if set, this user-specified maximum applies
self.MAX_TOKENS = None
# General LLM conversation parameter
self.TEMPERATURE = 0.7
# TOP_P = 0.95  # ChatOpenAI does not support this parameter yet
self.SUPPORT_AGENT_MODELS = [
"chatglm3-6b",
"openai-api",
"Qwen-14B-Chat",
"Qwen-7B-Chat",
"qwen-turbo",
]
]
self.MODEL_PROVIDERS_CFG_PATH_CONFIG = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"model_providers.yaml")
self.MODEL_PROVIDERS_CFG_HOST = "127.0.0.1"
LLM_MODEL_CONFIG = {
self.MODEL_PROVIDERS_CFG_PORT = 20000
self._init_llm_work_config()
def _init_llm_work_config(self):
"""初始化知识库runtime的一些配置"""
self.LLM_MODEL_CONFIG = {
# Intent recognition needs no user-facing output; it is enough for the model backend to know
"preprocess_model": {
DEFAULT_LLM_MODEL: {
self.DEFAULT_LLM_MODEL: {
"temperature": 0.05,
"max_tokens": 4096,
"history_len": 100,
@ -41,7 +103,7 @@ LLM_MODEL_CONFIG = {
},
},
"llm_model": {
DEFAULT_LLM_MODEL: {
self.DEFAULT_LLM_MODEL: {
"temperature": 0.9,
"max_tokens": 4096,
"history_len": 10,
@ -50,7 +112,7 @@ LLM_MODEL_CONFIG = {
},
},
"action_model": {
DEFAULT_LLM_MODEL: {
self.DEFAULT_LLM_MODEL: {
"temperature": 0.01,
"max_tokens": 4096,
"prompt_name": "ChatGLM3",
@ -58,7 +120,7 @@ LLM_MODEL_CONFIG = {
},
},
"postprocess_model": {
DEFAULT_LLM_MODEL: {
self.DEFAULT_LLM_MODEL: {
"temperature": 0.01,
"max_tokens": 4096,
"prompt_name": "default",
@ -70,17 +132,16 @@ LLM_MODEL_CONFIG = {
"size": "256*256",
}
}
}
}
# model_providers can expose other platforms' interfaces as an openai endpoint; after startup, the matching platforms are appended to the variable below automatically
# ### If you already have an address that provides an openai endpoint, you can configure it here directly
# - platform_name can be anything, as long as it is not duplicated
# - platform_type may later be used to differentiate features by platform type; keep it identical to platform_name
# - Fill the models deployed by each framework into the matching list. Different frameworks can load models with the same name, and the project balances load automatically.
# Create a global shared dict
MODEL_PLATFORMS = [
self.MODEL_PLATFORMS = [
{
"platform_name": "oneapi",
@ -138,14 +199,9 @@ MODEL_PLATFORMS = [
"tts_models": [],
},
]
MODEL_PROVIDERS_CFG_PATH_CONFIG = os.path.join(os.path.dirname(os.path.abspath(__file__)), "model_providers.yaml")
MODEL_PROVIDERS_CFG_HOST = "127.0.0.1"
MODEL_PROVIDERS_CFG_PORT = 20000
# Tool configuration
TOOL_CONFIG = {
]
# Tool configuration
self.TOOL_CONFIG = {
"search_local_knowledgebase": {
"use": False,
"top_k": 3,
@ -244,17 +300,151 @@ TOOL_CONFIG = {
# Carefully evaluate whether read_only needs to be enabled; when enabled, SQL statements are checked. Please confirm that the intercept_sql interceptor in text2sql.py satisfies the read-only requirements of your database
# Restricting user permissions at the database level is the preferred approach
"read_only": False,
# Limit the number of returned rows
"top_k": 50,
# Whether to return intermediate steps
"return_intermediate_steps": True,
# To restrict queries to specific tables, list their names, e.g. ["sys_user","sys_dept"]; leave empty to let the model decide which tables to use
"table_names": [],
# Extra descriptions of the table names help the LLM judge which tables to use; this matters especially in SQLDatabaseSequentialChain mode, which predicts from table names alone and easily misjudges.
"table_comments": {
# If the model picks the wrong table, try filling in table names and descriptions to match your actual schema
# "tableA": "This is a user table storing users' basic information",
# "tableB": "Role table",
}
},
}
}
def default_llm_model(self, llm_model: str):
self.DEFAULT_LLM_MODEL = llm_model
def default_embedding_model(self, embedding_model: str):
self.DEFAULT_EMBEDDING_MODEL = embedding_model
def agent_model(self, agent_model: str):
self.Agent_MODEL = agent_model
def history_len(self, history_len: int):
self.HISTORY_LEN = history_len
def max_tokens(self, max_tokens: int):
self.MAX_TOKENS = max_tokens
def temperature(self, temperature: float):
self.TEMPERATURE = temperature
def support_agent_models(self, support_agent_models: List[str]):
self.SUPPORT_AGENT_MODELS = support_agent_models
def model_providers_cfg_path_config(self, model_providers_cfg_path_config: str):
self.MODEL_PROVIDERS_CFG_PATH_CONFIG = model_providers_cfg_path_config
def model_providers_cfg_host(self, model_providers_cfg_host: str):
self.MODEL_PROVIDERS_CFG_HOST = model_providers_cfg_host
def model_providers_cfg_port(self, model_providers_cfg_port: int):
self.MODEL_PROVIDERS_CFG_PORT = model_providers_cfg_port
def get_config(self) -> ConfigModel:
config = ConfigModel()
config.DEFAULT_LLM_MODEL = self.DEFAULT_LLM_MODEL
config.DEFAULT_EMBEDDING_MODEL = self.DEFAULT_EMBEDDING_MODEL
config.Agent_MODEL = self.Agent_MODEL
config.HISTORY_LEN = self.HISTORY_LEN
config.MAX_TOKENS = self.MAX_TOKENS
config.TEMPERATURE = self.TEMPERATURE
config.SUPPORT_AGENT_MODELS = self.SUPPORT_AGENT_MODELS
config.LLM_MODEL_CONFIG = self.LLM_MODEL_CONFIG
config.MODEL_PLATFORMS = self.MODEL_PLATFORMS
config.MODEL_PROVIDERS_CFG_PATH_CONFIG = self.MODEL_PROVIDERS_CFG_PATH_CONFIG
config.MODEL_PROVIDERS_CFG_HOST = self.MODEL_PROVIDERS_CFG_HOST
config.MODEL_PROVIDERS_CFG_PORT = self.MODEL_PROVIDERS_CFG_PORT
config.TOOL_CONFIG = self.TOOL_CONFIG
return config
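
ConfigModelFactory seeds its defaults in __init__ and builds the nested LLM_MODEL_CONFIG, MODEL_PLATFORMS, and TOOL_CONFIG structures once in _init_llm_work_config(); get_config() then copies every field onto a fresh ConfigModel. A small sketch of direct factory use, with values taken from the defaults above. Note that the setters only reassign scalars: calling default_llm_model() afterwards does not re-run _init_llm_work_config(), so LLM_MODEL_CONFIG stays keyed by the construction-time default model name.

factory = ConfigModelFactory()

# Per-role generation parameters, keyed by the construction-time default model:
action_params = factory.LLM_MODEL_CONFIG["action_model"][factory.DEFAULT_LLM_MODEL]
assert action_params["temperature"] == 0.01

# Setters mutate the factory; get_config() snapshots the current state.
factory.temperature(0.1)
snapshot = factory.get_config()
assert snapshot.TEMPERATURE == 0.1
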
class ConfigModelWorkSpace(core_config.ConfigWorkSpace[ConfigModelFactory, ConfigModel]):
"""
Workspace configuration preset; provides ConfigModel builder methods that produce instances
"""
config_factory_cls = ConfigModelFactory
def __init__(self):
super().__init__()
def _build_config_factory(self, config_json: Any) -> ConfigModelFactory:
_config_factory = self.config_factory_cls()
if config_json.get("DEFAULT_LLM_MODEL"):
_config_factory.default_llm_model(config_json.get("DEFAULT_LLM_MODEL"))
if config_json.get("DEFAULT_EMBEDDING_MODEL"):
_config_factory.default_embedding_model(config_json.get("DEFAULT_EMBEDDING_MODEL"))
if config_json.get("Agent_MODEL"):
_config_factory.agent_model(config_json.get("Agent_MODEL"))
if config_json.get("HISTORY_LEN"):
_config_factory.history_len(config_json.get("HISTORY_LEN"))
if config_json.get("MAX_TOKENS"):
_config_factory.max_tokens(config_json.get("MAX_TOKENS"))
if config_json.get("TEMPERATURE"):
_config_factory.temperature(config_json.get("TEMPERATURE"))
if config_json.get("SUPPORT_AGENT_MODELS"):
_config_factory.support_agent_models(config_json.get("SUPPORT_AGENT_MODELS"))
if config_json.get("MODEL_PROVIDERS_CFG_PATH_CONFIG"):
_config_factory.model_providers_cfg_path_config(config_json.get("MODEL_PROVIDERS_CFG_PATH_CONFIG"))
if config_json.get("MODEL_PROVIDERS_CFG_HOST"):
_config_factory.model_providers_cfg_host(config_json.get("MODEL_PROVIDERS_CFG_HOST"))
if config_json.get("MODEL_PROVIDERS_CFG_PORT"):
_config_factory.model_providers_cfg_port(config_json.get("MODEL_PROVIDERS_CFG_PORT"))
return _config_factory
@classmethod
def get_type(cls) -> str:
return ConfigModel.class_name()
def get_config(self) -> ConfigModel:
return self._config_factory.get_config()
def set_default_llm_model(self, llm_model: str):
self._config_factory.default_llm_model(llm_model)
self.store_config()
def set_default_embedding_model(self, embedding_model: str):
self._config_factory.default_embedding_model(embedding_model)
self.store_config()
def set_agent_model(self, agent_model: str):
self._config_factory.agent_model(agent_model)
self.store_config()
def set_history_len(self, history_len: int):
self._config_factory.history_len(history_len)
self.store_config()
def set_max_tokens(self, max_tokens: int):
self._config_factory.max_tokens(max_tokens)
self.store_config()
def set_temperature(self, temperature: float):
self._config_factory.temperature(temperature)
self.store_config()
def set_support_agent_models(self, support_agent_models: List[str]):
self._config_factory.support_agent_models(support_agent_models)
self.store_config()
def set_model_providers_cfg_path_config(self, model_providers_cfg_path_config: str):
self._config_factory.model_providers_cfg_path_config(model_providers_cfg_path_config)
self.store_config()
def set_model_providers_cfg_host(self, model_providers_cfg_host: str):
self._config_factory.model_providers_cfg_host(model_providers_cfg_host)
self.store_config()
def set_model_providers_cfg_port(self, model_providers_cfg_port: int):
self._config_factory.model_providers_cfg_port(model_providers_cfg_port)
self.store_config()
config_model_workspace: ConfigModelWorkSpace = ConfigModelWorkSpace()
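
Workspace setters persist after every change, and _build_config_factory rehydrates a factory from previously stored JSON; where store_config() writes is determined by core_config.ConfigWorkSpace, which is outside this diff. A hedged usage sketch of the module-level singleton:

from chatchat.configs import config_model_workspace

# Every set_* call updates the underlying factory and immediately persists
# via store_config():
config_model_workspace.set_default_llm_model("Qwen-7B-Chat")
config_model_workspace.set_history_len(5)

cfg = config_model_workspace.get_config()
print(cfg.DEFAULT_LLM_MODEL, cfg.HISTORY_LEN)

One caveat visible in _build_config_factory: the truthiness checks (if config_json.get("TEMPERATURE"): ...) skip falsy-but-valid values, so a stored TEMPERATURE of 0.0 or HISTORY_LEN of 0 falls back to the defaults on reload.
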

View File

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-chatchat"
version = "0.3.0.20240606"
version = "0.3.0.20240611"
description = ""
authors = ["chatchat"]
readme = "README.md"
@ -121,7 +121,6 @@ extended_testing = [
"xmltodict",
"faiss-cpu",
"openapi-pydantic",
"markdownify",
"arxiv",
"sqlite-vss",
"rapidocr-onnxruntime",

View File

@ -1,6 +1,10 @@
from pathlib import Path
from chatchat.configs import ConfigBasicFactory, ConfigBasic, ConfigBasicWorkSpace
from chatchat.configs import (
ConfigBasicFactory,
ConfigBasic,
ConfigBasicWorkSpace
)
import os
@ -36,3 +40,6 @@ def test_workspace_default():
assert LOG_FORMAT is not None
assert LOG_PATH is not None
assert MEDIA_PATH is not None
def test_config_model_workspace():
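    # The rest of this hunk is truncated in the diff; a hypothetical body,
    # mirroring test_workspace_default above (the commit's actual assertions
    # are not shown):
    from chatchat.configs import config_model_workspace

    config = config_model_workspace.get_config()
    assert config.DEFAULT_LLM_MODEL == "chatglm3-6b"
    assert config.DEFAULT_EMBEDDING_MODEL == "bge-large-zh-v1.5"
    assert config.HISTORY_LEN == 3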