Update model configuration file and supported model list

This commit is contained in:
zR 2024-01-20 22:40:31 +08:00
parent 8c2019f8b9
commit 0cf65d5933
3 changed files with 83 additions and 133 deletions

View File

@@ -6,9 +6,9 @@ import os
 MODEL_ROOT_PATH = ""
 # Name of the selected Embedding model
-EMBEDDING_MODEL = "bge-large-zh"
-# Device the Embedding model runs on. Set to "auto" for automatic detection, or set manually to one of "cuda", "mps", "cpu".
+EMBEDDING_MODEL = "bge-large-zh-v1.5"
+# Device the Embedding model runs on. Set to "auto" for automatic detection (produces a warning), or set manually to one of "cuda", "mps", "cpu", "xpu".
 EMBEDDING_DEVICE = "auto"
 # Selected reranker model
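The "auto" comment above describes runtime device detection. A minimal sketch of what such detection typically looks like (the helper name is illustrative, not the project's actual implementation, and it omits the "xpu" case):

import torch

def detect_device() -> str:
    # Illustrative only: the project's real detection logic lives elsewhere.
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"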
@@ -26,50 +26,32 @@ EMBEDDING_MODEL_OUTPUT_PATH = "output"
 # Here we use two currently mainstream offline models; chatglm3-6b is the default model to load.
 # If you are short on GPU memory, you can use Qwen-1_8B-Chat, which needs only 3.8 GB of VRAM at FP16.
-# For chatglm3-6b emitting the role tag <|user|> and answering its own questions, see project wiki -> FAQ -> Q20.
-LLM_MODELS = ["chatglm3-6b", "zhipu-api", "openai-api"]  # "Qwen-1_8B-Chat",
+LLM_MODELS = ["zhipu-api"]
-# Name of the AgentLM model (optional; if set, it pins the model used by the Chain once the Agent is entered; if unset, LLM_MODELS[0] is used)
 Agent_MODEL = None
-# Device the LLM runs on. Set to "auto" for automatic detection, or set manually to one of "cuda", "mps", "cpu".
-LLM_DEVICE = "auto"
+LLM_DEVICE = "cuda"
-# Number of history dialogue turns
 HISTORY_LEN = 3
-# Maximum length the LLM supports. If unset, the model's default maximum length is used; if set, the user-specified maximum applies.
-MAX_TOKENS = None
+MAX_TOKENS = 2048
-# Common LLM dialogue parameters
 TEMPERATURE = 0.7
-# TOP_P = 0.95  # ChatOpenAI does not support this parameter yet
 ONLINE_LLM_MODEL = {
-    # Online models. Set a different port for each online API in server_config.
     "openai-api": {
-        "model_name": "gpt-3.5-turbo",
+        "model_name": "gpt-4",
         "api_base_url": "https://api.openai.com/v1",
         "api_key": "",
         "openai_proxy": "",
     },
-    # To get an api_key, go to https://makersuite.google.com/ or Google Cloud. Confirm network connectivity before use; to use a proxy, set the https_proxy environment variable inside the environment the project starts in (python startup.py -a).
-    "gemini-api": {
-        "api_key": "",
-        "provider": "GeminiWorker",
-    },
-    # For registration and api key, go to http://open.bigmodel.cn
+    # The Zhipu AI API does not support GLM4 and this version cannot be made compatible with it; please wait for 0.3.x. For registration and api key, go to http://open.bigmodel.cn
     "zhipu-api": {
         "api_key": "",
-        "version": "chatglm_turbo",  # options include "chatglm_turbo"
+        "version": "chatglm_turbo",
         "provider": "ChatGLMWorker",
     },
     # For registration and api key, go to https://api.minimax.chat/
     "minimax-api": {
         "group_id": "",
@@ -78,7 +60,6 @@ ONLINE_LLM_MODEL = {
         "provider": "MiniMaxWorker",
     },
     # For registration and api key, go to https://xinghuo.xfyun.cn/
     "xinghuo-api": {
         "APPID": "",
@@ -99,8 +80,8 @@ ONLINE_LLM_MODEL = {
     # Volcano Ark; API docs: https://www.volcengine.com/docs/82379
     "fangzhou-api": {
-        "version": "chatglm-6b-model",  # currently supports "chatglm-6b-model"; for more, see the Ark section of the model support list in the docs.
-        "version_url": "",  # instead of setting version, you may directly fill in the API address of the model deployment you applied for on Ark
+        "version": "chatglm-6b-model",
+        "version_url": "",
         "api_key": "",
         "secret_key": "",
         "provider": "FangZhouWorker",
@@ -108,15 +89,15 @@ ONLINE_LLM_MODEL = {
     # Alibaba Cloud Tongyi Qianwen; API docs: https://help.aliyun.com/zh/dashscope/developer-reference/api-details
     "qwen-api": {
-        "version": "qwen-turbo",  # options include "qwen-turbo", "qwen-plus"
-        "api_key": "",  # create one on the DashScope API-KEY management page in the Alibaba Cloud console
+        "version": "qwen-max",
+        "api_key": "",
         "provider": "QwenWorker",
         "embed_model": "text-embedding-v1"  # embedding model name
     },
     # Baichuan API; to apply, see https://www.baichuan-ai.com/home#api-enter
     "baichuan-api": {
-        "version": "Baichuan2-53B",  # currently supports "Baichuan2-53B"; see the official docs.
+        "version": "Baichuan2-53B",
         "api_key": "",
         "secret_key": "",
         "provider": "BaiChuanWorker",
@@ -138,6 +119,11 @@ ONLINE_LLM_MODEL = {
         "secret_key": "",
         "provider": "TianGongWorker",
     },
+    # Gemini API (untested by the dev team, contributed by the community; only supports pro)
+    "gemini-api": {
+        "api_key": "",
+        "provider": "GeminiWorker",
+    }
 }
@@ -149,6 +135,7 @@ ONLINE_LLM_MODEL = {
 # - GanymedeNil/text2vec-large-chinese
 # - text2vec-large-chinese
 # 2.2 If none of the local paths above exist, the huggingface model is used
 MODEL_PATH = {
     "embed_model": {
         "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
@@ -167,7 +154,7 @@ MODEL_PATH = {
         "bge-large-zh": "BAAI/bge-large-zh",
         "bge-large-zh-noinstruct": "BAAI/bge-large-zh-noinstruct",
         "bge-base-zh-v1.5": "BAAI/bge-base-zh-v1.5",
-        "bge-large-zh-v1.5": "BAAI/bge-large-zh-v1.5",
+        "bge-large-zh-v1.5": "/share/home/zyx/Models/bge-large-zh-v1.5",
         "piccolo-base-zh": "sensenova/piccolo-base-zh",
         "piccolo-large-zh": "sensenova/piccolo-large-zh",
         "nlp_gte_sentence-embedding_chinese-large": "damo/nlp_gte_sentence-embedding_chinese-large",
@@ -175,55 +162,55 @@ MODEL_PATH = {
     },
     "llm_model": {
-        # Some of the models below are not fully tested; support is inferred only from the fastchat and vllm model lists.
         "chatglm2-6b": "THUDM/chatglm2-6b",
         "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
         "chatglm3-6b": "THUDM/chatglm3-6b",
         "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",
-        "chatglm3-6b-base": "THUDM/chatglm3-6b-base",
-        "Qwen-1_8B": "Qwen/Qwen-1_8B",
-        "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
-        "Qwen-1_8B-Chat-Int8": "Qwen/Qwen-1_8B-Chat-Int8",
-        "Qwen-1_8B-Chat-Int4": "Qwen/Qwen-1_8B-Chat-Int4",
-        "Qwen-7B": "Qwen/Qwen-7B",
+        "Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
+        "Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
+        "Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf",
+        "Qwen-1_8B-Chat": "/media/checkpoint/Qwen-1_8B-Chat",
         "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
-        "Qwen-14B": "Qwen/Qwen-14B",
         "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
-        "Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8",
-        # Under newer transformers releases you must manually edit the model's config.json and add a
-        # `disable_exllama: true` field inside the quantization_config dict before Qwen's quantized models will start.
-        "Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4",
-        "Qwen-72B": "Qwen/Qwen-72B",
         "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
-        "Qwen-72B-Chat-Int8": "Qwen/Qwen-72B-Chat-Int8",
-        "Qwen-72B-Chat-Int4": "Qwen/Qwen-72B-Chat-Int4",
-        "baichuan2-13b": "baichuan-inc/Baichuan2-13B-Chat",
-        "baichuan2-7b": "baichuan-inc/Baichuan2-7B-Chat",
-        "baichuan-7b": "baichuan-inc/Baichuan-7B",
-        "baichuan-13b": "baichuan-inc/Baichuan-13B",
+        "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
         "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
-        "aquila-7b": "BAAI/Aquila-7B",
-        "aquilachat-7b": "BAAI/AquilaChat-7B",
+        "baichuan2-7b-chat": "baichuan-inc/Baichuan2-7B-Chat",
+        "baichuan2-13b-chat": "baichuan-inc/Baichuan2-13B-Chat",
         "internlm-7b": "internlm/internlm-7b",
         "internlm-chat-7b": "internlm/internlm-chat-7b",
-        "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat",
-        "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k",
-        "Yi-34B-Chat": "https://huggingface.co/01-ai/Yi-34B-Chat",
-        "agentlm-7b": "THUDM/agentlm-7b",
-        "agentlm-13b": "THUDM/agentlm-13b",
-        "agentlm-70b": "THUDM/agentlm-70b",
+        "internlm2-chat-7b": "internlm/internlm2-chat-7b",
+        "internlm2-chat-20b": "internlm/internlm2-chat-20b",
         "falcon-7b": "tiiuae/falcon-7b",
         "falcon-40b": "tiiuae/falcon-40b",
         "falcon-rw-7b": "tiiuae/falcon-rw-7b",
+        "aquila-7b": "BAAI/Aquila-7B",
+        "aquilachat-7b": "BAAI/AquilaChat-7B",
+        "open_llama_13b": "openlm-research/open_llama_13b",
+        "vicuna-13b-v1.5": "lmsys/vicuna-13b-v1.5",
+        "koala": "young-geng/koala",
+        "mpt-7b": "mosaicml/mpt-7b",
+        "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter",
+        "mpt-30b": "mosaicml/mpt-30b",
+        "opt-66b": "facebook/opt-66b",
+        "opt-iml-max-30b": "facebook/opt-iml-max-30b",
         "gpt2": "gpt2",
         "gpt2-xl": "gpt2-xl",
         "gpt-j-6b": "EleutherAI/gpt-j-6b",
         "gpt4all-j": "nomic-ai/gpt4all-j",
         "gpt-neox-20b": "EleutherAI/gpt-neox-20b",
@@ -231,63 +218,50 @@ MODEL_PATH = {
         "oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
         "dolly-v2-12b": "databricks/dolly-v2-12b",
         "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
-        "Llama-2-13b-hf": "meta-llama/Llama-2-13b-hf",
-        "Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
-        "open_llama_13b": "openlm-research/open_llama_13b",
-        "vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3",
-        "koala": "young-geng/koala",
-        "mpt-7b": "mosaicml/mpt-7b",
-        "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter",
-        "mpt-30b": "mosaicml/mpt-30b",
-        "opt-66b": "facebook/opt-66b",
-        "opt-iml-max-30b": "facebook/opt-iml-max-30b",
+        "agentlm-7b": "THUDM/agentlm-7b",
+        "agentlm-13b": "THUDM/agentlm-13b",
+        "agentlm-70b": "THUDM/agentlm-70b",
+        "Yi-34B-Chat": "01-ai/Yi-34B-Chat",
     },
-    "reranker":{
-        "bge-reranker-large":"BAAI/bge-reranker-large",
-        "bge-reranker-base":"BAAI/bge-reranker-base",
-        # TODO: add online rerankers such as cohere
+    "reranker": {
+        "bge-reranker-large": "BAAI/bge-reranker-large",
+        "bge-reranker-base": "BAAI/bge-reranker-base",
     }
 }

 # Normally nothing below needs to be changed
 # nltk data storage path
 NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")

-# Using vLLM may degrade the model's reasoning ability and leave it unable to complete Agent tasks
 VLLM_MODEL_DICT = {
-    "aquila-7b": "BAAI/Aquila-7B",
-    "aquilachat-7b": "BAAI/AquilaChat-7B",
-    "baichuan-7b": "baichuan-inc/Baichuan-7B",
-    "baichuan-13b": "baichuan-inc/Baichuan-13B",
-    "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
     "chatglm2-6b": "THUDM/chatglm2-6b",
     "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
     "chatglm3-6b": "THUDM/chatglm3-6b",
     "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",
+    "Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
+    "Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
+    "Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf",
+    "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
+    "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
+    "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
+    "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
+    "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
+    "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
+    "baichuan2-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
+    "baichuan2-13b-chat": "baichuan-inc/Baichuan2-13B-Chat",
     "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat",
     "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k",
-    # Note: the bloom family's tokenizer and model are separate, so although vllm supports them, they are incompatible with the fschat framework
-    # "bloom": "bigscience/bloom",
-    # "bloomz": "bigscience/bloomz",
-    # "bloomz-560m": "bigscience/bloomz-560m",
-    # "bloomz-7b1": "bigscience/bloomz-7b1",
-    # "bloomz-1b7": "bigscience/bloomz-1b7",
     "internlm-7b": "internlm/internlm-7b",
     "internlm-chat-7b": "internlm/internlm-chat-7b",
+    "internlm2-chat-7b": "internlm/Models/internlm2-chat-7b",
+    "internlm2-chat-20b": "internlm/Models/internlm2-chat-20b",
+    "aquila-7b": "BAAI/Aquila-7B",
+    "aquilachat-7b": "BAAI/AquilaChat-7B",
     "falcon-7b": "tiiuae/falcon-7b",
     "falcon-40b": "tiiuae/falcon-40b",
     "falcon-rw-7b": "tiiuae/falcon-rw-7b",
@@ -300,8 +274,6 @@ VLLM_MODEL_DICT = {
     "oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
     "dolly-v2-12b": "databricks/dolly-v2-12b",
     "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
-    "Llama-2-13b-hf": "meta-llama/Llama-2-13b-hf",
-    "Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
     "open_llama_13b": "openlm-research/open_llama_13b",
     "vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3",
     "koala": "young-geng/koala",
@@ -311,32 +283,8 @@ VLLM_MODEL_DICT = {
     "opt-66b": "facebook/opt-66b",
     "opt-iml-max-30b": "facebook/opt-iml-max-30b",
-    "Qwen-1_8B": "Qwen/Qwen-1_8B",
-    "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
-    "Qwen-1_8B-Chat-Int8": "Qwen/Qwen-1_8B-Chat-Int8",
-    "Qwen-1_8B-Chat-Int4": "Qwen/Qwen-1_8B-Chat-Int4",
-    "Qwen-7B": "Qwen/Qwen-7B",
-    "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
-    "Qwen-14B": "Qwen/Qwen-14B",
-    "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
-    "Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8",
-    "Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4",
-    "Qwen-72B": "Qwen/Qwen-72B",
-    "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
-    "Qwen-72B-Chat-Int8": "Qwen/Qwen-72B-Chat-Int8",
-    "Qwen-72B-Chat-Int4": "Qwen/Qwen-72B-Chat-Int4",
-    "agentlm-7b": "THUDM/agentlm-7b",
-    "agentlm-13b": "THUDM/agentlm-13b",
-    "agentlm-70b": "THUDM/agentlm-70b",
 }

+# Models you believe support Agent capability can be added here; once added, the warning in the web UI no longer appears.
+# In our tests, only the following models natively support Agent:
 SUPPORT_AGENT_MODEL = [
     "azure-api",
     "openai-api",
@@ -344,4 +292,6 @@ SUPPORT_AGENT_MODEL = [
     "Qwen",
     "chatglm3",
     "xinghuo-api",
+    "internlm2-chat-7b",
+    "internlm2-chat-20b"
 ]
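The added comments describe the contract: listing a model in SUPPORT_AGENT_MODEL suppresses the web UI's Agent-capability warning for it. A minimal sketch of the membership check such a list typically feeds (hypothetical; the actual check lives in UI code this commit does not touch):

def supports_agent(model_name: str) -> bool:
    # Substring match, so a key like "Qwen" also covers "Qwen-7B-Chat"; illustrative only.
    return any(key in model_name for key in SUPPORT_AGENT_MODEL)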

View File

@@ -20,6 +20,6 @@ def weather(location: str, api_key: str):
 def weathercheck(location: str):
-    return weather(location, "S8vrB4U_-c5mvAMiK")
+    return weather(location, "your keys")

 class WeatherInput(BaseModel):
-    location: str = Field(description="City name,include city and county,like '厦门'")
+    location: str = Field(description="City name,include city and county")
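The hard-coded key is replaced with a placeholder, so callers must substitute their own weather-service key before the tool works. A minimal usage sketch (the import path is an assumption, since this view does not show the file name):

# Hypothetical module path; adjust to wherever this tool lives in the repo.
from server.agent.tools.weather_check import weathercheck, WeatherInput

query = WeatherInput(location="厦门")  # validated tool input
print(weathercheck(query.location))    # requires a real key in place of "your keys"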

View File

@@ -18,7 +18,7 @@ class GeminiWorker(ApiModelWorker):
             **kwargs,
     ):
         kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
-        kwargs.setdefault("context_len", 4096)  # TODO: needs to be changed to 16384 for 16K models
+        kwargs.setdefault("context_len", 4096)
         super().__init__(**kwargs)

     def create_gemini_messages(self,messages) -> json:
@@ -47,10 +47,10 @@ class GeminiWorker(ApiModelWorker):
         params.load_config(self.model_names[0])
         data = self.create_gemini_messages(messages=params.messages)
         generationConfig=dict(
-            temperature = params.temperature,
-            topK = 1,
-            topP = 1,
-            maxOutputTokens = 4096,
+            temperature=params.temperature,
+            topK=1,
+            topP=1,
+            maxOutputTokens=4096,
             stopSequences=[]
         )
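This hunk is a style cleanup with no behavior change: generationConfig mirrors the generationConfig object of Google's public Gemini REST API. A minimal sketch of the kind of request the worker ultimately issues (endpoint and payload shape follow the public v1beta generateContent API; anything not visible in this diff is an assumption):

import requests

API_KEY = "your-api-key"  # placeholder
url = ("https://generativelanguage.googleapis.com/v1beta/"
       f"models/gemini-pro:generateContent?key={API_KEY}")
payload = {
    # create_gemini_messages() builds this "contents" list from chat history.
    "contents": [{"role": "user", "parts": [{"text": "Hello"}]}],
    "generationConfig": {
        "temperature": 0.7,
        "topK": 1,
        "topP": 1,
        "maxOutputTokens": 4096,
        "stopSequences": [],
    },
}
resp = requests.post(url, json=payload, timeout=60)
print(resp.json()["candidates"][0]["content"]["parts"][0]["text"])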