liunux4odoo b4c68ddd05
优化在线 API ,支持 completion 和 embedding,简化在线 API 开发方式 (#1886)
* 优化在线 API ,支持 completion 和 embedding,简化在线 API 开发方式

新功能
- 智谱AI、Minimax、千帆、千问 4 个在线模型支持 embeddings(不通过Fastchat,后续会单独提供相关api接口)
- 在线模型自动检测传入参数,在传入非 messages 格式的 prompt 时,自动转换为 completion 形式,以支持 completion 接口

开发者:
- 重构ApiModelWorker:
  - 所有在线 API 请求封装到 do_chat 方法:自动传入参数 ApiChatParams,简化参数与配置项的获取;自动处理与fastchat的接口
  - 加强 API 请求错误处理,返回更有意义的信息
  - 改用 qianfan sdk 重写 qianfan-api
  - 将所有在线模型的测试用例统一在一起,简化测试用例编写

* Delete requirements_langflow.txt
2023-10-26 22:44:48 +08:00

102 lines
3.3 KiB
Python

import json
import sys
from fastchat.conversation import Conversation
from configs import TEMPERATURE
from http import HTTPStatus
from typing import List, Literal, Dict
from fastchat import conversation as conv
from server.model_workers.base import *
from server.model_workers.base import ApiEmbeddingsParams
class QwenWorker(ApiModelWorker):
    """Model worker that proxies chat and embedding requests to the
    Alibaba DashScope (Qwen) online API, bridging it into fastchat.
    """

    # Default DashScope embedding model used when the caller does not
    # specify one in ApiEmbeddingsParams.
    DEFAULT_EMBED_MODEL = "text-embedding-v1"

    def __init__(
        self,
        *,
        version: Literal["qwen-turbo", "qwen-plus"] = "qwen-turbo",
        model_names: List[str] = ["qwen-api"],
        controller_addr: str = None,
        worker_addr: str = None,
        **kwargs,
    ):
        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
        # Qwen supports a large context window; 16k is the configured default.
        kwargs.setdefault("context_len", 16384)
        super().__init__(**kwargs)
        self.version = version

    def do_chat(self, params: ApiChatParams) -> Dict:
        """Stream chat completions from DashScope.

        Yields dicts of the form {"error_code": 0, "text": ...} on success,
        or {"error_code": <status>, "text": <message>} on API errors.
        """
        import dashscope
        params.load_config(self.model_names[0])

        gen = dashscope.Generation()
        responses = gen.call(
            model=params.version,
            temperature=params.temperature,
            api_key=params.api_key,
            messages=params.messages,
            result_format='message',  # ask the API for message-format results
            stream=True,
        )

        for resp in responses:
            if resp["status_code"] == 200:
                if choices := resp["output"]["choices"]:
                    yield {
                        "error_code": 0,
                        "text": choices[0]["message"]["content"],
                    }
            else:
                yield {
                    "error_code": resp["status_code"],
                    "text": resp["message"],
                }

    def do_embeddings(self, params: ApiEmbeddingsParams) -> Dict:
        """Embed ``params.texts`` via DashScope TextEmbedding.

        Returns {"code": 200, "embeddings": [...]} on success, or
        {"code": <status>, "msg": <message>} on the first failed batch.
        """
        import dashscope
        params.load_config(self.model_names[0])

        embeddings = []
        # The DashScope TextEmbedding API accepts at most 25 texts per
        # request, so larger inputs must be sent in batches of 25.
        batch_size = 25
        for i in range(0, len(params.texts), batch_size):
            resp = dashscope.TextEmbedding.call(
                model=params.embed_model or self.DEFAULT_EMBED_MODEL,
                input=params.texts[i:i + batch_size],
                api_key=params.api_key,
            )
            if resp["status_code"] != 200:
                # Fail fast on the first bad batch; use subscript access
                # consistently with the status-code check above.
                return {"code": resp["status_code"], "msg": resp["message"]}
            embeddings.extend(x["embedding"] for x in resp["output"]["embeddings"])
        return {"code": 200, "embeddings": embeddings}

    def get_embeddings(self, params):
        # TODO: embeddings via the fastchat worker interface are not yet
        # implemented; do_embeddings above is the supported entry point.
        print("embedding")
        print(params)

    def make_conv_template(self, conv_template: str = None, model_path: str = None) -> Conversation:
        # TODO: confirm whether this conversation template needs adjusting.
        return conv.Conversation(
            name=self.model_names[0],
            system_message="你是一个聪明、对人类有帮助的人工智能,你可以对人类提出的问题给出有用、详细、礼貌的回答。",
            messages=[],
            roles=["user", "assistant", "system"],
            sep="\n### ",
            stop_str="###",
        )
if __name__ == "__main__":
    import uvicorn
    from server.utils import MakeFastAPIOffline
    from fastchat.serve.model_worker import app

    # Build the worker and register it on fastchat's model_worker module so
    # the FastAPI routes dispatch to this QwenWorker instance.
    qwen_worker = QwenWorker(
        controller_addr="http://127.0.0.1:20001",
        worker_addr="http://127.0.0.1:20007",
    )
    sys.modules["fastchat.serve.model_worker"].worker = qwen_worker

    MakeFastAPIOffline(app)
    uvicorn.run(app, port=20007)