mirror of
https://github.com/RYDE-WORK/Langchain-Chatchat.git
synced 2026-01-26 00:33:35 +08:00
## 🛠 新增功能 - 支持百川在线模型 (@hzg0601 @liunux4odoo in #1623) - 支持 Azure OpenAI 与 claude 等 Langchain 自带模型 (@zRzRzRzRzRzRzR in #1808) - Agent 功能大量更新,支持更多的工具、更换提示词、检索知识库 (@zRzRzRzRzRzRzR in #1626 #1666 #1785) - 加长 32k 模型的历史记录 (@zRzRzRzRzRzRzR in #1629 #1630) - *_chat 接口支持 max_tokens 参数 (@liunux4odoo in #1744) - 实现 API 和 WebUI 的前后端分离 (@liunux4odoo in #1772) - 支持 zilliz 向量库 (@zRzRzRzRzRzRzR in #1785) - 支持 metaphor 搜索引擎 (@liunux4odoo in #1792) - 支持 p-tuning 模型 (@hzg0601 in #1810) - 更新完善文档和 Wiki (@imClumsyPanda @zRzRzRzRzRzRzR @glide-the in #1680 #1811) ## 🐞 问题修复 - 修复 bge-* 模型匹配超过 1 的问题 (@zRzRzRzRzRzRzR in #1652) - 修复系统代理为空的问题 (@glide-the in #1654) - 修复重建知识库时 `d == self.d assert error` (@liunux4odoo in #1766) - 修复对话历史消息错误 (@liunux4odoo in #1801) - 修复 OpenAI 无法调用的 bug (@zRzRzRzRzRzRzR in #1808) - 修复 windows 下 BIND_HOST=0.0.0.0 时对话出错的问题 (@hzg0601 in #1810)
59 lines
1.8 KiB
Python
59 lines
1.8 KiB
Python
from typing import List, Optional

import openai
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from configs import LLM_MODEL, logger, log_verbose
from server.utils import get_model_worker_config, fschat_openai_api_address
|
||
|
||
class OpenAiMessage(BaseModel):
    """A single chat message in the OpenAI chat-completion format."""
    # Message author role; only the "user" default is visible here —
    # presumably also accepts "assistant"/"system" per the OpenAI API; confirm.
    role: str = "user"
    # Message text payload.
    content: str = "hello"
|
||
|
||
|
||
class OpenAiChatMsgIn(BaseModel):
    """Request body for the OpenAI-compatible chat endpoint.

    Mirrors the parameters of `openai.ChatCompletion.create`; the whole
    model is forwarded via `msg.dict()` in `openai_chat`.
    """
    # Target model name; defaults to the globally configured LLM.
    model: str = LLM_MODEL
    # Conversation history, oldest first.
    messages: List[OpenAiMessage]
    temperature: float = 0.7
    n: int = 1
    # Fix: was `int = None`, which only worked via pydantic v1's implicit
    # Optional inference and is rejected by pydantic v2. Explicit Optional
    # keeps the same runtime behavior (None = no token limit).
    max_tokens: Optional[int] = None
    # Mutable default is safe here: pydantic deep-copies field defaults
    # per instance, unlike plain Python defaults.
    stop: List[str] = []
    stream: bool = False
    presence_penalty: int = 0
    frequency_penalty: int = 0
|
||
|
||
|
||
async def openai_chat(msg: OpenAiChatMsgIn):
    """Proxy an OpenAI-style chat request to the configured model worker.

    Configures the `openai` client from the model worker's config (falling
    back to the local fschat OpenAI-compatible API address), then streams
    the completion back as a `text/event-stream` response.

    Args:
        msg: the full request body; forwarded to the API via `msg.dict()`.

    Returns:
        StreamingResponse yielding content chunks (when `msg.stream`) or
        the single final answer otherwise.
    """
    config = get_model_worker_config(msg.model)
    openai.api_key = config.get("api_key", "EMPTY")
    openai.api_base = config.get("api_base_url", fschat_openai_api_address())
    # Fix: the original `print(f"{openai.api_key=}")` leaked the API key to
    # stdout. Log only non-secret request info instead.
    logger.info(f"openai_chat: api_base={openai.api_base}, model={msg.model}")

    async def get_response(request: OpenAiChatMsgIn):
        # One name per role: `params` is the request payload; stream chunks
        # get their own names below (the original reused `data` and `msg`
        # for both, shadowing the outer variables).
        params = request.dict()

        try:
            response = await openai.ChatCompletion.acreate(**params)
            if request.stream:
                async for event in response:
                    if choices := event.choices:
                        if chunk := choices[0].get("delta", {}).get("content"):
                            yield chunk
            else:
                if response.choices:
                    yield response.choices[0].message.content
        except Exception as e:
            # Error is logged only; the client simply receives a stream
            # that ends early (same behavior as the original).
            err_msg = f"获取ChatCompletion时出错:{e}"
            logger.error(f'{e.__class__.__name__}: {err_msg}',
                         exc_info=e if log_verbose else None)

    return StreamingResponse(
        get_response(msg),
        media_type='text/event-stream',
    )
|