Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git, synced 2026-01-19 21:37:20 +08:00
* close #1172: add log messages to webui_page/utils to make errors easier to locate
* Fix: the page did not show progress in real time while rebuilding the knowledge base
* Skip running model_worker when using an online model API such as ChatGPT
* Changes to knowledge base management:
  1. Add three fields to KnowledgeFileModel: file_mtime (file modification time), file_size (file size), and custom_docs (whether custom docs are used), in preparation for comparing uploaded files later.
  2. Add an explicit length to every String column to prevent MySQL table-creation errors (pr#1177).
  3. Unify the [faiss/milvus/pgvector]_kb_service.add_doc interfaces so they support custom docs.
  4. Add several methods to faiss_kb_service for easier invocation.
  5. Add several methods to KnowledgeFile for fetching file info and caching file2text results.
* Fix /chat/fastchat not producing streaming output
* New features:
  1. Add a "docs_count" field to KnowledgeFileModel, recording the number of Documents the file loaded into the vector store, and display it in the WebUI.
  2. Rebuilding the knowledge base with `python init_database.py --recreate-vs` now supports multithreading.
* Other: unify knowledge-base wording throughout the code: "file" means a file name or path, "doc" means a Document loaded by langchain. Some functions tied to the API or with overlapping meanings remain unchanged for now.
---------
Co-authored-by: liunux4odoo <liunux@qq.com>, hongkong9771
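The commit message above names four new KnowledgeFileModel columns (file_mtime, file_size, custom_docs, docs_count). A minimal sketch of what those columns might look like, assuming SQLAlchemy declarative style; the table name, column types, and defaults here are assumptions, not the project's actual definitions:

from sqlalchemy import Column, Integer, String, Float, Boolean
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class KnowledgeFileModel(Base):
    # Hypothetical reconstruction for illustration; the real model lives in the project.
    __tablename__ = "knowledge_file"
    id = Column(Integer, primary_key=True, autoincrement=True)
    file_name = Column(String(255))               # String columns carry explicit lengths (pr#1177)
    file_mtime = Column(Float, default=0.0)       # file modification time
    file_size = Column(Integer, default=0)        # file size in bytes
    custom_docs = Column(Boolean, default=False)  # whether custom docs were used
    docs_count = Column(Integer, default=0)       # Documents loaded into the vector store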
56 lines
1.6 KiB
Python
from fastapi.responses import StreamingResponse
from typing import List
import openai
from configs.model_config import llm_model_dict, LLM_MODEL, logger
from pydantic import BaseModel

class OpenAiMessage(BaseModel):
    # A single chat message in the OpenAI format.
    role: str = "user"
    content: str = "hello"

class OpenAiChatMsgIn(BaseModel):
    # Request schema mirroring the parameters of OpenAI's chat completion API.
    model: str = LLM_MODEL
    messages: List[OpenAiMessage]
    temperature: float = 0.7
    n: int = 1
    max_tokens: int = 1024
    stop: List[str] = []
    stream: bool = False
    presence_penalty: int = 0
    frequency_penalty: int = 0

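# Illustration only (not part of the original file): a JSON body this schema
# would accept; the values are hypothetical.
#
# {
#     "model": "gpt-3.5-turbo",
#     "messages": [{"role": "user", "content": "hello"}],
#     "stream": true
# }
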
async def openai_chat(msg: OpenAiChatMsgIn):
    # Point the openai client at the endpoint configured for LLM_MODEL.
    openai.api_key = llm_model_dict[LLM_MODEL]["api_key"]
    print(f"{openai.api_key=}")
    openai.api_base = llm_model_dict[LLM_MODEL]["api_base_url"]
    print(f"{openai.api_base=}")
    print(msg)

    def get_response(msg):
        data = msg.dict()

        try:
            response = openai.ChatCompletion.create(**data)
            if msg.stream:
                # Streaming: forward each chunk's delta content as it arrives.
                for data in response:
                    if choices := data.choices:
                        if chunk := choices[0].get("delta", {}).get("content"):
                            print(chunk, end="", flush=True)
                            yield chunk
            else:
                # Non-streaming: yield the complete answer in one piece.
                if response.choices:
                    answer = response.choices[0].message.content
                    print(answer)
                    yield answer
        except Exception as e:
            print(type(e))
            logger.error(e)

    return StreamingResponse(
        get_response(msg),
        media_type='text/event-stream',
    )
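For context, a minimal sketch of how a handler like this could be mounted and consumed. The app setup, route path, port, and client code below are assumptions for illustration, not the project's actual wiring:

from fastapi import FastAPI

app = FastAPI()

# Register the handler; the path "/chat/fastchat" follows the endpoint the
# commit message mentions, but the real registration lives elsewhere.
app.post("/chat/fastchat")(openai_chat)

# Consuming the event stream with httpx (hypothetical host/port):
#
# import httpx
# payload = {"messages": [{"role": "user", "content": "hi"}], "stream": True}
# with httpx.stream("POST", "http://localhost:7861/chat/fastchat", json=payload) as r:
#     for chunk in r.iter_text():
#         print(chunk, end="", flush=True)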