Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git
* dev branch: resolve the pydantic version conflict, add Ollama configuration, and support Ollama chat and embedding interfaces.

1. The dev branch upgraded pydantic to v2, but class History(BaseModel) still used from server.pydantic_v1 while FastAPI now references pydantic v2. FastAPI therefore validated a v1-defined object with the v2 machinery, and whenever the conversation history was non-empty it raised: TypeError: BaseModel.validate() takes 2 positional arguments but 3 were given. Testing showed the fix is to define class History(BaseModel) with the v2 version as well.
2. Ollama platform settings were added to the configuration files, following the pattern of the other platforms. Chat models can be added by users as needed; for embeddings, nomic-embed-text is currently supported (Ollama must be upgraded to 0.1.29 or later).
3. Ollama's official OpenAI API compatibility only covers chat; its embeddings API has not been adapted yet. Fortunately the official LangChain library provides OllamaEmbeddings, so the corresponding support code was added to the get_Embeddings method (see the sketch below).

* Fix compatibility issues with DocumentWithVsID and /v1/embeddings after the pydantic upgrade to v2.

---------
Co-authored-by: srszzw <srszzw@163.com>
Co-authored-by: liunux4odoo <liunux@qq.com>
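A minimal sketch of the two code-level changes described above. The import path server.pydantic_v2, the class name History, the function name get_Embeddings, and the model name nomic-embed-text come from the commit; the History field names, the platform check, and the base URL are illustrative assumptions, not the project's actual code:

# Point 1: define History against the same pydantic major version FastAPI uses,
# so request validation no longer mixes v1 and v2 models.
from server.pydantic_v2 import BaseModel  # previously: from server.pydantic_v1 import BaseModel

class History(BaseModel):
    role: str      # field names shown here are illustrative
    content: str

# Point 3: Ollama's OpenAI-compatible endpoint only covers chat, not embeddings,
# so embedding requests are routed through LangChain's OllamaEmbeddings client.
from langchain_community.embeddings import OllamaEmbeddings

def get_Embeddings(embed_model: str):
    # ... branches for the other supported platforms would go here ...
    if embed_model == "nomic-embed-text":  # requires Ollama >= 0.1.29
        return OllamaEmbeddings(
            model=embed_model,
            base_url="http://127.0.0.1:11434",  # assumed default local Ollama endpoint
        )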
from __future__ import annotations

import re
from typing import Dict, List, Literal, Optional, Union

from fastapi import UploadFile
from server.pydantic_v2 import BaseModel, Field, AnyUrl, root_validator
from openai.types.chat import (
    ChatCompletionMessageParam,
    ChatCompletionToolChoiceOptionParam,
    ChatCompletionToolParam,
    completion_create_params,
)

from configs import DEFAULT_LLM_MODEL, TEMPERATURE, LLM_MODEL_CONFIG


# Common request fields shared by every OpenAI-compatible endpoint below.
class OpenAIBaseInput(BaseModel):
    user: Optional[str] = None
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Optional[Dict] = None
    extra_query: Optional[Dict] = None
    extra_body: Optional[Dict] = None
    timeout: Optional[float] = None

    class Config:
        extra = "allow"


# Request body for the OpenAI-compatible chat completions endpoint.
class OpenAIChatInput(OpenAIBaseInput):
    messages: List[ChatCompletionMessageParam]
    model: str = DEFAULT_LLM_MODEL
    frequency_penalty: Optional[float] = None
    function_call: Optional[completion_create_params.FunctionCall] = None
    functions: Optional[List[completion_create_params.Function]] = None
    logit_bias: Optional[Dict[str, int]] = None
    logprobs: Optional[bool] = None
    max_tokens: Optional[int] = None
    n: Optional[int] = None
    presence_penalty: Optional[float] = None
    response_format: Optional[completion_create_params.ResponseFormat] = None
    seed: Optional[int] = None
    stop: Union[Optional[str], List[str]] = None
    stream: Optional[bool] = None
    temperature: Optional[float] = TEMPERATURE
    tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None
    tools: Optional[List[ChatCompletionToolParam]] = None
    top_logprobs: Optional[int] = None
    top_p: Optional[float] = None


class OpenAIEmbeddingsInput(OpenAIBaseInput):
    input: Union[str, List[str]]
    model: str
    dimensions: Optional[int] = None
    encoding_format: Optional[Literal["float", "base64"]] = None


class OpenAIImageBaseInput(OpenAIBaseInput):
    model: str
    n: int = 1
    response_format: Optional[Literal["url", "b64_json"]] = None
    size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] = "256x256"


class OpenAIImageGenerationsInput(OpenAIImageBaseInput):
    prompt: str
    quality: Optional[Literal["standard", "hd"]] = None
    style: Optional[Literal["vivid", "natural"]] = None


class OpenAIImageVariationsInput(OpenAIImageBaseInput):
    image: Union[UploadFile, AnyUrl]


class OpenAIImageEditsInput(OpenAIImageVariationsInput):
    prompt: str
    mask: Union[UploadFile, AnyUrl]


class OpenAIAudioTranslationsInput(OpenAIBaseInput):
    file: Union[UploadFile, AnyUrl]
    model: str
    prompt: Optional[str] = None
    response_format: Optional[str] = None
    temperature: float = TEMPERATURE


class OpenAIAudioTranscriptionsInput(OpenAIAudioTranslationsInput):
    language: Optional[str] = None
    timestamp_granularities: Optional[List[Literal["word", "segment"]]] = None


class OpenAIAudioSpeechInput(OpenAIBaseInput):
    input: str
    model: str
    voice: str
    response_format: Optional[Literal["mp3", "opus", "aac", "flac", "pcm", "wav"]] = None
    speed: Optional[float] = None