diff --git a/ktransformers/server/schemas/endpoints/chat.py b/ktransformers/server/schemas/endpoints/chat.py
index 5507266..821b3b9 100644
--- a/ktransformers/server/schemas/endpoints/chat.py
+++ b/ktransformers/server/schemas/endpoints/chat.py
@@ -25,10 +25,9 @@ class ChatCompletionCreate(BaseModel):
     messages: List[Message]
     model : str
     stream : bool = False
-    temperature: Optional[float]
-    top_p: Optional[float]
-    frequency_penalty: Optional[float]
-
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+
     def get_tokenizer_messages(self):
         return [m.to_tokenizer_message() for m in self.messages]
 
diff --git a/ktransformers/server/schemas/legacy/completions.py b/ktransformers/server/schemas/legacy/completions.py
index ca4b89c..ea936ea 100644
--- a/ktransformers/server/schemas/legacy/completions.py
+++ b/ktransformers/server/schemas/legacy/completions.py
@@ -9,9 +9,8 @@ class CompletionCreate(BaseModel):
     model: str
    prompt: str | List[str]
     stream: bool = False
-    temperature: Optional[float]
-    top_p: Optional[float]
-    frequency_penalty: Optional[float]
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
 
     def get_tokenizer_messages(self):
         if isinstance(self.prompt,List):
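
For context, the `= None` defaults are what make `temperature` and `top_p` genuinely optional: under Pydantic v2, a bare `Optional[float]` annotation only allows `None` as a value but still requires the key to be present in the request body, so requests that omit the sampling parameters fail validation. Below is a minimal sketch (not part of the patch; field and model names are illustrative) that assumes Pydantic v2 and demonstrates the difference:

```python
# Minimal sketch, assuming Pydantic v2: Optional[...] alone does not make a
# field optional, it only permits None as a value. Adding "= None" lets the
# key be omitted from the request body entirely.
from typing import Optional

from pydantic import BaseModel, ValidationError


class Before(BaseModel):
    model: str
    temperature: Optional[float]          # required key, value may be None
    top_p: Optional[float]                # required key, value may be None


class After(BaseModel):
    model: str
    temperature: Optional[float] = None   # key may be omitted
    top_p: Optional[float] = None         # key may be omitted


# Hypothetical request body that omits the sampling parameters.
payload = {"model": "example-model"}

try:
    Before(**payload)
except ValidationError as exc:
    # Pydantic v2 rejects the payload: temperature and top_p are missing.
    print(len(exc.errors()), "validation errors")  # -> 2 validation errors

print(After(**payload))  # -> model='example-model' temperature=None top_p=None
```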