mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git (synced 2026-02-09 08:45:44 +08:00)
parent b3dee0b1d1
commit 5169228b86
@@ -1,3 +1,4 @@
+import json
 import logging
 from collections.abc import Generator
 from typing import Optional, Union
@@ -5,13 +6,14 @@ from typing import Optional, Union
 import google.api_core.exceptions as exceptions
 import google.generativeai as genai
 import google.generativeai.client as client
+from google.ai.generativelanguage_v1beta import FunctionCall, FunctionResponse
 from google.generativeai.types import (
     ContentType,
     GenerateContentResponse,
     HarmBlockThreshold,
     HarmCategory,
 )
-from google.generativeai.types.content_types import to_part
+from google.generativeai.types.content_types import to_part, FunctionDeclaration, Tool, FunctionLibrary

 from model_providers.core.model_runtime.entities.llm_entities import (
     LLMResult,
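Side note on the two new import lines: `FunctionCall` and `FunctionResponse` are proto-plus messages from the generativelanguage API, and their `args`/`response` fields are protobuf Structs that can be read like dicts. A minimal sketch of both types, assuming the google-generativeai package (which pulls in google-ai-generativelanguage) is installed; the tool name and values are hypothetical:

from google.ai.generativelanguage_v1beta import FunctionCall, FunctionResponse

# A tool call as the model would emit it; proto-plus accepts a plain dict
# for the Struct-typed `args` field.
fc = FunctionCall(name="get_weather", args={"city": "Berlin"})
print(fc.name)                    # get_weather
print(dict(fc.args.items()))      # {'city': 'Berlin'}

# FunctionResponse is the reverse direction: a tool result fed back to the model.
fr = FunctionResponse(name="get_weather", response={"temp_c": 21})
print(dict(fr.response.items()))  # {'temp_c': 21.0} (Struct numbers come back as floats)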
@@ -81,7 +83,7 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
         """
         # invoke model
         return self._generate(
-            model, credentials, prompt_messages, model_parameters, stop, stream, user
+            model, credentials, prompt_messages, model_parameters, tools, stop, stream, user
         )

     def get_num_tokens(
@@ -143,6 +145,7 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
         credentials: dict,
         prompt_messages: list[PromptMessage],
         model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
         stop: Optional[list[str]] = None,
         stream: bool = True,
         user: Optional[str] = None,
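These two hunks are plumbing: `_invoke` now forwards an optional `tools` list into `_generate`, whose signature gains the matching parameter. A hypothetical call-site sketch follows; the `PromptMessageTool` fields shown (name, description, parameters) are exactly the ones the conversion loop further down reads, while the import path, model id, and credential key are assumptions, not taken from this diff:

from model_providers.core.model_runtime.entities.message_entities import (
    PromptMessageTool,
    UserPromptMessage,
)

# A JSON-Schema-style tool definition, in the shape the conversion loop expects.
weather_tool = PromptMessageTool(
    name="get_weather",
    description="Look up the current weather for a city",
    parameters={
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
)

# llm = GoogleLargeLanguageModel()
# result = llm._invoke(
#     model="gemini-pro",                     # assumed model id
#     credentials={"google_api_key": "..."},  # assumed credential key
#     prompt_messages=[UserPromptMessage(content="Weather in Berlin?")],
#     model_parameters={"temperature": 0.2},
#     tools=[weather_tool],
#     stream=False,
# )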
@@ -160,9 +163,13 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
         :return: full response or stream response chunk generator result
         """
         config_kwargs = model_parameters.copy()
-        config_kwargs["max_output_tokens"] = config_kwargs.pop(
+        config_kwargs.pop(
             "max_tokens_to_sample", None
         )
+        # https://github.com/google/generative-ai-python/issues/170
+        # config_kwargs["max_output_tokens"] = config_kwargs.pop(
+        #     "max_tokens_to_sample", None
+        # )

         if stop:
             config_kwargs["stop_sequences"] = stop
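The edit above stops remapping `max_tokens_to_sample` to `max_output_tokens` and simply discards it, working around the `GenerationConfig` issue linked in the new comment; the old remapping stays commented out for when the upstream fix lands. A standalone sketch of the resulting behavior:

import google.generativeai as genai

model_parameters = {"temperature": 0.7, "top_p": 0.9, "max_tokens_to_sample": 1024}

config_kwargs = model_parameters.copy()
config_kwargs.pop("max_tokens_to_sample", None)  # dropped, not remapped

# Only the surviving keys reach GenerationConfig, so the output-length cap
# is left at the API default until the upstream bug is fixed.
generation_config = genai.types.GenerationConfig(**config_kwargs)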
@@ -197,12 +204,21 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
             HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
             HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
         }
+        tools_one = []
+        for tool in tools or []:  # tools is Optional; guard against None
+            one_tool = Tool(function_declarations=[FunctionDeclaration(
+                name=tool.name,
+                description=tool.description,
+                parameters=tool.parameters,
+            )])
+            tools_one.append(one_tool)

         response = google_model.generate_content(
             contents=history,
             generation_config=genai.types.GenerationConfig(**config_kwargs),
             stream=stream,
             safety_settings=safety_settings,
+            tools=FunctionLibrary(tools=tools_one),
         )

         if stream:
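The new loop wraps each framework tool in a single-declaration `Tool`; `FunctionDeclaration.parameters` takes the same JSON-Schema-style dict used by the OpenAI function-calling format, so `tool.parameters` can be passed through unchanged. A minimal end-to-end sketch of the same SDK calls outside this class (API key and model id are placeholders):

import google.generativeai as genai
from google.generativeai.types.content_types import (
    FunctionDeclaration,
    FunctionLibrary,
    Tool,
)

genai.configure(api_key="YOUR_API_KEY")  # placeholder

declaration = FunctionDeclaration(
    name="get_weather",
    description="Look up the current weather for a city",
    parameters={
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
)
library = FunctionLibrary(tools=[Tool(function_declarations=[declaration])])

model = genai.GenerativeModel("gemini-pro")  # placeholder model id
response = model.generate_content(
    "What is the weather in Berlin?",
    tools=library,
)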
@@ -230,8 +246,23 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
         :param prompt_messages: prompt messages
         :return: llm response
         """
+        part = response.candidates[0].content.parts[0]
+        part_message_function_call = part.function_call
+        tool_calls = []
+        if part_message_function_call:
+            function_call = self._extract_response_function_call(
+                part_message_function_call
+            )
+            tool_calls.append(function_call)
+        part_message_function_response = part.function_response
+        if part_message_function_response:
+            function_call = self._extract_response_function_call(
+                part_message_function_response
+            )
+            tool_calls.append(function_call)
+
         # transform assistant message to prompt message
-        assistant_prompt_message = AssistantPromptMessage(content=response.text)
+        assistant_prompt_message = AssistantPromptMessage(content=part.text, tool_calls=tool_calls)

         # calculate num tokens
         prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
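Two details in this hunk are easy to miss. First, only `parts[0]` of the first candidate is inspected, so a reply that spreads text and a function call across several parts would lose the extra parts. Second, switching from `response.text` to `part.text` avoids the SDK accessor that raises when a candidate carries a `function_call` instead of plain text; the raw proto field just comes back empty. A sketch with a hand-built part (hypothetical values):

from google.ai.generativelanguage_v1beta import Content, FunctionCall, Part

# Stand-in for response.candidates[0].content on a tool-calling turn.
content = Content(
    parts=[Part(function_call=FunctionCall(name="get_weather", args={"city": "Berlin"}))]
)
part = content.parts[0]

print(repr(part.text))              # '' -- empty, does not raise
if part.function_call:              # proto-plus messages are falsy when empty
    print(part.function_call.name)  # get_weather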
@@ -413,3 +444,37 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
                 exceptions.Cancelled,
             ],
         }
+
+    def _extract_response_function_call(
+        self, response_function_call: Union[FunctionCall, FunctionResponse]
+    ) -> AssistantPromptMessage.ToolCall:
+        """
+        Extract function call from response
+
+        :param response_function_call: response function call
+        :return: tool call
+        """
+        tool_call = None
+        if response_function_call:
+            from google.protobuf import json_format
+
+            if isinstance(response_function_call, FunctionCall):
+                map_composite_dict = dict(response_function_call.args.items())
+                function = AssistantPromptMessage.ToolCall.ToolCallFunction(
+                    name=response_function_call.name,
+                    arguments=str(map_composite_dict),
+                )
+            elif isinstance(response_function_call, FunctionResponse):
+                map_composite_dict = dict(response_function_call.response.items())
+                function = AssistantPromptMessage.ToolCall.ToolCallFunction(
+                    name=response_function_call.name,
+                    arguments=str(map_composite_dict),
+                )
+            else:
+                raise ValueError(f"Unsupported response_function_call type: {type(response_function_call)}")
+
+            tool_call = AssistantPromptMessage.ToolCall(
+                id=response_function_call.name, type="function", function=function
+            )
+
+        return tool_call
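Usage note on the new helper: `arguments=str(map_composite_dict)` serializes with Python's repr (single quotes), not JSON, so a consumer that calls `json.loads()` on the arguments will fail; the `import json` added at the top of the file suggests `json.dumps` as the safer serialization, though the commit does not use it here. The locally imported `json_format` is also unused as written. A small sketch contrasting the two serializations (tool name and args are hypothetical):

import json

from google.ai.generativelanguage_v1beta import FunctionCall

fc = FunctionCall(name="get_weather", args={"city": "Berlin"})
args_dict = dict(fc.args.items())

print(str(args_dict))         # {'city': 'Berlin'}  -- Python repr, not valid JSON
print(json.dumps(args_dict))  # {"city": "Berlin"}  -- safe for json.loads downstream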