fix: respect vision resolution settings in the basic chatbot, text generator, and parameter extractor nodes (#16041)

Author: kurokobo
Date: 2025-03-18 15:37:07 +09:00
Committed by: GitHub
Parent: e91531fc23
Commit: 86d3fff666
6 changed files with 84 additions and 11 deletions
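
For context on the fix: each of these nodes exposes a vision resolution setting, stored on the node as node_data.vision.configs.detail, but the prompt-building helpers never received it, so images were always sent at the model's default detail. The hunks below thread the setting through as a vision_detail parameter and hand it to the prompt transform as image_detail_config. A minimal sketch of where the value lives, using hypothetical stand-in classes rather than the real Dify models:

from dataclasses import dataclass, field


@dataclass
class VisionConfigs:  # stand-in for node_data.vision.configs
    detail: str = "high"  # "low" or "high": the resolution setting in the node UI


@dataclass
class VisionConfig:  # stand-in for node_data.vision
    enabled: bool = False
    configs: VisionConfigs = field(default_factory=VisionConfigs)


vision = VisionConfig(enabled=True, configs=VisionConfigs(detail="low"))
print(vision.configs.detail)  # -> "low"; the value the diff now forwards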


@@ -7,6 +7,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.file import File
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
+from core.model_runtime.entities import ImagePromptMessageContent
 from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
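
The single added import makes ImagePromptMessageContent available so the signatures below can annotate the new parameter with its nested DETAIL type. A hedged sketch of the shape those annotations assume (the real class lives in core.model_runtime.entities.message_entities and carries more fields):

from enum import Enum


class ImagePromptMessageContent:  # sketch only: real fields omitted
    class DETAIL(str, Enum):
        LOW = "low"
        HIGH = "high"


# As a str-valued enum it compares and serializes as a plain string:
assert ImagePromptMessageContent.DETAIL.HIGH == "high"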
@@ -129,6 +130,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=node_data.vision.configs.detail,
             )
         else:
             # use prompt engineering
@@ -139,6 +141,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=node_data.vision.configs.detail,
             )

         prompt_message_tools = []
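
Both strategies, function calling and plain prompt engineering, now forward the same value, so the resolution setting is respected regardless of which path the model supports. A condensed sketch of that dispatch, with hypothetical builder functions standing in for the two private methods:

from typing import Callable, Optional


def build_function_call_prompt(*, vision_detail: Optional[str] = None) -> str:
    return f"function-call prompt (detail={vision_detail})"


def build_prompt_engineering_prompt(*, vision_detail: Optional[str] = None) -> str:
    return f"prompt-engineering prompt (detail={vision_detail})"


def build(detail: Optional[str], supports_function_calling: bool) -> str:
    # Mirrors the two hunks above: whichever branch runs, the node's
    # vision detail travels with the call.
    builder: Callable[..., str] = (
        build_function_call_prompt if supports_function_calling else build_prompt_engineering_prompt
    )
    return builder(vision_detail=detail)


print(build("high", True))   # function-call prompt (detail=high)
print(build("low", False))   # prompt-engineering prompt (detail=low)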
@@ -267,6 +270,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], list[PromptMessageTool]]:
         """
         Generate function call prompt.
@@ -289,6 +293,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=None,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         # find last user message
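
Downstream, the prompt transform that receives image_detail_config is expected to stamp the value onto each image it builds from a File. That consumer is not part of this diff; a hedged sketch of what it plausibly does, with a plain dict standing in for the real message-content type:

from typing import Optional


def file_to_image_content(url: str, image_detail_config: Optional[str] = None) -> dict:
    # Assumed behavior: each image carries the configured detail,
    # falling back to a conservative default when none was set.
    return {"type": "image", "url": url, "detail": image_detail_config or "low"}


contents = [
    file_to_image_content(u, image_detail_config="high")
    for u in ("https://example.com/a.png", "https://example.com/b.png")
]
assert all(c["detail"] == "high" for c in contents)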
@@ -347,6 +352,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate prompt engineering prompt.
@@ -361,6 +367,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=vision_detail,
             )
         elif model_mode == ModelMode.CHAT:
             return self._generate_prompt_engineering_chat_prompt(
@@ -370,6 +377,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=vision_detail,
             )
         else:
             raise InvalidModelModeError(f"Invalid model mode: {model_mode}")
@@ -382,6 +390,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate completion prompt.
@@ -402,6 +411,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=memory,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         return prompt_messages
@@ -414,6 +424,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate chat prompt.
@@ -441,6 +452,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=None,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         # find last user message
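
Once the value reaches the model runtime it typically surfaces in the provider request. For OpenAI-compatible vision models, image parts accept a detail field per the public API, and that field is ultimately what this node setting controls; the URL below is illustrative:

# OpenAI-style image part once the configured detail is applied:
image_part = {
    "type": "image_url",
    "image_url": {
        "url": "https://example.com/photo.png",
        "detail": "high",  # previously the node's setting never made it this far
    },
}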