fix: respect resolution settings for vision for basic chatbot, text generator, and parameter extractor node (#16041)

kurokobo authored on 2025-03-18 15:37:07 +09:00, committed by GitHub
parent e91531fc23
commit 86d3fff666
6 changed files with 84 additions and 11 deletions


@@ -11,6 +11,7 @@ from core.app.entities.queue_entities import QueueAnnotationReplyEvent
 from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
+from core.model_runtime.entities.message_entities import ImagePromptMessageContent
 from core.moderation.base import ModerationError
 from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
 from extensions.ext_database import db
@@ -50,6 +51,16 @@ class ChatAppRunner(AppRunner):
         query = application_generate_entity.query
         files = application_generate_entity.files
 
+        image_detail_config = (
+            application_generate_entity.file_upload_config.image_config.detail
+            if (
+                application_generate_entity.file_upload_config
+                and application_generate_entity.file_upload_config.image_config
+            )
+            else None
+        )
+        image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
+
         # Pre-calculate the number of tokens of the prompt messages,
         # and return the rest number of tokens by model context token size limit and max token size limit.
         # If the rest number of tokens is not enough, raise exception.
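The added block resolves the per-app image resolution setting with a null-safe fallback: use the detail level configured on the app's image upload settings if the whole chain is present, otherwise default to low detail. Below is a minimal, runnable sketch of that fallback pattern; the Detail, ImageConfig, and FileUploadConfig types are simplified stand-ins for illustration, not the actual Dify entities.

from dataclasses import dataclass
from enum import Enum
from typing import Optional


class Detail(str, Enum):
    # Simplified stand-in for ImagePromptMessageContent.DETAIL in the real codebase.
    LOW = "low"
    HIGH = "high"


@dataclass
class ImageConfig:
    detail: Optional[Detail] = None


@dataclass
class FileUploadConfig:
    image_config: Optional[ImageConfig] = None


def resolve_image_detail(file_upload_config: Optional[FileUploadConfig]) -> Detail:
    # Same fallback chain as the diff: configured detail if present, otherwise LOW.
    detail = (
        file_upload_config.image_config.detail
        if file_upload_config and file_upload_config.image_config
        else None
    )
    return detail or Detail.LOW


# Any missing link in the chain falls back to low-resolution processing.
assert resolve_image_detail(None) is Detail.LOW
assert resolve_image_detail(FileUploadConfig()) is Detail.LOW
assert resolve_image_detail(FileUploadConfig(ImageConfig(Detail.HIGH))) is Detail.HIGH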
@@ -85,6 +96,7 @@ class ChatAppRunner(AppRunner):
             files=files,
             query=query,
             memory=memory,
+            image_detail_config=image_detail_config,
         )
 
         # moderation
@@ -182,6 +194,7 @@ class ChatAppRunner(AppRunner):
             query=query,
             context=context,
             memory=memory,
+            image_detail_config=image_detail_config,
         )
 
         # check hosting moderation
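The remaining hunks thread the resolved value into prompt construction, which is what was previously dropped for these app modes. A rough sketch of the receiving side follows; the ImageContent and UserMessage classes and this organize_prompt_messages signature are illustrative assumptions, not the actual Dify implementation.

from dataclasses import dataclass, field
from typing import List, Union


@dataclass
class ImageContent:
    # Simplified stand-in for ImagePromptMessageContent: carries the detail level per image.
    url: str
    detail: str = "low"


@dataclass
class UserMessage:
    contents: List[Union[str, ImageContent]] = field(default_factory=list)


def organize_prompt_messages(
    files: List[str], query: str, image_detail_config: str = "low"
) -> List[UserMessage]:
    # Attach the single app-level detail setting to every image in the user turn.
    message = UserMessage()
    for url in files:
        message.contents.append(ImageContent(url=url, detail=image_detail_config))
    message.contents.append(query)
    return [message]


# With the fix, a "high" resolution setting reaches the vision content instead of being ignored.
msgs = organize_prompt_messages(["https://example.com/photo.png"], "Describe this image.", "high")
assert msgs[0].contents[0].detail == "high"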