feat(api): Add image multimodal support for LLMNode (#17372)

Enhance `LLMNode` with multimodal capability, introducing support for
image outputs.

This implementation extracts base64-encoded images from LLM responses,
saves them to the storage service, and records the file metadata in the
`ToolFile` table. In conversations, these images are rendered as inline
Markdown images. The images are also included in the `LLMNode` output as
file variables, so subsequent nodes in the workflow can consume them.
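
Roughly, the save step works as sketched below. This is a minimal
illustration only: local disk and a plain dict stand in for Dify's storage
service and the `ToolFile` row, and the helper and field names are
illustrative, not the commit's actual API.

```python
import base64
import uuid
from pathlib import Path


def save_image_output(base64_data: str, mime_type: str, tenant_id: str, out_dir: Path) -> dict:
    """Persist one base64-encoded image from an LLM response and return its metadata."""
    data = base64.b64decode(base64_data)
    extension = mime_type.split("/")[-1]  # e.g. "image/png" -> "png"
    key = f"tools/{tenant_id}/{uuid.uuid4()}.{extension}"
    path = out_dir / key
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_bytes(data)  # the real code saves via the storage service instead
    # Metadata of the kind recorded in the ToolFile table.
    return {"file_key": key, "mimetype": mime_type, "size": len(data)}
```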

Integrating file outputs into workflows requires corresponding adjustments
to the frontend code.

Multimodal output also requires updates to the related model
configurations. For now, this capability is enabled only for Google's
Gemini models.

Close #15814.

Signed-off-by: -LAN- <laipz8200@outlook.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
Author:     QuantumGhost
Date:       2025-04-30 17:28:02 +08:00
Committed:  GitHub
Parent:     6c9a9d344a
Commit:     349c3cf7b8

24 changed files with 971 additions and 191 deletions

File: api/core/model_runtime/entities/message_entities.py

@@ -1,4 +1,5 @@
-from collections.abc import Sequence
+from abc import ABC
+from collections.abc import Mapping, Sequence
 from enum import Enum, StrEnum
 from typing import Annotated, Any, Literal, Optional, Union
@@ -60,8 +61,12 @@ class PromptMessageContentType(StrEnum):
     DOCUMENT = "document"
 
 
-class PromptMessageContent(BaseModel):
-    pass
+class PromptMessageContent(ABC, BaseModel):
+    """
+    Model class for prompt message content.
+    """
+
+    type: PromptMessageContentType
 
 
 class TextPromptMessageContent(PromptMessageContent):
@@ -125,7 +130,16 @@ PromptMessageContentUnionTypes = Annotated[
 ]
 
 
-class PromptMessage(BaseModel):
+CONTENT_TYPE_MAPPING: Mapping[PromptMessageContentType, type[PromptMessageContent]] = {
+    PromptMessageContentType.TEXT: TextPromptMessageContent,
+    PromptMessageContentType.IMAGE: ImagePromptMessageContent,
+    PromptMessageContentType.AUDIO: AudioPromptMessageContent,
+    PromptMessageContentType.VIDEO: VideoPromptMessageContent,
+    PromptMessageContentType.DOCUMENT: DocumentPromptMessageContent,
+}
+
+
+class PromptMessage(ABC, BaseModel):
     """
     Model class for prompt message.
     """
@@ -142,6 +156,23 @@ class PromptMessage(BaseModel):
         """
         return not self.content
 
+    @field_validator("content", mode="before")
+    @classmethod
+    def validate_content(cls, v):
+        if isinstance(v, list):
+            prompts = []
+            for prompt in v:
+                if isinstance(prompt, PromptMessageContent):
+                    if not isinstance(prompt, TextPromptMessageContent | MultiModalPromptMessageContent):
+                        prompt = CONTENT_TYPE_MAPPING[prompt.type].model_validate(prompt.model_dump())
+                elif isinstance(prompt, dict):
+                    prompt = CONTENT_TYPE_MAPPING[prompt["type"]].model_validate(prompt)
+                else:
+                    raise ValueError(f"invalid prompt message {prompt}")
+                prompts.append(prompt)
+            return prompts
+        return v
+
     @field_serializer("content")
     def serialize_content(
         self, content: Optional[Union[str, Sequence[PromptMessageContent]]]
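
The `validate_content` hook added above lets callers pass plain dicts for
message content and get typed content objects back. A small usage sketch
(illustrative, not test code from this commit):

```python
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    TextPromptMessageContent,
)

# A dict with a recognized "type" key is promoted to the matching content
# class via CONTENT_TYPE_MAPPING before normal pydantic validation runs.
message = AssistantPromptMessage(content=[{"type": "text", "data": "hello"}])
assert isinstance(message.content[0], TextPromptMessageContent)
assert message.content[0].data == "hello"
```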

File: api/core/model_runtime/model_providers/__base/large_language_model.py

@@ -2,7 +2,7 @@ import logging
 import time
 import uuid
 from collections.abc import Generator, Sequence
-from typing import Optional, Union, cast
+from typing import Optional, Union
 
 from pydantic import ConfigDict
@@ -13,14 +13,15 @@ from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk,
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
     PromptMessage,
+    PromptMessageContentUnionTypes,
     PromptMessageTool,
+    TextPromptMessageContent,
 )
 from core.model_runtime.entities.model_entities import (
     ModelType,
     PriceType,
 )
 from core.model_runtime.model_providers.__base.ai_model import AIModel
-from core.model_runtime.utils.helper import convert_llm_result_chunk_to_str
 from core.plugin.impl.model import PluginModelClient
 
 logger = logging.getLogger(__name__)
@@ -238,7 +239,7 @@ class LargeLanguageModel(AIModel):
     def _invoke_result_generator(
         self,
         model: str,
-        result: Generator,
+        result: Generator[LLMResultChunk, None, None],
         credentials: dict,
         prompt_messages: Sequence[PromptMessage],
         model_parameters: dict,
@@ -255,11 +256,21 @@
         :return: result generator
         """
         callbacks = callbacks or []
-        assistant_message = AssistantPromptMessage(content="")
+        message_content: list[PromptMessageContentUnionTypes] = []
         usage = None
         system_fingerprint = None
         real_model = model
 
+        def _update_message_content(content: str | list[PromptMessageContentUnionTypes] | None):
+            if not content:
+                return
+            if isinstance(content, list):
+                message_content.extend(content)
+                return
+            if isinstance(content, str):
+                message_content.append(TextPromptMessageContent(data=content))
+                return
+
         try:
             for chunk in result:
                 # Following https://github.com/langgenius/dify/issues/17799,
@@ -281,9 +292,8 @@ class LargeLanguageModel(AIModel):
callbacks=callbacks,
)
text = convert_llm_result_chunk_to_str(chunk.delta.message.content)
current_content = cast(str, assistant_message.content)
assistant_message.content = current_content + text
_update_message_content(chunk.delta.message.content)
real_model = chunk.model
if chunk.delta.usage:
usage = chunk.delta.usage
@@ -293,6 +303,7 @@
         except Exception as e:
             raise self._transform_invoke_error(e)
 
+        assistant_message = AssistantPromptMessage(content=message_content)
         self._trigger_after_invoke_callbacks(
             model=model,
             result=LLMResult(
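
The `_update_message_content` closure replaces plain string concatenation
with typed accumulation, so image parts survive streaming intact. A
standalone sketch of the same logic (mirroring the closure, not the class
code itself):

```python
from core.model_runtime.entities.message_entities import TextPromptMessageContent

message_content = []


def update(content):
    # Mirrors _update_message_content: None/empty chunks are ignored, list
    # chunks of typed parts extend the buffer directly, and bare strings
    # are wrapped in TextPromptMessageContent.
    if not content:
        return
    if isinstance(content, list):
        message_content.extend(content)
    elif isinstance(content, str):
        message_content.append(TextPromptMessageContent(data=content))


update("Hello, ")
update(None)  # ignored
update([TextPromptMessageContent(data="world")])
print("".join(part.data for part in message_content))  # Hello, world
```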

File: api/core/model_runtime/utils/helper.py

@@ -1,8 +1,6 @@
 import pydantic
 from pydantic import BaseModel
 
-from core.model_runtime.entities.message_entities import PromptMessageContentUnionTypes
-
 
 def dump_model(model: BaseModel) -> dict:
     if hasattr(pydantic, "model_dump"):
@@ -10,18 +8,3 @@ def dump_model(model: BaseModel) -> dict:
         return pydantic.model_dump(model)  # type: ignore
     else:
         return model.model_dump()
-
-
-def convert_llm_result_chunk_to_str(content: None | str | list[PromptMessageContentUnionTypes]) -> str:
-    if content is None:
-        message_text = ""
-    elif isinstance(content, str):
-        message_text = content
-    elif isinstance(content, list):
-        # Assuming the list contains PromptMessageContent objects with a "data" attribute
-        message_text = "".join(
-            item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item) for item in content
-        )
-    else:
-        message_text = str(content)
-    return message_text
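
With the helper removed, callers keep typed content end to end and flatten
to text only where plain text is actually needed. Where that is required,
an equivalent is a short filter over typed parts (a sketch; unlike the
removed helper's `str()` fallback, it assumes only
`TextPromptMessageContent` parts carry text):

```python
from core.model_runtime.entities.message_entities import TextPromptMessageContent


def content_to_text(parts) -> str:
    # Flatten typed content back to plain text; non-text parts
    # (images, audio, ...) are skipped rather than str()-ified.
    return "".join(p.data for p in parts if isinstance(p, TextPromptMessageContent))
```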