chore(api/core): Improve FileVar's type hint and imports. (#7290)

This commit is contained in:
-LAN-
2024-08-15 12:43:18 +08:00
committed by GitHub
parent 6ff7fd80a1
commit 8f16165f92
7 changed files with 68 additions and 59 deletions

View File

@@ -1,14 +1,13 @@
import json
from collections.abc import Generator
from copy import deepcopy
-from typing import Optional, cast
+from typing import TYPE_CHECKING, Optional, cast
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.app.entities.queue_entities import QueueRetrieverResourcesEvent
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
-from core.file.file_obj import FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities.llm_entities import LLMUsage
@@ -39,6 +38,10 @@ from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus
+if TYPE_CHECKING:
+    from core.file.file_obj import FileVar
class LLMNode(BaseNode):
_node_data_cls = LLMNodeData
@@ -71,7 +74,7 @@ class LLMNode(BaseNode):
node_inputs = {}
# fetch files
-        files: list[FileVar] = self._fetch_files(node_data, variable_pool)
+        files = self._fetch_files(node_data, variable_pool)
if files:
node_inputs['#files#'] = [file.to_dict() for file in files]
@@ -322,7 +325,7 @@ class LLMNode(BaseNode):
return inputs
-    def _fetch_files(self, node_data: LLMNodeData, variable_pool: VariablePool) -> list[FileVar]:
+    def _fetch_files(self, node_data: LLMNodeData, variable_pool: VariablePool) -> list["FileVar"]:
"""
Fetch files
:param node_data: node data
@@ -521,7 +524,7 @@ class LLMNode(BaseNode):
query: Optional[str],
query_prompt_template: Optional[str],
inputs: dict[str, str],
-                                  files: list[FileVar],
+                                  files: list["FileVar"],
context: Optional[str],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity) \