Chore/code format and repair, commit 3254018d
More deleted code, and fix naming ambiguity between workflow_run_id and workflow_id (#17075)
Co-authored-by: 刘江波 <jiangbo721@163.com>
@@ -44,6 +44,7 @@ from core.app.entities.task_entities import (
     WorkflowFinishStreamResponse,
     WorkflowStartStreamResponse,
 )
+from core.app.task_pipeline.exc import WorkflowRunNotFoundError
 from core.file import FILE_MODEL_IDENTITY, File
 from core.model_runtime.utils.encoders import jsonable_encoder
 from core.ops.entities.trace_entity import TraceTaskName
@@ -66,8 +67,6 @@ from models.workflow import (
     WorkflowRunStatus,
 )
 
-from .exc import WorkflowRunNotFoundError
-
 
 class WorkflowCycleManage:
     def __init__(
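These two hunks relocate WorkflowRunNotFoundError from the module-local .exc into core.app.task_pipeline.exc, so the task pipeline owns its own error types. A minimal sketch of what such an exc module typically holds; the real file's contents are not shown in this diff, and the base class here is an assumption:

    # core/app/task_pipeline/exc.py (hypothetical sketch)
    class TaskPipelineError(Exception):
        """Assumed base class for task-pipeline errors."""


    class WorkflowRunNotFoundError(TaskPipelineError):
        """Raised when a workflow run cannot be looked up by its ID."""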
@@ -166,7 +165,7 @@ class WorkflowCycleManage:
 
         outputs = WorkflowEntry.handle_special_values(outputs)
 
-        workflow_run.status = WorkflowRunStatus.SUCCEEDED.value
+        workflow_run.status = WorkflowRunStatus.SUCCEEDED
         workflow_run.outputs = json.dumps(outputs or {})
         workflow_run.elapsed_time = time.perf_counter() - start_at
         workflow_run.total_tokens = total_tokens
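Dropping .value here is safe only if WorkflowRunStatus is a str subclass such as a StrEnum, so that the member itself already is the string the ORM column expects. A minimal sketch under that assumption; the member values shown are illustrative, not taken from this diff:

    from enum import StrEnum

    class WorkflowRunStatus(StrEnum):  # assumed shape, see models.workflow
        RUNNING = "running"
        SUCCEEDED = "succeeded"

    # A StrEnum member is a real str, so assigning it to a string column
    # behaves the same as assigning member.value:
    assert WorkflowRunStatus.SUCCEEDED == "succeeded"
    assert isinstance(WorkflowRunStatus.SUCCEEDED, str)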
@@ -201,7 +200,7 @@
         workflow_run = self._get_workflow_run(session=session, workflow_run_id=workflow_run_id)
         outputs = WorkflowEntry.handle_special_values(dict(outputs) if outputs else None)
 
-        workflow_run.status = WorkflowRunStatus.PARTIAL_SUCCESSED.value
+        workflow_run.status = WorkflowRunStatus.PARTIAL_SUCCEEDED.value
         workflow_run.outputs = json.dumps(outputs or {})
         workflow_run.elapsed_time = time.perf_counter() - start_at
         workflow_run.total_tokens = total_tokens
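This hunk only fixes the member-name typo (PARTIAL_SUCCESSED to PARTIAL_SUCCEEDED); the trailing .value is unchanged here. If any code still looks the member up by its old name, Python enums allow keeping the old spelling as an alias, since two names bound to the same value collapse into one member. A hypothetical sketch; the actual value string is an assumption:

    from enum import StrEnum

    class WorkflowRunStatus(StrEnum):
        PARTIAL_SUCCEEDED = "partial-succeeded"
        PARTIAL_SUCCESSED = "partial-succeeded"  # alias: old misspelling

    assert WorkflowRunStatus.PARTIAL_SUCCESSED is WorkflowRunStatus.PARTIAL_SUCCEEDED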
@@ -1,7 +1,7 @@
-from enum import Enum
+from enum import Enum, StrEnum
 
 
-class BuiltInField(str, Enum):
+class BuiltInField(StrEnum):
     document_name = "document_name"
     uploader = "uploader"
     upload_date = "upload_date"
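class BuiltInField(StrEnum) needs Python 3.11+, and it is not a pure no-op relative to (str, Enum): equality with plain strings works in both, but a StrEnum member also stringifies to the bare value, which matters for f-strings, dict keys in json.dumps, and logging. A small illustration; the class names here are made up for the comparison:

    from enum import Enum, StrEnum

    class OldStyle(str, Enum):  # pattern being replaced
        A = "a"

    class NewStyle(StrEnum):    # replacement pattern, Python 3.11+
        A = "a"

    assert OldStyle.A == "a" and NewStyle.A == "a"  # equality: same in both
    assert str(NewStyle.A) == "a"                   # StrEnum: bare value
    assert f"{NewStyle.A}" == "a"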
@@ -1,7 +1,7 @@
-from enum import Enum
+from enum import StrEnum
 
 
-class IndexType(str, Enum):
+class IndexType(StrEnum):
     PARAGRAPH_INDEX = "text_model"
     QA_INDEX = "qa_model"
     PARENT_CHILD_INDEX = "hierarchical_model"
@@ -39,6 +39,8 @@ class ParentChildIndexProcessor(BaseIndexProcessor):
         all_documents = []  # type: ignore
         if rules.parent_mode == ParentMode.PARAGRAPH:
             # Split the text documents into nodes.
+            if not rules.segmentation:
+                raise ValueError("No segmentation found in rules.")
             splitter = self._get_splitter(
                 processing_rule_mode=process_rule.get("mode"),
                 max_tokens=rules.segmentation.max_tokens,
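The added check fails fast with a clear ValueError instead of letting rules.segmentation.max_tokens raise AttributeError on None a line later. The pattern in isolation, with stand-in types; Segmentation and Rules here are simplified stand-ins, not the project's real classes:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Segmentation:
        max_tokens: int

    @dataclass
    class Rules:
        segmentation: Optional[Segmentation] = None

    def max_tokens_for(rules: Rules) -> int:
        # Guard clause: validate the optional field before dereferencing it.
        if not rules.segmentation:
            raise ValueError("No segmentation found in rules.")
        return rules.segmentation.max_tokens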