refactor(question_classifier): improve error handling with custom exceptions (#10365)
api/core/workflow/nodes/question_classifier/exc.py (new normal file, 6 additions)
@@ -0,0 +1,6 @@
+class QuestionClassifierNodeError(ValueError):
+    """Base class for QuestionClassifierNode errors."""
+
+
+class InvalidModelTypeError(QuestionClassifierNodeError):
+    """Raised when the model is not a Large Language Model."""
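The new exc.py module gives the node a dedicated error hierarchy. Because QuestionClassifierNodeError subclasses ValueError, callers that already catch ValueError keep working, while new code can target the node's failures specifically. A minimal sketch of a hypothetical caller (run_node_safely and node.run() are illustrative names, not part of this commit):

import logging

from core.workflow.nodes.question_classifier.exc import (
    InvalidModelTypeError,
    QuestionClassifierNodeError,
)


def run_node_safely(node) -> None:
    # Hypothetical helper; node.run() stands in for however the node is invoked.
    try:
        node.run()
    except InvalidModelTypeError as exc:
        # Configuration problem: the selected model is not a Large Language Model.
        logging.error("Invalid model type: %s", exc)
    except QuestionClassifierNodeError as exc:
        # Any other node-specific failure; still a ValueError for older callers.
        logging.error("Question classifier failed: %s", exc)

The remaining hunks update the question classifier node implementation to import and raise these exceptions.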
@@ -4,6 +4,7 @@ from collections.abc import Mapping, Sequence
 from typing import TYPE_CHECKING, Any, Optional, cast
 
 from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
+from core.llm_generator.output_parser.errors import OutputParserError
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
 from core.model_runtime.entities import LLMUsage, ModelPropertyKey, PromptMessageRole
@@ -24,6 +25,7 @@ from libs.json_in_md_parser import parse_and_check_json_markdown
 from models.workflow import WorkflowNodeExecutionStatus
 
 from .entities import QuestionClassifierNodeData
+from .exc import InvalidModelTypeError
 from .template_prompts import (
     QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1,
     QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2,
@@ -124,7 +126,7 @@ class QuestionClassifierNode(LLMNode):
                     category_name = classes_map[category_id_result]
                     category_id = category_id_result
 
-        except Exception:
+        except OutputParserError:
             logging.error(f"Failed to parse result text: {result_text}")
         try:
             process_data = {
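Narrowing except Exception to except OutputParserError means only malformed LLM output is logged and recovered from; unrelated bugs now propagate instead of being silently swallowed. A rough sketch of the pattern, assuming parse_and_check_json_markdown takes the raw text plus the expected keys and raises OutputParserError on invalid JSON (extract_category_id is illustrative only):

import logging

from core.llm_generator.output_parser.errors import OutputParserError
from libs.json_in_md_parser import parse_and_check_json_markdown


def extract_category_id(result_text: str) -> str | None:
    try:
        parsed = parse_and_check_json_markdown(result_text, ["category_id"])
        return str(parsed["category_id"])
    except OutputParserError:
        # Malformed model output is an expected failure mode: log and fall back.
        logging.error("Failed to parse result text: %s", result_text)
        return None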
@@ -309,4 +311,4 @@ class QuestionClassifierNode(LLMNode):
             )
 
         else:
-            raise ValueError(f"Model mode {model_mode} not support.")
+            raise InvalidModelTypeError(f"Model mode {model_mode} not support.")
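Note that because InvalidModelTypeError ultimately derives from ValueError (via QuestionClassifierNodeError), this raise stays compatible with any existing caller that catches ValueError, while new code can match on the more specific type.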