feat(api/workflow): Add Conversation.dialogue_count (#7275)
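Only the test-side fallout of the feature appears in the hunks below (an import move plus end-of-file touch-ups); the Conversation.dialogue_count field itself is not shown in this excerpt. As background, a dialogue count is simply the number of completed user/assistant exchanges in a conversation. The toy sketch below illustrates the idea only; the Conversation class and add_dialogue method are illustrative stand-ins, not the real Dify model:

from dataclasses import dataclass, field


@dataclass
class Conversation:
    """Toy stand-in for a conversation record; not the real Dify model."""
    messages: list[str] = field(default_factory=list)
    dialogue_count: int = 0

    def add_dialogue(self, user_message: str, assistant_message: str) -> None:
        # One dialogue = one user turn plus the assistant's reply.
        self.messages.extend([user_message, assistant_message])
        self.dialogue_count += 1


conversation = Conversation()
conversation.add_dialogue("what's the weather today?", "it is sunny today")
assert conversation.dialogue_count == 1
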
@@ -10,8 +10,8 @@ from core.entities.provider_entities import CustomConfiguration, CustomProviderC
 from core.model_manager import ModelInstance
 from core.model_runtime.entities.model_entities import ModelType
 from core.model_runtime.model_providers import ModelProviderFactory
-from core.workflow.entities.node_entities import SystemVariable
 from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
 from core.workflow.nodes.base_node import UserFrom
 from core.workflow.nodes.llm.llm_node import LLMNode
 from extensions.ext_database import db

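The only substantive change in this import block (and in the matching block further down) is the new home of SystemVariable: the tests now import it from core.workflow.enums instead of core.workflow.entities.node_entities. A hedged sketch of the call-site update inside the Dify api package follows; the enum member names used in the dict are assumptions for illustration, not taken from this diff:

# Old location (removed by this commit):
# from core.workflow.entities.node_entities import SystemVariable

# New location used by the updated tests:
from core.workflow.enums import SystemVariable

# Call sites stay the same apart from the import, e.g. keying the
# system variables handed to a VariablePool-style constructor.
system_inputs = {
    SystemVariable.QUERY: "what's the weather today?",          # assumed member
    SystemVariable.CONVERSATION_ID: 'example-conversation-id',  # assumed member
}
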
@@ -236,4 +236,4 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):

     assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
     assert 'sunny' in json.dumps(result.process_data)
-    assert 'what\'s the weather today?' in json.dumps(result.process_data)
+    assert 'what\'s the weather today?' in json.dumps(result.process_data)

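test_execute_llm_with_jinja2 finishes by asserting that both the mocked completion ('sunny') and the rendered user question appear somewhere in result.process_data. Below is a small self-contained illustration of that substring-over-JSON assertion style; FakeResult and the sample process_data are made up for the sketch and are not the real NodeRunResult:

import json
from dataclasses import dataclass


@dataclass
class FakeResult:
    """Toy stand-in for the node run result used in the test."""
    status: str
    process_data: dict


result = FakeResult(
    status='succeeded',
    process_data={
        'prompts': [
            {'role': 'user', 'text': "what's the weather today?"},
            {'role': 'assistant', 'text': 'it is sunny today'},
        ]
    },
)

# Dumping the nested dict to JSON lets a plain substring check confirm that
# the rendered template and the mocked reply both made it into process_data.
assert 'sunny' in json.dumps(result.process_data)
assert "what's the weather today?" in json.dumps(result.process_data)
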
@@ -12,8 +12,8 @@ from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
 from core.model_runtime.entities.model_entities import ModelType
 from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
-from core.workflow.entities.node_entities import SystemVariable
 from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
 from core.workflow.nodes.base_node import UserFrom
 from core.workflow.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode
 from extensions.ext_database import db

@@ -363,7 +363,7 @@ def test_extract_json_response():
     {
         "location": "kawaii"
     }
-    hello world.
+    hello world.
     """)

     assert result['location'] == 'kawaii'

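test_extract_json_response hands the extractor noisy model output, with stray text around a single JSON object, and expects only the parsed object back. The helper under test is internal to ParameterExtractorNode, so the sketch below is only a rough stand-in for the technique (find the first balanced {...} block and parse it), not the node's actual implementation:

import json


def extract_first_json_object(text: str) -> dict | None:
    """Return the first parseable {...} block in text, or None.

    Illustrative only; the real ParameterExtractorNode helper may differ.
    """
    start = text.find('{')
    while start != -1:
        depth = 0
        for end in range(start, len(text)):
            if text[end] == '{':
                depth += 1
            elif text[end] == '}':
                depth -= 1
                if depth == 0:
                    try:
                        return json.loads(text[start:end + 1])
                    except json.JSONDecodeError:
                        break
        start = text.find('{', start + 1)
    return None


result = extract_first_json_object("""
some preamble text
{
    "location": "kawaii"
}
hello world.
""")
assert result == {'location': 'kawaii'}
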
@@ -445,4 +445,4 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
             assert latest_role != prompt.get('role')

         if prompt.get('role') in ['user', 'assistant']:
-            latest_role = prompt.get('role')
+            latest_role = prompt.get('role')
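The last hunk closes test_chat_parameter_extractor_with_memory, which walks the prompts recorded in process_data and asserts that user and assistant roles strictly alternate once conversation memory is injected. A self-contained miniature of that check, using made-up prompt data:

prompts = [
    {'role': 'system', 'text': 'you are a helpful assistant'},
    {'role': 'user', 'text': "what's the weather today?"},
    {'role': 'assistant', 'text': 'it is sunny today'},
    {'role': 'user', 'text': 'and tomorrow?'},
]

# Roles must alternate between user and assistant; system prompts are skipped.
latest_role = None
for prompt in prompts:
    if prompt.get('role') in ['user', 'assistant']:
        if latest_role is not None:
            assert latest_role != prompt.get('role')
        latest_role = prompt.get('role')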