feat: Parallel Execution of Nodes in Workflows (#8192)

Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Yi <yxiaoisme@gmail.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
Author: takatost
Date: 2024-09-10 15:23:16 +08:00
Committed by: GitHub
Parent: 5da0182800
Commit: dabfd74622

156 changed files with 11158 additions and 5605 deletions


@@ -1,17 +1,72 @@
import time
import uuid
from os import getenv
from typing import cast
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import NodeRunResult, UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.code.code_node import CodeNode
from core.workflow.nodes.code.entities import CodeNodeData
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
CODE_MAX_STRING_LENGTH = int(getenv("CODE_MAX_STRING_LENGTH", "10000"))
def init_code_node(code_config: dict):
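"""Build a minimal start -> code graph and return a CodeNode wired to a fresh variable pool."""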
graph_config = {
"edges": [
{
"id": "start-source-code-target",
"source": "start",
"target": "code",
},
],
"nodes": [{"data": {"type": "start"}, "id": "start"}, code_config],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# construct variable pool
variable_pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["code", "123", "args1"], 1)
variable_pool.add(["code", "123", "args2"], 2)
node = CodeNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=code_config,
)
return node
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
def test_execute_code(setup_code_executor_mock):
code = """
@@ -22,44 +77,36 @@ def test_execute_code(setup_code_executor_mock):
"""
# trim first 4 spaces at the beginning of each line
code = "\n".join([line[4:] for line in code.split("\n")])
code_config = {
"id": "code",
"data": {
"outputs": {
"result": {
"type": "number",
},
},
"title": "123",
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
],
"answer": "123",
"code_language": "python3",
"code": code,
},
}
node = init_code_node(code_config)
# execute node
result = node._run()
assert isinstance(result, NodeRunResult)
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs["result"] == 3
assert result.error is None
@@ -74,44 +121,34 @@ def test_execute_code_output_validator(setup_code_executor_mock):
"""
# trim first 4 spaces at the beginning of each line
code = "\n".join([line[4:] for line in code.split("\n")])
code_config = {
"id": "code",
"data": {
"outputs": {
"result": {
"type": "string",
},
},
"title": "123",
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
],
"answer": "123",
"code_language": "python3",
"code": code,
},
}
node = init_code_node(code_config)
# execute node
result = node._run()
assert isinstance(result, NodeRunResult)
assert result.status == WorkflowNodeExecutionStatus.FAILED
assert result.error == "Output variable `result` must be a string"
@@ -127,65 +164,60 @@ def test_execute_code_output_validator_depth():
"""
# trim first 4 spaces at the beginning of each line
code = "\n".join([line[4:] for line in code.split("\n")])
code_config = {
"id": "code",
"data": {
"outputs": {
"string_validator": {
"type": "string",
},
"number_validator": {
"type": "number",
},
"number_array_validator": {
"type": "array[number]",
},
"string_array_validator": {
"type": "array[string]",
},
"object_validator": {
"type": "object",
"children": {
"result": {
"type": "number",
},
"depth": {
"type": "object",
"children": {
"depth": {
"type": "object",
"children": {
"depth": {
"type": "number",
}
},
}
},
},
},
},
},
"title": "123",
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
],
"answer": "123",
"code_language": "python3",
"code": code,
},
}
node = init_code_node(code_config)
# construct result
result = {
@@ -196,6 +228,8 @@ def test_execute_code_output_validator_depth():
"object_validator": {"result": 1, "depth": {"depth": {"depth": 1}}},
}
node.node_data = cast(CodeNodeData, node.node_data)
# validate
node._transform_result(result, node.node_data.outputs)
@@ -250,35 +284,30 @@ def test_execute_code_output_object_list():
"""
# trim first 4 spaces at the beginning of each line
code = "\n".join([line[4:] for line in code.split("\n")])
code_config = {
"id": "code",
"data": {
"outputs": {
"object_list": {
"type": "array[object]",
},
},
"title": "123",
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
],
"answer": "123",
"code_language": "python3",
"code": code,
},
}
node = init_code_node(code_config)
# construct result
result = {
@@ -295,6 +324,8 @@ def test_execute_code_output_object_list():
]
}
node.node_data = cast(CodeNodeData, node.node_data)
# validate
node._transform_result(result, node.node_data.outputs)


@@ -1,31 +1,69 @@
import time
import uuid
from urllib.parse import urlencode
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.http_request.http_request_node import HttpRequestNode
from models.workflow import WorkflowType
from tests.integration_tests.workflow.nodes.__mock.http import setup_http_mock
def init_http_node(config: dict):
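"""Build a minimal start -> http-request graph and return an HttpRequestNode with a prefilled variable pool."""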
graph_config = {
"edges": [
{
"id": "start-source-next-target",
"source": "start",
"target": "1",
},
],
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# construct variable pool
variable_pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["a", "b123", "args1"], 1)
variable_pool.add(["a", "b123", "args2"], 2)
return HttpRequestNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=config,
)
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
def test_get(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -45,12 +83,11 @@ def test_get(setup_http_mock):
"params": "A:b",
"body": None,
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert "?A=b" in data
@@ -59,7 +96,7 @@ def test_get(setup_http_mock):
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
def test_no_auth(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -75,12 +112,11 @@ def test_no_auth(setup_http_mock):
"params": "A:b",
"body": None,
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert "?A=b" in data
@@ -89,7 +125,7 @@ def test_no_auth(setup_http_mock):
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
def test_custom_authorization_header(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -109,12 +145,11 @@ def test_custom_authorization_header(setup_http_mock):
"params": "A:b",
"body": None,
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert "?A=b" in data
@@ -123,7 +158,7 @@ def test_custom_authorization_header(setup_http_mock):
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
def test_template(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -143,11 +178,11 @@ def test_template(setup_http_mock):
"params": "A:b\nTemplate:{{#a.b123.args2#}}",
"body": None,
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert "?A=b" in data
@@ -158,7 +193,7 @@ def test_template(setup_http_mock):
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
def test_json(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -178,11 +213,11 @@ def test_json(setup_http_mock):
"params": "A:b",
"body": {"type": "json", "data": '{"a": "{{#a.b123.args1#}}"}'},
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert '{"a": "1"}' in data
@@ -190,7 +225,7 @@ def test_json(setup_http_mock):
def test_x_www_form_urlencoded(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -210,11 +245,11 @@ def test_x_www_form_urlencoded(setup_http_mock):
"params": "A:b",
"body": {"type": "x-www-form-urlencoded", "data": "a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}"},
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert "a=1&b=2" in data
@@ -222,7 +257,7 @@ def test_x_www_form_urlencoded(setup_http_mock):
def test_form_data(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -242,11 +277,11 @@ def test_form_data(setup_http_mock):
"params": "A:b",
"body": {"type": "form-data", "data": "a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}"},
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert 'form-data; name="a"' in data
@@ -257,7 +292,7 @@ def test_form_data(setup_http_mock):
def test_none_data(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -277,11 +312,11 @@ def test_none_data(setup_http_mock):
"params": "A:b",
"body": {"type": "none", "data": "123123123"},
},
}
)
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
assert "X-Header: 123" in data
@@ -289,7 +324,7 @@ def test_none_data(setup_http_mock):
def test_mock_404(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -305,19 +340,19 @@ def test_mock_404(setup_http_mock):
"params": "",
"headers": "X-Header:123",
},
}
)
result = node._run()
assert result.outputs is not None
resp = result.outputs
assert 404 == resp.get("status_code")
assert "Not Found" in resp.get("body")
assert "Not Found" in resp.get("body", "")
def test_multi_colons_parse(setup_http_mock):
node = init_http_node(
config={
"id": "1",
"data": {
@@ -333,13 +368,14 @@ def test_multi_colons_parse(setup_http_mock):
"headers": "Referer:http://example3.com\nRedirect:http://example4.com",
"body": {"type": "form-data", "data": "Referer:http://example5.com\nRedirect:http://example6.com"},
},
}
)
result = node._run()
assert result.process_data is not None
assert result.outputs is not None
resp = result.outputs
assert urlencode({"Redirect": "http://example2.com"}) in result.process_data.get("request", "")
assert 'form-data; name="Redirect"\n\nhttp://example6.com' in result.process_data.get("request", "")
assert "http://example3.com" == resp.get("headers", {}).get("referer")


@@ -1,5 +1,8 @@
import json
import os
import time
import uuid
from collections.abc import Generator
from unittest.mock import MagicMock
import pytest
@@ -10,28 +13,77 @@ from core.entities.provider_entities import CustomConfiguration, CustomProviderC
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.event import RunCompletedEvent
from core.workflow.nodes.llm.llm_node import LLMNode
from extensions.ext_database import db
from models.provider import ProviderType
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm(setup_openai_mock):
node = LLMNode(
def init_llm_node(config: dict) -> LLMNode:
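"""Build a minimal start -> llm graph and return an LLMNode whose variable pool carries the mock conversation state."""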
graph_config = {
"edges": [
{
"id": "start-source-next-target",
"source": "start",
"target": "llm",
},
],
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# construct variable pool
variable_pool = VariablePool(
system_variables={
SystemVariableKey.QUERY: "what's the weather today?",
SystemVariableKey.FILES: [],
SystemVariableKey.CONVERSATION_ID: "abababa",
SystemVariableKey.USER_ID: "aaa",
},
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["abc", "output"], "sunny")
node = LLMNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=config,
)
return node
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm(setup_openai_mock):
node = init_llm_node(
config={
"id": "llm",
"data": {
@@ -49,19 +101,6 @@ def test_execute_llm(setup_openai_mock):
},
)
credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
provider_instance = ModelProviderFactory().get_provider_instance("openai")
@@ -80,13 +119,15 @@ def test_execute_llm(setup_openai_mock):
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model="gpt-3.5-turbo",
provider="openai",
mode="chat",
credentials=credentials,
parameters={},
model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)
@@ -96,11 +137,16 @@ def test_execute_llm(setup_openai_mock):
node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
# execute node
result = node._run()
assert isinstance(result, Generator)
for item in result:
if isinstance(item, RunCompletedEvent):
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert item.run_result.process_data is not None
assert item.run_result.outputs is not None
assert item.run_result.outputs.get("text") is not None
assert item.run_result.outputs.get("usage", {})["total_tokens"] > 0
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
@@ -109,13 +155,7 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
"""
Test execute LLM node with jinja2
"""
node = init_llm_node(
config={
"id": "llm",
"data": {
@@ -149,19 +189,6 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
},
)
credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
provider_instance = ModelProviderFactory().get_provider_instance("openai")
@@ -181,14 +208,15 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model="gpt-3.5-turbo",
provider="openai",
mode="chat",
credentials=credentials,
parameters={},
model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)
@@ -198,8 +226,11 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
# execute node
result = node._run()
for item in result:
if isinstance(item, RunCompletedEvent):
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert item.run_result.process_data is not None
assert "sunny" in json.dumps(item.run_result.process_data)
assert "what's the weather today?" in json.dumps(item.run_result.process_data)


@@ -1,5 +1,7 @@
import json
import os
import time
import uuid
from typing import Optional
from unittest.mock import MagicMock
@@ -8,19 +10,21 @@ import pytest
from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode
from extensions.ext_database import db
from models.provider import ProviderType
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
@@ -47,13 +51,15 @@ def get_mocked_fetch_model_config(
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
model_schema = model_type_instance.get_model_schema(model)
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model=model,
provider=provider,
mode=mode,
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)
@@ -74,18 +80,62 @@ def get_mocked_fetch_memory(memory_text: str):
return MagicMock(return_value=MemoryMock())
def init_parameter_extractor_node(config: dict):
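"""Build a minimal start -> llm graph and return a ParameterExtractorNode with a prefilled variable pool."""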
graph_config = {
"edges": [
{
"id": "start-source-next-target",
"source": "start",
"target": "llm",
},
],
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# construct variable pool
variable_pool = VariablePool(
system_variables={
SystemVariableKey.QUERY: "what's the weather in SF",
SystemVariableKey.FILES: [],
SystemVariableKey.CONVERSATION_ID: "abababa",
SystemVariableKey.USER_ID: "aaa",
},
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["a", "b123", "args1"], 1)
variable_pool.add(["a", "b123", "args2"], 2)
return ParameterExtractorNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=config,
)
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_function_calling_parameter_extractor(setup_openai_mock):
"""
Test function calling for parameter extractor.
"""
node = init_parameter_extractor_node(
config={
"id": "llm",
"data": {
@@ -98,7 +148,7 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
"reasoning_mode": "function_call",
"memory": None,
},
}
)
node._fetch_model_config = get_mocked_fetch_model_config(
@@ -121,9 +171,10 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
environment_variables=[],
)
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs.get("location") == "kawaii"
assert result.outputs.get("__reason") == None
@@ -133,13 +184,7 @@ def test_instructions(setup_openai_mock):
"""
Test chat parameter extractor.
"""
node = init_parameter_extractor_node(
config={
"id": "llm",
"data": {
@@ -163,29 +208,19 @@ def test_instructions(setup_openai_mock):
)
db.session.close = MagicMock()
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs.get("location") == "kawaii"
assert result.outputs.get("__reason") == None
process_data = result.process_data
assert process_data is not None
process_data.get("prompts")
for prompt in process_data.get("prompts"):
for prompt in process_data.get("prompts", []):
if prompt.get("role") == "system":
assert "what's the weather in SF" in prompt.get("text")
@@ -195,13 +230,7 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
"""
Test chat parameter extractor.
"""
node = init_parameter_extractor_node(
config={
"id": "llm",
"data": {
@@ -225,27 +254,17 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
)
db.session.close = MagicMock()
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs.get("location") == ""
assert (
result.outputs.get("__reason")
== "Failed to extract result from function call or text response, using empty result."
)
prompts = result.process_data.get("prompts")
assert result.process_data is not None
prompts = result.process_data.get("prompts", [])
for prompt in prompts:
if prompt.get("role") == "user":
@@ -258,13 +277,7 @@ def test_completion_parameter_extractor(setup_openai_mock):
"""
Test completion parameter extractor.
"""
node = init_parameter_extractor_node(
config={
"id": "llm",
"data": {
@@ -293,28 +306,18 @@ def test_completion_parameter_extractor(setup_openai_mock):
)
db.session.close = MagicMock()
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs.get("location") == ""
assert (
result.outputs.get("__reason")
== "Failed to extract result from function call or text response, using empty result."
)
assert len(result.process_data.get("prompts")) == 1
assert "SF" in result.process_data.get("prompts")[0].get("text")
assert result.process_data is not None
assert len(result.process_data.get("prompts", [])) == 1
assert "SF" in result.process_data.get("prompts", [])[0].get("text")
def test_extract_json_response():
@@ -322,13 +325,7 @@ def test_extract_json_response():
Test extract json response.
"""
node = init_parameter_extractor_node(
config={
"id": "llm",
"data": {
@@ -357,6 +354,7 @@ def test_extract_json_response():
hello world.
""")
assert result is not None
assert result["location"] == "kawaii"
@@ -365,13 +363,7 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
"""
Test chat parameter extractor with memory.
"""
node = init_parameter_extractor_node(
config={
"id": "llm",
"data": {
@@ -396,27 +388,17 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
node._fetch_memory = get_mocked_fetch_memory("customized memory")
db.session.close = MagicMock()
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs.get("location") == ""
assert (
result.outputs.get("__reason")
== "Failed to extract result from function call or text response, using empty result."
)
prompts = result.process_data.get("prompts")
assert result.process_data is not None
prompts = result.process_data.get("prompts", [])
latest_role = None
for prompt in prompts:


@@ -1,46 +1,84 @@
import time
import uuid
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
def test_execute_code(setup_code_executor_mock):
code = """{{args2}}"""
config = {
"id": "1",
"data": {
"title": "123",
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
],
"template": code,
},
}
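# wire the node config into a minimal start -> template-transform graph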
graph_config = {
"edges": [
{
"id": "start-source-next-target",
"source": "start",
"target": "1",
},
],
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# construct variable pool
variable_pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["1", "123", "args1"], 1)
variable_pool.add(["1", "123", "args2"], 3)
node = TemplateTransformNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=config,
)
# execute node
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert result.outputs["output"] == "3"


@@ -1,21 +1,62 @@
import time
import uuid
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import NodeRunResult, UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.tool.tool_node import ToolNode
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
def init_tool_node(config: dict):
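"""Build a minimal start -> tool graph and return a ToolNode backed by a fresh variable pool."""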
graph_config = {
"edges": [
{
"id": "start-source-next-target",
"source": "start",
"target": "1",
},
],
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# construct variable pool
variable_pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
return ToolNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=config,
)
def test_tool_variable_invoke():
node = init_tool_node(
config={
"id": "1",
"data": {
@@ -34,28 +75,22 @@ def test_tool_variable_invoke():
}
},
},
}
)
node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], "1+1")
# execute node
result = node._run()
assert isinstance(result, NodeRunResult)
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert "2" in result.outputs["text"]
assert result.outputs["files"] == []
def test_tool_mixed_invoke():
node = init_tool_node(
config={
"id": "1",
"data": {
@@ -74,12 +109,15 @@ def test_tool_mixed_invoke():
}
},
},
}
)
node.graph_runtime_state.variable_pool.add(["1", "args1"], "1+1")
# execute node
result = node._run()
assert isinstance(result, NodeRunResult)
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert "2" in result.outputs["text"]
assert result.outputs["files"] == []