diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py
index 4847a2cab..b46292305 100644
--- a/api/controllers/console/app/generator.py
+++ b/api/controllers/console/app/generator.py
@@ -1,3 +1,5 @@
+from collections.abc import Sequence
+
 from flask_login import current_user
 from flask_restful import Resource, reqparse
 
@@ -107,6 +109,114 @@ class RuleStructuredOutputGenerateApi(Resource):
         return structured_output
 
 
+class InstructionGenerateApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def post(self):
+        parser = reqparse.RequestParser()
+        parser.add_argument("flow_id", type=str, required=True, default="", location="json")
+        parser.add_argument("node_id", type=str, required=False, default="", location="json")
+        parser.add_argument("current", type=str, required=False, default="", location="json")
+        parser.add_argument("language", type=str, required=False, default="javascript", location="json")
+        parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
+        parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
+        parser.add_argument("ideal_output", type=str, required=False, default="", location="json")
+        args = parser.parse_args()
+
+        try:
+            if args["current"] == "" and args["node_id"] != "":  # Generate from scratch for a workflow node
+                from models import App, db
+                from services.workflow_service import WorkflowService
+
+                app = db.session.query(App).filter(App.id == args["flow_id"]).first()
+                if not app:
+                    return {"error": f"app {args['flow_id']} not found"}, 400
+                workflow = WorkflowService().get_draft_workflow(app_model=app)
+                if not workflow:
+                    return {"error": f"workflow {args['flow_id']} not found"}, 400
+                nodes: Sequence = workflow.graph_dict["nodes"]
+                node = [node for node in nodes if node["id"] == args["node_id"]]
+                if len(node) == 0:
+                    return {"error": f"node {args['node_id']} not found"}, 400
+                node_type = node[0]["data"]["type"]
+                match node_type:
+                    case "llm" | "agent":
+                        return LLMGenerator.generate_rule_config(
+                            current_user.current_tenant_id,
+                            instruction=args["instruction"],
+                            model_config=args["model_config"],
+                            no_variable=True,
+                        )
+                    case "code":
+                        return LLMGenerator.generate_code(
+                            tenant_id=current_user.current_tenant_id,
+                            instruction=args["instruction"],
+                            model_config=args["model_config"],
+                            code_language=args["language"],
+                        )
+                    case _:
+                        return {"error": f"invalid node type: {node_type}"}, 400
+            if args["node_id"] == "" and args["current"] != "":  # For a legacy app without a workflow
+                return LLMGenerator.instruction_modify_legacy(
+                    tenant_id=current_user.current_tenant_id,
+                    flow_id=args["flow_id"],
+                    current=args["current"],
+                    instruction=args["instruction"],
+                    model_config=args["model_config"],
+                    ideal_output=args["ideal_output"],
+                )
+            if args["node_id"] != "" and args["current"] != "":  # For a workflow node
+                return LLMGenerator.instruction_modify_workflow(
+                    tenant_id=current_user.current_tenant_id,
+                    flow_id=args["flow_id"],
+                    node_id=args["node_id"],
+                    current=args["current"],
+                    instruction=args["instruction"],
+                    model_config=args["model_config"],
+                    ideal_output=args["ideal_output"],
+                )
+            return {"error": "incompatible parameters"}, 400
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
+        except QuotaExceededError:
+            raise ProviderQuotaExceededError()
+        except ModelCurrentlyNotSupportError:
+            raise ProviderModelCurrentlyNotSupportError()
+        except InvokeError as e:
+            raise CompletionRequestError(e.description)
+
+
+class InstructionGenerationTemplateApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def post(self) -> dict:
+        parser = reqparse.RequestParser()
+        parser.add_argument("type", type=str, required=True, nullable=False, location="json")
+        args = parser.parse_args()
+        match args["type"]:
+            case "prompt":
+                from core.llm_generator.prompts import INSTRUCTION_GENERATE_TEMPLATE_PROMPT
+
+                return {"data": INSTRUCTION_GENERATE_TEMPLATE_PROMPT}
+            case "code":
+                from core.llm_generator.prompts import INSTRUCTION_GENERATE_TEMPLATE_CODE
+
+                return {"data": INSTRUCTION_GENERATE_TEMPLATE_CODE}
+            case _:
+                raise ValueError(f"Invalid type: {args['type']}")
+
+
 api.add_resource(RuleGenerateApi, "/rule-generate")
 api.add_resource(RuleCodeGenerateApi, "/rule-code-generate")
 api.add_resource(RuleStructuredOutputGenerateApi, "/rule-structured-output-generate")
+api.add_resource(InstructionGenerateApi, "/instruction-generate")
+api.add_resource(InstructionGenerationTemplateApi, "/instruction-generate/template")
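
For reviewers poking at the new routes by hand, here is a rough sketch of the request shapes the parser above accepts. The host, token, IDs, and the exact `model_config` payload are placeholders and assumptions, not values taken from this PR:

```python
# Minimal sketch of the new console endpoints (assumed local dev host and
# placeholder credentials; the console API normally sits behind session auth).
import requests

BASE = "http://localhost:5001/console/api"  # assumption: default dev server
HEADERS = {
    "Authorization": "Bearer <console-access-token>",  # placeholder
    "Content-Type": "application/json",
}

# Improve an existing prompt on a workflow node: node_id and current are both
# set, so the controller dispatches to LLMGenerator.instruction_modify_workflow.
payload = {
    "flow_id": "<app-id>",   # placeholder
    "node_id": "<node-id>",  # placeholder
    "current": "Summarize {{#context#}} in three bullet points.",
    "instruction": "Keep each bullet under 15 words.",
    "ideal_output": "- three short bullets",
    "model_config": {  # assumed shape; depends on the installed provider
        "provider": "openai",
        "name": "gpt-4o",
        "mode": "chat",
        "completion_params": {},
    },
}
resp = requests.post(f"{BASE}/instruction-generate", headers=HEADERS, json=payload)
print(resp.json())  # expected shape: {"modified": "...", "message": "..."}

# Fetch the default instruction template used to prefill the editor.
resp = requests.post(f"{BASE}/instruction-generate/template", headers=HEADERS, json={"type": "prompt"})
print(resp.json())  # {"data": "<template text>"}
```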
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 47e5a7916..64fc3a3e8 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import re
+from collections.abc import Sequence
 from typing import Optional, cast
 
 import json_repair
@@ -11,6 +12,8 @@ from core.llm_generator.prompts import (
     CONVERSATION_TITLE_PROMPT,
     GENERATOR_QA_PROMPT,
     JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE,
+    LLM_MODIFY_CODE_SYSTEM,
+    LLM_MODIFY_PROMPT_SYSTEM,
     PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE,
     SYSTEM_STRUCTURED_OUTPUT_GENERATE,
     WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
@@ -24,6 +27,9 @@ from core.ops.entities.trace_entity import TraceTaskName
 from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
 from core.ops.utils import measure_time
 from core.prompt.utils.prompt_template_parser import PromptTemplateParser
+from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey
+from core.workflow.graph_engine.entities.event import AgentLogEvent
+from models import App, Message, WorkflowNodeExecutionModel, db
 
 
 class LLMGenerator:
@@ -388,3 +394,181 @@ class LLMGenerator:
         except Exception as e:
             logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
             return {"output": "", "error": f"An unexpected error occurred: {str(e)}"}
+
+    @staticmethod
+    def instruction_modify_legacy(
+        tenant_id: str, flow_id: str, current: str, instruction: str, model_config: dict, ideal_output: str | None
+    ) -> dict:
+        app: App | None = db.session.query(App).filter(App.id == flow_id).first()
+        last_run: Message | None = (
+            db.session.query(Message).filter(Message.app_id == flow_id).order_by(Message.created_at.desc()).first()
+        )
+        if not last_run:
+            return LLMGenerator.__instruction_modify_common(
+                tenant_id=tenant_id,
+                model_config=model_config,
+                last_run=None,
+                current=current,
+                error_message="",
+                instruction=instruction,
+                node_type="llm",
+                ideal_output=ideal_output,
+            )
+        last_run_dict = {
+            "query": last_run.query,
+            "answer": last_run.answer,
+            "error": last_run.error,
+        }
+        return LLMGenerator.__instruction_modify_common(
+            tenant_id=tenant_id,
+            model_config=model_config,
+            last_run=last_run_dict,
+            current=current,
+            error_message=str(last_run.error),
+            instruction=instruction,
+            node_type="llm",
+            ideal_output=ideal_output,
+        )
+
+    @staticmethod
+    def instruction_modify_workflow(
+        tenant_id: str,
+        flow_id: str,
+        node_id: str,
+        current: str,
+        instruction: str,
+        model_config: dict,
+        ideal_output: str | None,
+    ) -> dict:
+        from services.workflow_service import WorkflowService
+
+        app: App | None = db.session.query(App).filter(App.id == flow_id).first()
+        if not app:
+            raise ValueError("App not found.")
+        workflow = WorkflowService().get_draft_workflow(app_model=app)
+        if not workflow:
+            raise ValueError("Workflow not found for the given app model.")
+        last_run = WorkflowService().get_node_last_run(app_model=app, workflow=workflow, node_id=node_id)
+        try:
+            node_type = cast(WorkflowNodeExecutionModel, last_run).node_type
+        except Exception:
+            try:
+                node_type = [it for it in workflow.graph_dict["graph"]["nodes"] if it["id"] == node_id][0]["data"][
+                    "type"
+                ]
+            except Exception:
+                node_type = "llm"
+
+        if not last_run:  # The node has not been executed yet
+            return LLMGenerator.__instruction_modify_common(
+                tenant_id=tenant_id,
+                model_config=model_config,
+                last_run=None,
+                current=current,
+                error_message="",
+                instruction=instruction,
+                node_type=node_type,
+                ideal_output=ideal_output,
+            )
+
+        def agent_log_of(node_execution: WorkflowNodeExecutionModel) -> Sequence:
+            raw_agent_log = node_execution.execution_metadata_dict.get(WorkflowNodeExecutionMetadataKey.AGENT_LOG)
+            if not raw_agent_log:
+                return []
+            # json.loads yields plain dicts (serialized AgentLogEvent entries),
+            # so the fields are read by key rather than by attribute.
+            parsed: Sequence[dict] = json.loads(raw_agent_log)
+
+            def dict_of_event(event: dict) -> dict:
+                return {
+                    "status": event.get("status"),
+                    "error": event.get("error"),
+                    "data": event.get("data"),
+                }
+
+            return [dict_of_event(event) for event in parsed]
+
+        last_run_dict = {
+            "inputs": last_run.inputs_dict,
+            "status": last_run.status,
+            "error": last_run.error,
+            "agent_log": agent_log_of(last_run),
+        }
+
+        return LLMGenerator.__instruction_modify_common(
+            tenant_id=tenant_id,
+            model_config=model_config,
+            last_run=last_run_dict,
+            current=current,
+            error_message=last_run.error,
+            instruction=instruction,
+            node_type=last_run.node_type,
+            ideal_output=ideal_output,
+        )
+
+    @staticmethod
+    def __instruction_modify_common(
+        tenant_id: str,
+        model_config: dict,
+        last_run: dict | None,
+        current: str | None,
+        error_message: str | None,
+        instruction: str,
+        node_type: str,
+        ideal_output: str | None,
+    ) -> dict:
+        LAST_RUN = "{{#last_run#}}"
+        CURRENT = "{{#current#}}"
+        ERROR_MESSAGE = "{{#error_message#}}"
+        injected_instruction = instruction
+        if LAST_RUN in injected_instruction:
+            injected_instruction = injected_instruction.replace(LAST_RUN, json.dumps(last_run))
+        if CURRENT in injected_instruction:
+            injected_instruction = injected_instruction.replace(CURRENT, current or "null")
+        if ERROR_MESSAGE in injected_instruction:
+            injected_instruction = injected_instruction.replace(ERROR_MESSAGE, error_message or "null")
+        model_instance = ModelManager().get_model_instance(
+            tenant_id=tenant_id,
+            model_type=ModelType.LLM,
+            provider=model_config.get("provider", ""),
+            model=model_config.get("name", ""),
+        )
+        match node_type:
+            case "llm" | "agent":
+                system_prompt = LLM_MODIFY_PROMPT_SYSTEM
+            case "code":
+                system_prompt = LLM_MODIFY_CODE_SYSTEM
+            case _:
+                system_prompt = LLM_MODIFY_PROMPT_SYSTEM
+        prompt_messages = [
+            SystemPromptMessage(content=system_prompt),
+            UserPromptMessage(
+                content=json.dumps(
+                    {
+                        "current": current,
+                        "last_run": last_run,
+                        "instruction": injected_instruction,
+                        "ideal_output": ideal_output,
+                    }
+                )
+            ),
+        ]
+        model_parameters = {"temperature": 0.4}
+
+        try:
+            response = cast(
+                LLMResult,
+                model_instance.invoke_llm(
+                    prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
+                ),
+            )
+
+            generated_raw = cast(str, response.message.content)
+            first_brace = generated_raw.find("{")
+            last_brace = generated_raw.rfind("}")
+            return {**json.loads(generated_raw[first_brace : last_brace + 1])}
+
+        except InvokeError as e:
+            error = str(e)
+            return {"error": f"Failed to generate code. Error: {error}"}
+        except Exception as e:
+            logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
+            return {"error": f"An unexpected error occurred: {str(e)}"}
diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py
index ef81e38dc..e38828578 100644
--- a/api/core/llm_generator/prompts.py
+++ b/api/core/llm_generator/prompts.py
@@ -309,3 +309,116 @@ eg:
 Here is the JSON schema:
 {{schema}}
 """  # noqa: E501
+
+LLM_MODIFY_PROMPT_SYSTEM = """
+Both your input and output should be in JSON format.
+
+! Below is the schema for input content !
+{
+    "type": "object",
+    "description": "The user is trying to process some content with a prompt, but the output is not as expected. They hope to achieve their goal by modifying the prompt.",
+    "properties": {
+        "current": {
+            "type": "string",
+            "description": "The prompt before modification, where placeholders {{}} will be replaced with actual values for the large language model. The content in the placeholders should not be changed."
+        },
+        "last_run": {
+            "type": "object",
+            "description": "The output result from the large language model after receiving the prompt."
+        },
+        "instruction": {
+            "type": "string",
+            "description": "User's instruction to edit the current prompt"
+        },
+        "ideal_output": {
+            "type": "string",
+            "description": "The ideal output that the user expects from the large language model after modifying the prompt. You should compare the last output with the ideal output and make changes to the prompt to achieve the goal."
+        }
+    }
+}
+! Above is the schema for input content !
+
+! Below is the schema for output content !
+{
+    "type": "object",
+    "description": "Your feedback to the user after they provide modification suggestions.",
+    "properties": {
+        "modified": {
+            "type": "string",
+            "description": "Your modified prompt. You should change the original prompt as little as possible to achieve the goal. Keep the language of the prompt unless asked to change it."
+        },
+        "message": {
+            "type": "string",
+            "description": "Your feedback to the user, in the user's language, explaining what you did and your thought process in text, providing sufficient emotional value to the user."
+        }
+    },
+    "required": [
+        "modified",
+        "message"
+    ]
+}
+! Above is the schema for output content !
+
+Your output must strictly follow the schema format; do not output any content outside of the JSON body.
+"""  # noqa: E501
+
+LLM_MODIFY_CODE_SYSTEM = """
+Both your input and output should be in JSON format.
+
+! Below is the schema for input content !
+{
+    "type": "object",
+    "description": "The user is trying to process some data with a code snippet, but the result is not as expected. They hope to achieve their goal by modifying the code.",
+    "properties": {
+        "current": {
+            "type": "string",
+            "description": "The code before modification."
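
The placeholder substitution and brace-slicing that `__instruction_modify_common` performs (see the llm_generator.py hunk above) can be exercised in isolation. A minimal sketch with no Dify dependencies:

```python
import json


def inject(instruction: str, last_run: dict | None, current: str | None, error_message: str | None) -> str:
    """Replay of the {{#...#}} substitution in __instruction_modify_common."""
    replacements = {
        "{{#last_run#}}": json.dumps(last_run),
        "{{#current#}}": current or "null",
        "{{#error_message#}}": error_message or "null",
    }
    for token, value in replacements.items():
        if token in instruction:
            instruction = instruction.replace(token, value)
    return instruction


def extract_json(raw: str) -> dict:
    """Mirror of the find("{") / rfind("}") slicing in the diff; assumes the
    model reply contains exactly one JSON object, possibly wrapped in prose."""
    return json.loads(raw[raw.find("{"): raw.rfind("}") + 1])


print(inject("Please fix the errors described in {{#error_message#}}.", None, None, "KeyError: 'x'"))
print(extract_json('Sure! {"modified": "new prompt", "message": "done"} Hope this helps.'))
```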
+        },
+        "last_run": {
+            "type": "object",
+            "description": "The result of the code."
+        },
+        "message": {
+            "type": "string",
+            "description": "User's instruction to edit the current code"
+        }
+    }
+}
+! Above is the schema for input content !
+
+! Below is the schema for output content !
+{
+    "type": "object",
+    "description": "Your feedback to the user after they provide modification suggestions.",
+    "properties": {
+        "modified": {
+            "type": "string",
+            "description": "Your modified code. You should change the original code as little as possible to achieve the goal. Keep the programming language of the code unless asked to change it."
+        },
+        "message": {
+            "type": "string",
+            "description": "Your feedback to the user, in the user's language, explaining what you did and your thought process in text, providing sufficient emotional value to the user."
+        }
+    },
+    "required": [
+        "modified",
+        "message"
+    ]
+}
+! Above is the schema for output content !
+
+When you are modifying the code, you should remember:
+- Do not use print; it does not work in the Dify sandbox.
+- Do not attempt dangerous calls such as deleting files. This is PROHIBITED.
+- Do not use any library that is not built into Python.
+- Read inputs from the function's parameters and give them explicit type annotations.
+- Write proper imports at the top of the code.
+- Use a return statement to return the result.
+- You should return a `dict`.
+Your output must strictly follow the schema format; do not output any content outside of the JSON body.
+"""  # noqa: E501
+
+INSTRUCTION_GENERATE_TEMPLATE_PROMPT = """The output of this prompt is not as expected: {{#last_run#}}.
+You should edit the prompt according to the IDEAL OUTPUT."""
+
+INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors described in {{#error_message#}}."""
diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index eb0f52438..a7bdc550d 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -13,7 +13,7 @@ import Tooltip from '@/app/components/base/tooltip'
 import { AppType } from '@/types/app'
 import { getNewVar, getVars } from '@/utils/var'
 import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
-import type { AutomaticRes } from '@/service/debug'
+import type { GenRes } from '@/service/debug'
 import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
 import PromptEditor from '@/app/components/base/prompt-editor'
 import ConfigContext from '@/context/debug-configuration'
@@ -61,6 +61,7 @@ const Prompt: FC = ({
   const { eventEmitter } = useEventEmitterContextContext()
   const {
+    appId,
     modelConfig,
     dataSets,
     setModelConfig,
@@ -139,21 +140,21 @@ const Prompt: FC = ({
   }
 
   const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
-  const handleAutomaticRes = (res: AutomaticRes) => {
+  const handleAutomaticRes = (res: GenRes) => {
     // Emit through eventEmitter first to avoid overwriting configs.prompt_variables. A remaining problem is that the editor won't highlight the prompt_variables.
     eventEmitter?.emit({
       type: PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER,
-      payload: res.prompt,
+      payload: res.modified,
     } as any)
     const newModelConfig = produce(modelConfig, (draft) => {
-      draft.configs.prompt_template = res.prompt
-      draft.configs.prompt_variables = res.variables.map(key => ({ key, name: key, type: 'string', required: true }))
+      draft.configs.prompt_template = res.modified
+      draft.configs.prompt_variables = (res.variables || []).map(key => ({ key, name: key, type: 'string', required: true }))
     })
     setModelConfig(newModelConfig)
     setPrevPromptConfig(modelConfig.configs)
     if (mode !== AppType.completion) {
-      setIntroduction(res.opening_statement)
+      setIntroduction(res.opening_statement || '')
       const newFeatures = produce(features, (draft) => {
         draft.opening = {
           ...draft.opening,
@@ -272,10 +273,13 @@ const Prompt: FC = ({
       {showAutomatic && (
        <GetAutomaticResModal ... />
       )}
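
The `GenRes` shape the modal now passes around unifies two replies: the legacy rule-generation response (which carries `prompt`, `variables`, and `opening_statement`) and the new instruction-generate response (which carries `modified` and `message`). A small sketch of that normalization, assuming optional fields may simply be absent:

```python
def to_gen_res(res: dict) -> dict:
    """Normalize either backend reply into the single shape the UI stores."""
    normalized = dict(res)
    if "modified" not in normalized and "prompt" in normalized:
        normalized["modified"] = normalized["prompt"]  # legacy basic-app path
    normalized.setdefault("variables", [])             # may be absent in new replies
    normalized.setdefault("opening_statement", "")     # may be absent in new replies
    return normalized


print(to_gen_res({"prompt": "You are a helpful bot.", "variables": ["name"]}))
print(to_gen_res({"modified": "Tightened prompt", "message": "Shortened the wording."}))
```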
diff --git a/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx b/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx
index aacaa81ac..31f81d274 100644
--- a/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx
+++ b/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx
@@ -2,7 +2,7 @@ import type { FC } from 'react'
 import React, { useCallback, useEffect, useState } from 'react'
 import { useTranslation } from 'react-i18next'
-import { useBoolean } from 'ahooks'
+import { useBoolean, useSessionStorageState } from 'ahooks'
 import {
   RiDatabase2Line,
   RiFileExcel2Line,
@@ -14,24 +14,18 @@ import {
   RiTranslate,
   RiUser2Line,
 } from '@remixicon/react'
-import cn from 'classnames'
 import s from './style.module.css'
 import Modal from '@/app/components/base/modal'
 import Button from '@/app/components/base/button'
-import Textarea from '@/app/components/base/textarea'
 import Toast from '@/app/components/base/toast'
-import { generateRule } from '@/service/debug'
-import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
+import { generateBasicAppFistTimeRule, generateRule } from '@/service/debug'
 import type { CompletionParams, Model } from '@/types/app'
-import { AppType } from '@/types/app'
-import ConfigVar from '@/app/components/app/configuration/config-var'
-import GroupName from '@/app/components/app/configuration/base/group-name'
+import type { AppType } from '@/types/app'
 import Loading from '@/app/components/base/loading'
 import Confirm from '@/app/components/base/confirm'
-import { LoveMessage } from '@/app/components/base/icons/src/vender/features'
 // type
-import type { AutomaticRes } from '@/service/debug'
+import type { GenRes } from '@/service/debug'
 import { Generator } from '@/app/components/base/icons/src/vender/other'
 import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal'
@@ -39,13 +33,25 @@ import { ModelTypeEnum } from '@/app/components/header/account-setting/model-pro
 import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
 import type { ModelModeType } from '@/types/app'
 import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import InstructionEditorInWorkflow from './instruction-editor-in-workflow'
+import InstructionEditorInBasic from './instruction-editor'
+import { GeneratorType } from './types'
+import Result from './result'
+import useGenData from './use-gen-data'
+import IdeaOutput from './idea-output'
+import ResPlaceholder from './res-placeholder'
+import { useGenerateRuleTemplate } from '@/service/use-apps'
+
+const i18nPrefix = 'appDebug.generate'
 
 export type IGetAutomaticResProps = {
   mode: AppType
   isShow: boolean
   onClose: () => void
-  onFinished: (res: AutomaticRes) => void
-  isInLLMNode?: boolean
+  onFinished: (res: GenRes) => void
+  flowId?: string
+  nodeId?: string
+  currentPrompt?: string
+  isBasicMode?: boolean
 }
 
 const TryLabel: FC<{
@@ -68,7 +74,10 @@ const GetAutomaticRes: FC = ({
   mode,
   isShow,
   onClose,
-  isInLLMNode,
+  flowId,
+  nodeId,
+  currentPrompt,
+  isBasicMode,
   onFinished,
 }) => {
   const { t } = useTranslation()
@@ -123,13 +132,27 @@ const GetAutomaticRes: FC = ({
     },
   ]
 
-  const [instruction, setInstruction] = useState('')
+  const [instructionFromSessionStorage, setInstruction] = useSessionStorageState(`improve-instruction-${flowId}${isBasicMode ? '' : `-${nodeId}`}`)
+  const instruction = instructionFromSessionStorage || ''
+  const [ideaOutput, setIdeaOutput] = useState('')
+
+  const [editorKey, setEditorKey] = useState(`${flowId}-0`)
   const handleChooseTemplate = useCallback((key: string) => {
     return () => {
       const template = t(`appDebug.generate.template.${key}.instruction`)
       setInstruction(template)
+      setEditorKey(`${flowId}-${Date.now()}`)
     }
   }, [t])
+
+  const { data: instructionTemplate } = useGenerateRuleTemplate(GeneratorType.prompt, isBasicMode)
+  useEffect(() => {
+    if (!instruction && instructionTemplate)
+      setInstruction(instructionTemplate.data)
+
+    setEditorKey(`${flowId}-${Date.now()}`)
+  }, [instructionTemplate])
+
   const isValid = () => {
     if (instruction.trim() === '') {
       Toast.notify({
@@ -143,7 +166,10 @@ const GetAutomaticRes: FC = ({
     return true
   }
   const [isLoading, { setTrue: setLoadingTrue, setFalse: setLoadingFalse }] = useBoolean(false)
-  const [res, setRes] = useState<AutomaticRes | null>(null)
+  const storageKey = `${flowId}${isBasicMode ? '' : `-${nodeId}`}`
+  const { addVersion, current, currentVersionIndex, setCurrentVersionIndex, versions } = useGenData({
+    storageKey,
+  })
 
   useEffect(() => {
     if (defaultModel) {
@@ -170,16 +196,6 @@ const GetAutomaticRes: FC = ({
   )
 
-  const renderNoData = (
-    <div ...>
-      <Generator ... />
-      <div ...>
-        <div>{t('appDebug.generate.noDataLine1')}</div>
-        <div>{t('appDebug.generate.noDataLine2')}</div>
-      </div>
-    </div>
-  )
-
   const handleModelChange = useCallback((newValue: { modelId: string; provider: string; mode?: string; features?: string[] }) => {
     const newModel = {
       ...model,
@@ -207,28 +223,59 @@ const GetAutomaticRes: FC = ({
       return
     setLoadingTrue()
     try {
-      const { error, ...res } = await generateRule({
-        instruction,
-        model_config: model,
-        no_variable: !!isInLLMNode,
-      })
-      setRes(res)
-      if (error) {
-        Toast.notify({
-          type: 'error',
-          message: error,
-        })
-      }
+      let apiRes: GenRes
+      let hasError = false
+      if (isBasicMode || !currentPrompt) {
+        const { error, ...res } = await generateBasicAppFistTimeRule({
+          instruction,
+          model_config: model,
+          no_variable: false,
+        })
+        apiRes = {
+          ...res,
+          modified: res.prompt,
+        } as GenRes
+        if (error) {
+          hasError = true
+          Toast.notify({
+            type: 'error',
+            message: error,
+          })
+        }
+      }
+      else {
+        const { error, ...res } = await generateRule({
+          flow_id: flowId,
+          node_id: nodeId,
+          current: currentPrompt,
+          instruction,
+          ideal_output: ideaOutput,
+          model_config: model,
+        })
+        apiRes = res
+        if (error) {
+          hasError = true
+          Toast.notify({
+            type: 'error',
+            message: error,
+          })
+        }
+      }
+      if (!hasError)
+        addVersion(apiRes)
     }
     finally {
       setLoadingFalse()
     }
   }
 
-  const [showConfirmOverwrite, setShowConfirmOverwrite] = React.useState(false)
+  const [isShowConfirmOverwrite, {
+    setTrue: showConfirmOverwrite,
+    setFalse: hideShowConfirmOverwrite,
+  }] = useBoolean(false)
 
   const isShowAutoPromptResPlaceholder = () => {
-    return !isLoading && !res
+    return !isLoading && !current
   }
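
The `useGenData` hook is not part of this diff, so its internals are an assumption from the call site; judging by `addVersion`, `current`, `versions`, and `currentVersionIndex`, it behaves roughly like an append-only version store keyed by `storageKey`. A sketch of that behavior:

```python
# Hypothetical model of the useGenData hook's state (the real hook lives in
# ./use-gen-data and is not shown here); illustrates the version-history UX.
class GenData:
    def __init__(self) -> None:
        self.versions: list[dict] = []
        self.current_index = -1

    def add_version(self, res: dict) -> None:
        self.versions.append(res)
        self.current_index = len(self.versions) - 1  # jump to the newest result

    @property
    def current(self) -> dict | None:
        return self.versions[self.current_index] if self.versions else None


store = GenData()
store.add_version({"modified": "v1 prompt", "message": "first try"})
store.add_version({"modified": "v2 prompt", "message": "tightened wording"})
print(store.current["modified"])  # -> "v2 prompt"
```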
 
   return (
     <Modal
@@ -236,15 +283,14 @@ const GetAutomaticRes: FC = ({
       isShow={isShow}
       onClose={onClose}
       className='min-w-[1140px] !p-0'
-      closable
     >
-      <div ...>
+      <div ...>
         <div ...>{t('appDebug.generate.title')}</div>
         <div ...>{t('appDebug.generate.description')}</div>
-      <div ...>
+      <div ...>
         <ModelParameterModal
           ...
           hideDebugWithMultipleModel
         />
-      <div ...>
-        <div ...>{t('appDebug.generate.tryIt')}</div>
-        <div ...>
-          {tryList.map(item => (
-            <TryLabel ... />
-          ))}
-        </div>
-      </div>
-      {/* inputs */}
-      <div ...>
-        <div ...>{t('appDebug.generate.instruction')}</div>
-