fix(web): optimize prompt change logic for LLM nodes (#20841) (#20865)

HyaCinth
2025-06-10 15:04:10 +08:00
committed by GitHub
parent c439e82038
commit fc6e2d14a5
2 changed files with 3 additions and 3 deletions


@@ -247,11 +247,11 @@ const useConfig = (id: string, payload: LLMNodeType) => {
   }, [inputs, setInputs])
   const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => {
-    const newInputs = produce(inputRef.current, (draft) => {
+    const newInputs = produce(inputs, (draft) => {
       draft.prompt_template = newPrompt
     })
     setInputs(newInputs)
-  }, [setInputs])
+  }, [inputs, setInputs])
   const handleMemoryChange = useCallback((newMemory?: Memory) => {
     const newInputs = produce(inputs, (draft) => {
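For readers outside the codebase, a minimal sketch of the pattern this hunk changes, under simplified assumptions: the PromptItem/LLMNodeType shapes and the usePromptConfig hook below are placeholders, not the project's real definitions. The point of the change is that the handler now derives the next state from the inputs value of the current render and lists inputs in its useCallback dependency array, instead of reading a ref (inputRef.current) from a dependency-free callback.

import { useCallback, useState } from 'react'
import { produce } from 'immer'

// Placeholder shapes; the real PromptItem/LLMNodeType live in the workflow node types.
type PromptItem = { role: string; text: string }
type LLMNodeType = { prompt_template: PromptItem[] | PromptItem }

// Hypothetical stand-in for the node's useConfig hook.
function usePromptConfig(initial: LLMNodeType) {
  const [inputs, setInputs] = useState<LLMNodeType>(initial)

  // Reading `inputs` directly ties the callback to the current render,
  // so `inputs` must appear in the dependency list to avoid a stale closure.
  const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => {
    const newInputs = produce(inputs, (draft) => {
      draft.prompt_template = newPrompt
    })
    setInputs(newInputs)
  }, [inputs, setInputs])

  return { inputs, setInputs, handlePromptChange }
}

The trade-off in this sketch: a ref-based read never goes stale but sidesteps React's dependency tracking, while the inputs-based version recreates the handler whenever inputs changes, keeping it consistent with the state it reads.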


@@ -198,7 +198,7 @@ export type InputVar = {
   hint?: string
   options?: string[]
   value_selector?: ValueSelector
-  hide: boolean
+  hide?: boolean
 } & Partial<UploadFileSetting>
 export type ModelConfig = {
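A small sketch of what marking hide as optional means at call sites, with the InputVar shape trimmed to the fields visible in the hunk (ValueSelector is assumed here to be a string-array alias):

type ValueSelector = string[]

// Trimmed InputVar containing only the fields shown in the hunk.
type InputVar = {
  hint?: string
  options?: string[]
  value_selector?: ValueSelector
  hide?: boolean
}

// With `hide?` optional, literals that never set it now type-check...
const visibleVar: InputVar = { hint: 'shown by default' }

// ...while callers that do hide a variable are unaffected.
const hiddenVar: InputVar = { hide: true }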