From adf104becdaa0a0460474e67962916cdfbef7823 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 27 Jan 2026 18:33:51 +0800 Subject: [PATCH] fix: enhance structured output --- api/core/llm_generator/llm_generator.py | 2 +- api/core/llm_generator/output_parser/structured_output.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 96aace3883..d43b1b0588 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -654,7 +654,6 @@ class LLMGenerator: return f"""You are a code generator for Dify workflow automation. Generate {language} code to extract/transform available variables for the target parameter. -If user is not talking about the code node, provide the existing data or blank data for user, following the schema. ## Target Parameter {parameter_block} @@ -668,6 +667,7 @@ If user is not talking about the code node, provide the existing data or blank d - Respect target constraints (options/min/max/default/multiple) if provided. - If existing code is provided, adapt it instead of rewriting from scratch. - Return only JSON that matches the provided schema. +- If the user is not talking about the code node, return blank code/outputs/variables and explain this to the user in `message`.
""" @staticmethod diff --git a/api/core/llm_generator/output_parser/structured_output.py b/api/core/llm_generator/output_parser/structured_output.py index 1bc1184ca0..0498fabec7 100644 --- a/api/core/llm_generator/output_parser/structured_output.py +++ b/api/core/llm_generator/output_parser/structured_output.py @@ -156,10 +156,10 @@ def invoke_llm_with_structured_output( else: # Priority 3: Prompt-based fallback _set_response_format(model_parameters_with_json_schema, model_schema.parameter_rules) - prompt_messages = _handle_prompt_based_schema( - prompt_messages=prompt_messages, - structured_output_schema=json_schema, - ) + prompt_messages = _handle_prompt_based_schema( + prompt_messages=prompt_messages, + structured_output_schema=json_schema, + ) llm_result = model_instance.invoke_llm( prompt_messages=list(prompt_messages),