mirror of https://github.com/langgenius/dify.git
fix: remove output tokens constraint
Signed-off-by: Stream <Stream_2@qq.com>
parent 87dba2765b
commit bd2ee70c63
@@ -555,7 +555,6 @@ class LLMGenerator:
             return {"questions": [], "error": f"Model schema not found for {model_name}"}

         completion_params = model_config.get("completion_params", {}) if model_config else {}
-        model_parameters = {**completion_params, "max_tokens": 256}
         try:
             response = invoke_llm_with_pydantic_model(
                 provider=model_instance.provider,
@@ -563,7 +562,7 @@ class LLMGenerator:
                 model_instance=model_instance,
                 prompt_messages=prompt_messages,
                 output_model=SuggestedQuestionsOutput,
-                model_parameters=model_parameters,
+                model_parameters=completion_params,
                 stream=False,
                 tenant_id=tenant_id,
             )
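What the change does, seen in isolation: the removed line merged a hard-coded "max_tokens": 256 into the caller's completion_params, so any larger output limit configured on the model was silently clamped; after the commit, completion_params is passed to invoke_llm_with_pydantic_model unchanged. A minimal sketch of the difference, with hypothetical parameter values:

completion_params = {"temperature": 0.7, "max_tokens": 2048}  # hypothetical user-configured params

# Old behaviour: the dict merge overwrote any user-set limit with a hard 256-token cap.
old_model_parameters = {**completion_params, "max_tokens": 256}
assert old_model_parameters["max_tokens"] == 256

# New behaviour: completion_params is forwarded as-is, so the caller's limit survives.
new_model_parameters = completion_params
assert new_model_parameters["max_tokens"] == 2048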