fix: resolve import errors caused by dify_graph.

This commit is contained in:
FFXN 2026-03-24 11:22:07 +08:00
parent 2ef7d8a798
commit a0d5d83787
17 changed files with 26 additions and 1666 deletions

View File

@ -21,7 +21,7 @@ from controllers.console.wraps import (
setup_required,
)
from core.evaluation.entities.evaluation_entity import EvaluationCategory, EvaluationConfigData, EvaluationRunRequest
from core.workflow.file import helpers as file_helpers
from dify_graph.file import helpers as file_helpers
from extensions.ext_database import db
from extensions.ext_storage import storage
from libs.helper import TimestampField
@ -161,7 +161,9 @@ def get_evaluation_target(view_func: Callable[P, R]):
.first()
)
elif target_type == "knowledge":
target = db.session.query(Dataset).where(Dataset.id == target_id, Dataset.tenant_id == current_tenant_id).first()
target = (db.session.query(Dataset)
.where(Dataset.id == target_id, Dataset.tenant_id == current_tenant_id)
.first())
if not target:
raise NotFound(f"{str(target_type)} not found")

View File

@ -34,7 +34,7 @@ from controllers.console.wraps import (
)
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.graph_engine.manager import GraphEngineManager
from dify_graph.graph_engine.manager import GraphEngineManager
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from factories import variable_factory

View File

@ -10,7 +10,7 @@ from core.evaluation.entities.evaluation_entity import (
EvaluationItemResult,
EvaluationMetric,
)
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events.base import NodeRunResult
logger = logging.getLogger(__name__)
@ -162,7 +162,7 @@ class BaseEvaluationInstance(ABC):
``node_run_result_mapping``.
"""
from core.workflow.nodes.base.variable_template_parser import REGEX as VARIABLE_REGEX
from dify_graph.nodes.base.variable_template_parser import REGEX as VARIABLE_REGEX
workflow_inputs: dict[str, Any] = {}

View File

@ -103,7 +103,7 @@ LLM_METRIC_NAMES: list[EvaluationMetricName] = [
EvaluationMetricName.FAITHFULNESS, # Every claim is grounded in context; no hallucinations
EvaluationMetricName.ANSWER_RELEVANCY, # Response stays on-topic and addresses the question
EvaluationMetricName.ANSWER_CORRECTNESS, # Factual accuracy and completeness vs. reference
EvaluationMetricName.SEMANTIC_SIMILARITY, # Semantic closeness to the reference answer
EvaluationMetricName.SEMANTIC_SIMILARITY, # Semantic closeness to the reference answer
]
RETRIEVAL_METRIC_NAMES: list[EvaluationMetricName] = [

View File

@ -23,8 +23,8 @@ from core.evaluation.entities.judgment_entity import (
JudgmentConfig,
JudgmentResult,
)
from core.workflow.utils.condition.entities import SupportedComparisonOperator
from core.workflow.utils.condition.processor import _evaluate_condition # pyright: ignore[reportPrivateUsage]
from dify_graph.utils.condition.entities import SupportedComparisonOperator
from dify_graph.utils.condition.processor import _evaluate_condition # pyright: ignore[reportPrivateUsage]
logger = logging.getLogger(__name__)

View File

@ -12,7 +12,7 @@ from core.evaluation.entities.evaluation_entity import (
EvaluationItemResult,
)
from core.evaluation.runners.base_evaluation_runner import BaseEvaluationRunner
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events import NodeRunResult
from models.model import App
logger = logging.getLogger(__name__)

View File

@ -26,7 +26,7 @@ from core.evaluation.entities.evaluation_entity import (
)
from core.evaluation.entities.judgment_entity import JudgmentConfig
from core.evaluation.judgment.processor import JudgmentProcessor
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events import NodeRunResult
from libs.datetime_utils import naive_utc_now
from models.evaluation import EvaluationRun, EvaluationRunItem, EvaluationRunStatus

View File

@ -12,7 +12,7 @@ from core.evaluation.entities.evaluation_entity import (
EvaluationItemResult,
)
from core.evaluation.runners.base_evaluation_runner import BaseEvaluationRunner
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events import NodeRunResult
logger = logging.getLogger(__name__)

View File

@ -11,7 +11,7 @@ from core.evaluation.entities.evaluation_entity import (
EvaluationItemResult,
)
from core.evaluation.runners.base_evaluation_runner import BaseEvaluationRunner
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events import NodeRunResult
logger = logging.getLogger(__name__)

View File

@ -22,7 +22,7 @@ from core.evaluation.entities.evaluation_entity import (
EvaluationItemResult,
)
from core.evaluation.runners.base_evaluation_runner import BaseEvaluationRunner
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events import NodeRunResult
from models.snippet import CustomizedSnippet
from models.workflow import WorkflowNodeExecutionModel

View File

@ -12,7 +12,7 @@ from core.evaluation.entities.evaluation_entity import (
EvaluationItemResult,
)
from core.evaluation.runners.base_evaluation_runner import BaseEvaluationRunner
from core.workflow.node_events import NodeRunResult
from dify_graph.node_events import NodeRunResult
logger = logging.getLogger(__name__)

File diff suppressed because it is too large Load Diff

View File

@ -22,8 +22,8 @@ from core.evaluation.entities.evaluation_entity import (
NodeInfo,
)
from core.evaluation.evaluation_manager import EvaluationManager
from core.workflow.enums import WorkflowNodeExecutionMetadataKey
from core.workflow.node_events.base import NodeRunResult
from dify_graph.enums import WorkflowNodeExecutionMetadataKey
from dify_graph.node_events.base import NodeRunResult
from models.evaluation import (
EvaluationConfiguration,
EvaluationRun,
@ -727,7 +727,7 @@ class EvaluationService:
"""Query all node execution records for a workflow run."""
from sqlalchemy import asc, select
from core.workflow.enums import WorkflowNodeExecutionStatus
from dify_graph.enums import WorkflowNodeExecutionStatus
from models.workflow import WorkflowNodeExecutionModel
stmt = (

View File

@ -7,15 +7,15 @@ from enum import StrEnum
from urllib.parse import urlparse
import yaml # type: ignore
from core.model_runtime.utils.encoders import jsonable_encoder
from packaging import version
from pydantic import BaseModel, Field
from sqlalchemy import select
from sqlalchemy.orm import Session
from core.helper import ssrf_proxy
from core.model_runtime.utils.encoders import jsonable_encoder
from core.plugin.entities.plugin import PluginDependency
from core.workflow.enums import NodeType
from dify_graph.enums import NodeType
from extensions.ext_redis import redis_client
from factories import variable_factory
from models import Account

View File

@ -28,7 +28,7 @@ from sqlalchemy.orm import make_transient
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.workflow.app_generator import WorkflowAppGenerator
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.file import File
from dify_graph.file.models import File
from factories import file_factory
from models import Account
from models.model import AppMode, EndUser

View File

@ -7,9 +7,9 @@ from typing import Any
from sqlalchemy import func, select
from sqlalchemy.orm import Session, sessionmaker
from core.workflow.enums import NodeType
from core.workflow.nodes.node_mapping import LATEST_VERSION, NODE_TYPE_CLASSES_MAPPING
from core.workflow.variables.variables import VariableBase
from core.workflow.node_factory import LATEST_VERSION, NODE_TYPE_CLASSES_MAPPING
from dify_graph.enums import NodeType
from dify_graph.variables.variables import VariableBase
from extensions.ext_database import db
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from models import Account

View File

@ -24,7 +24,7 @@ from core.evaluation.runners.llm_evaluation_runner import LLMEvaluationRunner
from core.evaluation.runners.retrieval_evaluation_runner import RetrievalEvaluationRunner
from core.evaluation.runners.snippet_evaluation_runner import SnippetEvaluationRunner
from core.evaluation.runners.workflow_evaluation_runner import WorkflowEvaluationRunner
from core.workflow.node_events.base import NodeRunResult
from dify_graph.node_events import NodeRunResult
from extensions.ext_database import db
from libs.datetime_utils import naive_utc_now
from models.enums import CreatorUserRole