feat: Persist Variables for Enhanced Debugging Workflow (#20699)
This pull request introduces a feature aimed at improving the debugging experience during workflow editing. With variable persistence, the system automatically retains the output variables of previously executed nodes. These persisted variables can then be reused when debugging subsequent nodes, eliminating repetitive manual input, reducing user errors, and cutting down debugging effort.

Key highlights of this change:

- Automatic persistence of output variables for executed nodes.
- Reuse of persisted variables to simplify input steps for nodes that require them (e.g., `code`, `template`, `variable_assigner`).
- A smoother debugging experience with less friction.

Closes #19735.
parent 3113350e51
commit 10b738a296
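Reviewer note: as an illustration of the new endpoints registered at the end of the controller below, a client-side sketch might look like this (the console base URL, app ID, and access token are placeholders, not values from this PR):

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed base URL
APP_ID = "00000000-0000-0000-0000-000000000000"  # placeholder app id
HEADERS = {"Authorization": "Bearer <console-access-token>"}  # placeholder token

# List the persisted draft variables for the app (values are omitted by this listing API).
resp = requests.get(
    f"{CONSOLE_API}/apps/{APP_ID}/workflows/draft/variables",
    params={"page": 1, "limit": 20},
    headers=HEADERS,
)
items = resp.json()["items"]

# Edit the value of one persisted variable before re-running a downstream node.
variable_id = items[0]["id"]
requests.patch(
    f"{CONSOLE_API}/apps/{APP_ID}/workflows/draft/variables/{variable_id}",
    json={"value": "hello"},
    headers=HEADERS,
)

# Reset the variable back to the value recorded from its last node execution.
requests.put(
    f"{CONSOLE_API}/apps/{APP_ID}/workflows/draft/variables/{variable_id}/reset",
    headers=HEADERS,
)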
@ -0,0 +1,421 @@
|
||||
import logging
|
||||
from typing import Any, NoReturn
|
||||
|
||||
from flask import Response
|
||||
from flask_restful import Resource, fields, inputs, marshal, marshal_with, reqparse
|
||||
from sqlalchemy.orm import Session
|
||||
from werkzeug.exceptions import Forbidden
|
||||
|
||||
from controllers.console import api
|
||||
from controllers.console.app.error import (
|
||||
DraftWorkflowNotExist,
|
||||
)
|
||||
from controllers.console.app.wraps import get_app_model
|
||||
from controllers.console.wraps import account_initialization_required, setup_required
|
||||
from controllers.web.error import InvalidArgumentError, NotFoundError
|
||||
from core.variables.segment_group import SegmentGroup
|
||||
from core.variables.segments import ArrayFileSegment, FileSegment, Segment
|
||||
from core.variables.types import SegmentType
|
||||
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
|
||||
from factories.file_factory import build_from_mapping, build_from_mappings
|
||||
from factories.variable_factory import build_segment_with_type
|
||||
from libs.login import current_user, login_required
|
||||
from models import App, AppMode, db
|
||||
from models.workflow import WorkflowDraftVariable
|
||||
from services.workflow_draft_variable_service import WorkflowDraftVariableList, WorkflowDraftVariableService
|
||||
from services.workflow_service import WorkflowService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _convert_values_to_json_serializable_object(value: Segment) -> Any:
|
||||
if isinstance(value, FileSegment):
|
||||
return value.value.model_dump()
|
||||
elif isinstance(value, ArrayFileSegment):
|
||||
return [i.model_dump() for i in value.value]
|
||||
elif isinstance(value, SegmentGroup):
|
||||
return [_convert_values_to_json_serializable_object(i) for i in value.value]
|
||||
else:
|
||||
return value.value
|
||||
|
||||
|
||||
def _serialize_var_value(variable: WorkflowDraftVariable) -> Any:
|
||||
value = variable.get_value()
|
||||
# create a copy of the value to avoid affecting the model cache.
|
||||
value = value.model_copy(deep=True)
|
||||
# Refresh the URL signature before returning it to the client.
|
||||
if isinstance(value, FileSegment):
|
||||
file = value.value
|
||||
file.remote_url = file.generate_url()
|
||||
elif isinstance(value, ArrayFileSegment):
|
||||
files = value.value
|
||||
for file in files:
|
||||
file.remote_url = file.generate_url()
|
||||
return _convert_values_to_json_serializable_object(value)
|
||||
|
||||
|
||||
def _create_pagination_parser():
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument(
|
||||
"page",
|
||||
type=inputs.int_range(1, 100_000),
|
||||
required=False,
|
||||
default=1,
|
||||
location="args",
|
||||
help="the page of data requested",
|
||||
)
|
||||
parser.add_argument("limit", type=inputs.int_range(1, 100), required=False, default=20, location="args")
|
||||
return parser
|
||||
|
||||
|
||||
_WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS = {
|
||||
"id": fields.String,
|
||||
"type": fields.String(attribute=lambda model: model.get_variable_type()),
|
||||
"name": fields.String,
|
||||
"description": fields.String,
|
||||
"selector": fields.List(fields.String, attribute=lambda model: model.get_selector()),
|
||||
"value_type": fields.String,
|
||||
"edited": fields.Boolean(attribute=lambda model: model.edited),
|
||||
"visible": fields.Boolean,
|
||||
}
|
||||
|
||||
_WORKFLOW_DRAFT_VARIABLE_FIELDS = dict(
|
||||
_WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS,
|
||||
value=fields.Raw(attribute=_serialize_var_value),
|
||||
)
|
||||
|
||||
_WORKFLOW_DRAFT_ENV_VARIABLE_FIELDS = {
|
||||
"id": fields.String,
|
||||
"type": fields.String(attribute=lambda _: "env"),
|
||||
"name": fields.String,
|
||||
"description": fields.String,
|
||||
"selector": fields.List(fields.String, attribute=lambda model: model.get_selector()),
|
||||
"value_type": fields.String,
|
||||
"edited": fields.Boolean(attribute=lambda model: model.edited),
|
||||
"visible": fields.Boolean,
|
||||
}
|
||||
|
||||
_WORKFLOW_DRAFT_ENV_VARIABLE_LIST_FIELDS = {
|
||||
"items": fields.List(fields.Nested(_WORKFLOW_DRAFT_ENV_VARIABLE_FIELDS)),
|
||||
}
|
||||
|
||||
|
||||
def _get_items(var_list: WorkflowDraftVariableList) -> list[WorkflowDraftVariable]:
|
||||
return var_list.variables
|
||||
|
||||
|
||||
_WORKFLOW_DRAFT_VARIABLE_LIST_WITHOUT_VALUE_FIELDS = {
|
||||
"items": fields.List(fields.Nested(_WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS), attribute=_get_items),
|
||||
"total": fields.Raw(),
|
||||
}
|
||||
|
||||
_WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS = {
|
||||
"items": fields.List(fields.Nested(_WORKFLOW_DRAFT_VARIABLE_FIELDS), attribute=_get_items),
|
||||
}
|
||||
|
||||
|
||||
def _api_prerequisite(f):
|
||||
"""Common prerequisites for all draft workflow variable APIs.
|
||||
|
||||
It ensures the following conditions are satisfied:
|
||||
|
||||
- Dify has been properly set up.
|
||||
- The requesting user has logged in and completed account initialization.
|
||||
- The requested app is a workflow or a chat flow.
|
||||
- The requesting user has edit permission for the app.
|
||||
"""
|
||||
|
||||
@setup_required
|
||||
@login_required
|
||||
@account_initialization_required
|
||||
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
|
||||
def wrapper(*args, **kwargs):
|
||||
if not current_user.is_editor:
|
||||
raise Forbidden()
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class WorkflowVariableCollectionApi(Resource):
|
||||
@_api_prerequisite
|
||||
@marshal_with(_WORKFLOW_DRAFT_VARIABLE_LIST_WITHOUT_VALUE_FIELDS)
|
||||
def get(self, app_model: App):
|
||||
"""
|
||||
List draft workflow variables (without values).
|
||||
"""
|
||||
parser = _create_pagination_parser()
|
||||
args = parser.parse_args()
|
||||
|
||||
# ensure a draft workflow exists for this app
|
||||
workflow_service = WorkflowService()
|
||||
workflow_exist = workflow_service.is_workflow_exist(app_model=app_model)
|
||||
if not workflow_exist:
|
||||
raise DraftWorkflowNotExist()
|
||||
|
||||
# fetch the draft workflow variables for this app
|
||||
with Session(bind=db.engine, expire_on_commit=False) as session:
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=session,
|
||||
)
|
||||
workflow_vars = draft_var_srv.list_variables_without_values(
|
||||
app_id=app_model.id,
|
||||
page=args.page,
|
||||
limit=args.limit,
|
||||
)
|
||||
|
||||
return workflow_vars
|
||||
|
||||
@_api_prerequisite
|
||||
def delete(self, app_model: App):
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=db.session(),
|
||||
)
|
||||
draft_var_srv.delete_workflow_variables(app_model.id)
|
||||
db.session.commit()
|
||||
return Response("", 204)
|
||||
|
||||
|
||||
def validate_node_id(node_id: str) -> NoReturn | None:
|
||||
if node_id in [
|
||||
CONVERSATION_VARIABLE_NODE_ID,
|
||||
SYSTEM_VARIABLE_NODE_ID,
|
||||
]:
|
||||
# NOTE(QuantumGhost): While we store the system and conversation variables as node variables
|
||||
# with specific `node_id`s in the database, we still want to keep the APIs separate. By disallowing
# access to system and conversation variables through `NodeVariableCollectionApi`,
# we reduce the risk of API users coming to depend on this implementation detail.
|
||||
#
|
||||
# ref: [Hyrum's Law](https://www.hyrumslaw.com/)
|
||||
|
||||
raise InvalidArgumentError(
|
||||
f"invalid node_id, please use correspond api for conversation and system variables, node_id={node_id}",
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
class NodeVariableCollectionApi(Resource):
|
||||
@_api_prerequisite
|
||||
@marshal_with(_WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS)
|
||||
def get(self, app_model: App, node_id: str):
|
||||
validate_node_id(node_id)
|
||||
with Session(bind=db.engine, expire_on_commit=False) as session:
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=session,
|
||||
)
|
||||
node_vars = draft_var_srv.list_node_variables(app_model.id, node_id)
|
||||
|
||||
return node_vars
|
||||
|
||||
@_api_prerequisite
|
||||
def delete(self, app_model: App, node_id: str):
|
||||
validate_node_id(node_id)
|
||||
srv = WorkflowDraftVariableService(db.session())
|
||||
srv.delete_node_variables(app_model.id, node_id)
|
||||
db.session.commit()
|
||||
return Response("", 204)
|
||||
|
||||
|
||||
class VariableApi(Resource):
|
||||
_PATCH_NAME_FIELD = "name"
|
||||
_PATCH_VALUE_FIELD = "value"
|
||||
|
||||
@_api_prerequisite
|
||||
@marshal_with(_WORKFLOW_DRAFT_VARIABLE_FIELDS)
|
||||
def get(self, app_model: App, variable_id: str):
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=db.session(),
|
||||
)
|
||||
variable = draft_var_srv.get_variable(variable_id=variable_id)
|
||||
if variable is None:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
if variable.app_id != app_model.id:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
return variable
|
||||
|
||||
@_api_prerequisite
|
||||
@marshal_with(_WORKFLOW_DRAFT_VARIABLE_FIELDS)
|
||||
def patch(self, app_model: App, variable_id: str):
|
||||
# Request payload for file types:
|
||||
#
|
||||
# Local File:
|
||||
#
|
||||
# {
|
||||
# "type": "image",
|
||||
# "transfer_method": "local_file",
|
||||
# "url": "",
|
||||
# "upload_file_id": "daded54f-72c7-4f8e-9d18-9b0abdd9f190"
|
||||
# }
|
||||
#
|
||||
# Remote File:
|
||||
#
|
||||
#
|
||||
# {
|
||||
# "type": "image",
|
||||
# "transfer_method": "remote_url",
|
||||
# "url": "http://127.0.0.1:5001/files/1602650a-4fe4-423c-85a2-af76c083e3c4/file-preview?timestamp=1750041099&nonce=...&sign=...=",
|
||||
# "upload_file_id": "1602650a-4fe4-423c-85a2-af76c083e3c4"
|
||||
# }
|
||||
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument(self._PATCH_NAME_FIELD, type=str, required=False, nullable=True, location="json")
|
||||
# Parse 'value' field as-is to maintain its original data structure
|
||||
parser.add_argument(self._PATCH_VALUE_FIELD, type=lambda x: x, required=False, nullable=True, location="json")
|
||||
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=db.session(),
|
||||
)
|
||||
args = parser.parse_args(strict=True)
|
||||
|
||||
variable = draft_var_srv.get_variable(variable_id=variable_id)
|
||||
if variable is None:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
if variable.app_id != app_model.id:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
|
||||
new_name = args.get(self._PATCH_NAME_FIELD, None)
|
||||
raw_value = args.get(self._PATCH_VALUE_FIELD, None)
|
||||
if new_name is None and raw_value is None:
|
||||
return variable
|
||||
|
||||
new_value = None
|
||||
if raw_value is not None:
|
||||
if variable.value_type == SegmentType.FILE:
|
||||
if not isinstance(raw_value, dict):
|
||||
raise InvalidArgumentError(description=f"expected dict for file, got {type(raw_value)}")
|
||||
raw_value = build_from_mapping(mapping=raw_value, tenant_id=app_model.tenant_id)
|
||||
elif variable.value_type == SegmentType.ARRAY_FILE:
|
||||
if not isinstance(raw_value, list):
|
||||
raise InvalidArgumentError(description=f"expected list for files, got {type(raw_value)}")
|
||||
if len(raw_value) > 0 and not isinstance(raw_value[0], dict):
|
||||
raise InvalidArgumentError(description=f"expected dict for files[0], got {type(raw_value)}")
|
||||
raw_value = build_from_mappings(mappings=raw_value, tenant_id=app_model.tenant_id)
|
||||
new_value = build_segment_with_type(variable.value_type, raw_value)
|
||||
draft_var_srv.update_variable(variable, name=new_name, value=new_value)
|
||||
db.session.commit()
|
||||
return variable
|
||||
|
||||
@_api_prerequisite
|
||||
def delete(self, app_model: App, variable_id: str):
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=db.session(),
|
||||
)
|
||||
variable = draft_var_srv.get_variable(variable_id=variable_id)
|
||||
if variable is None:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
if variable.app_id != app_model.id:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
draft_var_srv.delete_variable(variable)
|
||||
db.session.commit()
|
||||
return Response("", 204)
|
||||
|
||||
|
||||
class VariableResetApi(Resource):
|
||||
@_api_prerequisite
|
||||
def put(self, app_model: App, variable_id: str):
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=db.session(),
|
||||
)
|
||||
|
||||
workflow_srv = WorkflowService()
|
||||
draft_workflow = workflow_srv.get_draft_workflow(app_model)
|
||||
if draft_workflow is None:
|
||||
raise NotFoundError(
|
||||
f"Draft workflow not found, app_id={app_model.id}",
|
||||
)
|
||||
variable = draft_var_srv.get_variable(variable_id=variable_id)
|
||||
if variable is None:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
if variable.app_id != app_model.id:
|
||||
raise NotFoundError(description=f"variable not found, id={variable_id}")
|
||||
|
||||
resetted = draft_var_srv.reset_variable(draft_workflow, variable)
|
||||
db.session.commit()
|
||||
if resetted is None:
|
||||
return Response("", 204)
|
||||
else:
|
||||
return marshal(resetted, _WORKFLOW_DRAFT_VARIABLE_FIELDS)
|
||||
|
||||
|
||||
def _get_variable_list(app_model: App, node_id: str) -> WorkflowDraftVariableList:
|
||||
with Session(bind=db.engine, expire_on_commit=False) as session:
|
||||
draft_var_srv = WorkflowDraftVariableService(
|
||||
session=session,
|
||||
)
|
||||
if node_id == CONVERSATION_VARIABLE_NODE_ID:
|
||||
draft_vars = draft_var_srv.list_conversation_variables(app_model.id)
|
||||
elif node_id == SYSTEM_VARIABLE_NODE_ID:
|
||||
draft_vars = draft_var_srv.list_system_variables(app_model.id)
|
||||
else:
|
||||
draft_vars = draft_var_srv.list_node_variables(app_id=app_model.id, node_id=node_id)
|
||||
return draft_vars
|
||||
|
||||
|
||||
class ConversationVariableCollectionApi(Resource):
|
||||
@_api_prerequisite
|
||||
@marshal_with(_WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS)
|
||||
def get(self, app_model: App):
|
||||
# NOTE(QuantumGhost): Prefill conversation variables into the draft variables table
|
||||
# so their IDs can be returned to the caller.
|
||||
workflow_srv = WorkflowService()
|
||||
draft_workflow = workflow_srv.get_draft_workflow(app_model)
|
||||
if draft_workflow is None:
|
||||
raise NotFoundError(description=f"draft workflow not found, id={app_model.id}")
|
||||
draft_var_srv = WorkflowDraftVariableService(db.session())
|
||||
draft_var_srv.prefill_conversation_variable_default_values(draft_workflow)
|
||||
db.session.commit()
|
||||
return _get_variable_list(app_model, CONVERSATION_VARIABLE_NODE_ID)
|
||||
|
||||
|
||||
class SystemVariableCollectionApi(Resource):
|
||||
@_api_prerequisite
|
||||
@marshal_with(_WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS)
|
||||
def get(self, app_model: App):
|
||||
return _get_variable_list(app_model, SYSTEM_VARIABLE_NODE_ID)
|
||||
|
||||
|
||||
class EnvironmentVariableCollectionApi(Resource):
|
||||
@_api_prerequisite
|
||||
def get(self, app_model: App):
|
||||
"""
|
||||
List environment variables of the draft workflow.
|
||||
"""
|
||||
# fetch draft workflow by app_model
|
||||
workflow_service = WorkflowService()
|
||||
workflow = workflow_service.get_draft_workflow(app_model=app_model)
|
||||
if workflow is None:
|
||||
raise DraftWorkflowNotExist()
|
||||
|
||||
env_vars = workflow.environment_variables
|
||||
env_vars_list = []
|
||||
for v in env_vars:
|
||||
env_vars_list.append(
|
||||
{
|
||||
"id": v.id,
|
||||
"type": "env",
|
||||
"name": v.name,
|
||||
"description": v.description,
|
||||
"selector": v.selector,
|
||||
"value_type": v.value_type.value,
|
||||
"value": v.value,
|
||||
# Do not track edited for env vars.
|
||||
"edited": False,
|
||||
"visible": True,
|
||||
"editable": True,
|
||||
}
|
||||
)
|
||||
|
||||
return {"items": env_vars_list}
|
||||
|
||||
|
||||
api.add_resource(
|
||||
WorkflowVariableCollectionApi,
|
||||
"/apps/<uuid:app_id>/workflows/draft/variables",
|
||||
)
|
||||
api.add_resource(NodeVariableCollectionApi, "/apps/<uuid:app_id>/workflows/draft/nodes/<string:node_id>/variables")
|
||||
api.add_resource(VariableApi, "/apps/<uuid:app_id>/workflows/draft/variables/<uuid:variable_id>")
|
||||
api.add_resource(VariableResetApi, "/apps/<uuid:app_id>/workflows/draft/variables/<uuid:variable_id>/reset")
|
||||
|
||||
api.add_resource(ConversationVariableCollectionApi, "/apps/<uuid:app_id>/workflows/draft/conversation-variables")
|
||||
api.add_resource(SystemVariableCollectionApi, "/apps/<uuid:app_id>/workflows/draft/system-variables")
|
||||
api.add_resource(EnvironmentVariableCollectionApi, "/apps/<uuid:app_id>/workflows/draft/environment-variables")
|
||||
@ -1 +1,11 @@
|
||||
from typing import Any
|
||||
|
||||
# TODO(QuantumGhost): Refactor variable type identification. Instead of directly
|
||||
# comparing `dify_model_identity` with constants throughout the codebase, extract
|
||||
# this logic into a dedicated function. This would encapsulate the implementation
|
||||
# details of how different variable types are identified.
|
||||
FILE_MODEL_IDENTITY = "__dify__file__"
|
||||
|
||||
|
||||
def maybe_file_object(o: Any) -> bool:
|
||||
return isinstance(o, dict) and o.get("dify_model_identity") == FILE_MODEL_IDENTITY
|
||||
|
||||
@ -1,8 +1,26 @@
|
||||
import json
|
||||
from collections.abc import Iterable, Sequence
|
||||
|
||||
from .segment_group import SegmentGroup
|
||||
from .segments import ArrayFileSegment, FileSegment, Segment
|
||||
|
||||
|
||||
def to_selector(node_id: str, name: str, paths: Iterable[str] = ()) -> Sequence[str]:
|
||||
selectors = [node_id, name]
|
||||
if paths:
|
||||
selectors.extend(paths)
|
||||
return selectors
|
||||
|
||||
|
||||
class SegmentJSONEncoder(json.JSONEncoder):
|
||||
def default(self, o):
|
||||
if isinstance(o, ArrayFileSegment):
|
||||
return [v.model_dump() for v in o.value]
|
||||
elif isinstance(o, FileSegment):
|
||||
return o.value.model_dump()
|
||||
elif isinstance(o, SegmentGroup):
|
||||
return [self.default(seg) for seg in o.value]
|
||||
elif isinstance(o, Segment):
|
||||
return o.value
|
||||
else:
|
||||
return super().default(o)
|
||||
|
||||
@ -0,0 +1,39 @@
|
||||
import abc
|
||||
from typing import Protocol
|
||||
|
||||
from core.variables import Variable
|
||||
|
||||
|
||||
class ConversationVariableUpdater(Protocol):
|
||||
"""
|
||||
ConversationVariableUpdater defines an abstraction for updating conversation variable values.
|
||||
|
||||
It is intended for use by `v1.VariableAssignerNode` and `v2.VariableAssignerNode` when updating
|
||||
conversation variables.
|
||||
|
||||
Implementations may choose to batch updates. If batching is used, the `flush` method
|
||||
should be implemented to persist buffered changes, and `update`
|
||||
should handle buffering accordingly.
|
||||
|
||||
Note: Since implementations may buffer updates, instances of ConversationVariableUpdater
|
||||
are not thread-safe. Each VariableAssignerNode should create its own instance during execution.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def update(self, conversation_id: str, variable: "Variable") -> None:
|
||||
"""
|
||||
Updates the value of the specified conversation variable in the underlying storage.
|
||||
|
||||
:param conversation_id: The ID of the conversation to update. Typically references `ConversationVariable.id`.
|
||||
:param variable: The `Variable` instance containing the updated value.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def flush(self):
|
||||
"""
|
||||
Flushes all pending updates to the underlying storage system.
|
||||
|
||||
If the implementation does not buffer updates, this method can be a no-op.
|
||||
"""
|
||||
pass
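For illustration, a buffering implementation of this protocol could look like the sketch below (not part of this change; the non-buffering production implementation follows in the next file):

class BufferedConversationVariableUpdater:
    """Illustrative sketch: `update` only records changes; `flush` persists them
    through an inner, non-buffering updater."""

    def __init__(self, inner: ConversationVariableUpdater) -> None:
        self._inner = inner
        self._pending: list[tuple[str, Variable]] = []

    def update(self, conversation_id: str, variable: "Variable") -> None:
        self._pending.append((conversation_id, variable))

    def flush(self) -> None:
        for conversation_id, variable in self._pending:
            self._inner.update(conversation_id, variable)
        self._pending.clear()
        self._inner.flush()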
|
||||
@ -1,19 +1,55 @@
|
||||
-from sqlalchemy import select
-from sqlalchemy.orm import Session
-
-from core.variables import Variable
-from core.workflow.nodes.variable_assigner.common.exc import VariableOperatorNodeError
-from extensions.ext_database import db
-from models import ConversationVariable
-
-
-def update_conversation_variable(conversation_id: str, variable: Variable):
-    stmt = select(ConversationVariable).where(
-        ConversationVariable.id == variable.id, ConversationVariable.conversation_id == conversation_id
-    )
-    with Session(db.engine) as session:
-        row = session.scalar(stmt)
-        if not row:
-            raise VariableOperatorNodeError("conversation variable not found in the database")
-        row.data = variable.model_dump_json()
-        session.commit()
+from collections.abc import Mapping, MutableMapping, Sequence
+from typing import Any, TypeVar
+
+from pydantic import BaseModel
+
+from core.variables import Segment
+from core.variables.consts import MIN_SELECTORS_LENGTH
+from core.variables.types import SegmentType
+
+# Use double underscore (`__`) prefix for internal variables
+# to minimize risk of collision with user-defined variable names.
+_UPDATED_VARIABLES_KEY = "__updated_variables"
+
+
+class UpdatedVariable(BaseModel):
+    name: str
+    selector: Sequence[str]
+    value_type: SegmentType
+    new_value: Any
+
+
+_T = TypeVar("_T", bound=MutableMapping[str, Any])
+
+
+def variable_to_processed_data(selector: Sequence[str], seg: Segment) -> UpdatedVariable:
+    if len(selector) < MIN_SELECTORS_LENGTH:
+        raise Exception("selector too short")
+    node_id, var_name = selector[:2]
+    return UpdatedVariable(
+        name=var_name,
+        selector=list(selector[:2]),
+        value_type=seg.value_type,
+        new_value=seg.value,
+    )
+
+
+def set_updated_variables(m: _T, updates: Sequence[UpdatedVariable]) -> _T:
+    m[_UPDATED_VARIABLES_KEY] = updates
+    return m
+
+
+def get_updated_variables(m: Mapping[str, Any]) -> Sequence[UpdatedVariable] | None:
+    updated_values = m.get(_UPDATED_VARIABLES_KEY, None)
+    if updated_values is None:
+        return None
+    result = []
+    for items in updated_values:
+        if isinstance(items, UpdatedVariable):
+            result.append(items)
+        elif isinstance(items, dict):
+            items = UpdatedVariable.model_validate(items)
+            result.append(items)
+        else:
+            raise TypeError(f"Invalid updated variable: {items}, type={type(items)}")
+    return result
|
||||
|
||||
@ -0,0 +1,38 @@
|
||||
from sqlalchemy import Engine, select
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from core.variables.variables import Variable
|
||||
from models.engine import db
|
||||
from models.workflow import ConversationVariable
|
||||
|
||||
from .exc import VariableOperatorNodeError
|
||||
|
||||
|
||||
class ConversationVariableUpdaterImpl:
|
||||
_engine: Engine | None
|
||||
|
||||
def __init__(self, engine: Engine | None = None) -> None:
|
||||
self._engine = engine
|
||||
|
||||
def _get_engine(self) -> Engine:
|
||||
if self._engine:
|
||||
return self._engine
|
||||
return db.engine
|
||||
|
||||
def update(self, conversation_id: str, variable: Variable):
|
||||
stmt = select(ConversationVariable).where(
|
||||
ConversationVariable.id == variable.id, ConversationVariable.conversation_id == conversation_id
|
||||
)
|
||||
with Session(self._get_engine()) as session:
|
||||
row = session.scalar(stmt)
|
||||
if not row:
|
||||
raise VariableOperatorNodeError("conversation variable not found in the database")
|
||||
row.data = variable.model_dump_json()
|
||||
session.commit()
|
||||
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
|
||||
def conversation_variable_updater_factory() -> ConversationVariableUpdaterImpl:
|
||||
return ConversationVariableUpdaterImpl()
|
||||
@ -0,0 +1,79 @@
|
||||
import abc
|
||||
from collections.abc import Mapping, Sequence
|
||||
from typing import Any, Protocol
|
||||
|
||||
from core.variables import Variable
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
|
||||
|
||||
class VariableLoader(Protocol):
|
||||
"""Interface for loading variables based on selectors.
|
||||
|
||||
A `VariableLoader` is responsible for retrieving additional variables required during the execution
|
||||
of a single node, which are not provided as user inputs.
|
||||
|
||||
NOTE(QuantumGhost): Typically, all variables loaded by a `VariableLoader` should belong to the same
|
||||
application and share the same `app_id`. However, this interface does not enforce that constraint,
|
||||
and the `app_id` parameter is intentionally omitted from `load_variables` to achieve separation of
|
||||
concern and allow for flexible implementations.
|
||||
|
||||
Implementations of `VariableLoader` should almost always have an `app_id` parameter in
|
||||
their constructor.
|
||||
|
||||
TODO(QuantumGhost): this is a temporary workaround. If we can move the creation of the node instance into
|
||||
`WorkflowService.single_step_run`, we may get rid of this interface.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def load_variables(self, selectors: list[list[str]]) -> list[Variable]:
|
||||
"""Load variables based on the provided selectors. If the selectors are empty,
|
||||
this method should return an empty list.
|
||||
|
||||
The order of the returned variables is not guaranteed. If the caller wants to ensure
|
||||
a specific order, they should sort the returned list themselves.
|
||||
|
||||
:param selectors: a list of string lists; each inner list should have at least two elements:
|
||||
- the first element is the node ID,
|
||||
- the second element is the variable name.
|
||||
:return: a list of Variable objects that match the provided selectors.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class _DummyVariableLoader(VariableLoader):
|
||||
"""A dummy implementation of VariableLoader that does not load any variables.
|
||||
Serves as a placeholder when no variable loading is needed.
|
||||
"""
|
||||
|
||||
def load_variables(self, selectors: list[list[str]]) -> list[Variable]:
|
||||
return []
|
||||
|
||||
|
||||
DUMMY_VARIABLE_LOADER = _DummyVariableLoader()
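For illustration, a minimal in-memory implementation of this protocol, keyed by the `[node_id, variable_name]` selector convention described above, might look like this (the production implementation is `DraftVarLoader` in the workflow draft variable service):

class InMemoryVariableLoader(VariableLoader):
    """Illustrative loader backed by a dict keyed by (node_id, variable_name)."""

    def __init__(self, variables: dict[tuple[str, str], Variable]) -> None:
        self._variables = variables

    def load_variables(self, selectors: list[list[str]]) -> list[Variable]:
        found: list[Variable] = []
        for selector in selectors:
            node_id, name = selector[0], selector[1]
            variable = self._variables.get((node_id, name))
            if variable is not None:
                found.append(variable)
        return found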
|
||||
|
||||
|
||||
def load_into_variable_pool(
|
||||
variable_loader: VariableLoader,
|
||||
variable_pool: VariablePool,
|
||||
variable_mapping: Mapping[str, Sequence[str]],
|
||||
user_inputs: Mapping[str, Any],
|
||||
):
|
||||
# Load any missing variables from the draft variables here, and set them
# into the variable_pool.
|
||||
variables_to_load: list[list[str]] = []
|
||||
for key, selector in variable_mapping.items():
|
||||
# NOTE(QuantumGhost): this logic needs to be in sync with
|
||||
# `WorkflowEntry.mapping_user_inputs_to_variable_pool`.
|
||||
node_variable_list = key.split(".")
|
||||
if len(node_variable_list) < 1:
|
||||
raise ValueError(f"Invalid variable key: {key}. It should have at least one element.")
|
||||
if key in user_inputs:
|
||||
continue
|
||||
node_variable_key = ".".join(node_variable_list[1:])
|
||||
if node_variable_key in user_inputs:
|
||||
continue
|
||||
if variable_pool.get(selector) is None:
|
||||
variables_to_load.append(list(selector))
|
||||
loaded = variable_loader.load_variables(variables_to_load)
|
||||
for var in loaded:
|
||||
variable_pool.add(var.selector, var)
|
||||
@ -0,0 +1,49 @@
|
||||
import json
|
||||
from collections.abc import Mapping
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from core.file.models import File
|
||||
from core.variables import Segment
|
||||
|
||||
|
||||
class WorkflowRuntimeTypeEncoder(json.JSONEncoder):
|
||||
def default(self, o: Any):
|
||||
if isinstance(o, Segment):
|
||||
return o.value
|
||||
elif isinstance(o, File):
|
||||
return o.to_dict()
|
||||
elif isinstance(o, BaseModel):
|
||||
return o.model_dump(mode="json")
|
||||
else:
|
||||
return super().default(o)
|
||||
|
||||
|
||||
class WorkflowRuntimeTypeConverter:
|
||||
def to_json_encodable(self, value: Mapping[str, Any] | None) -> Mapping[str, Any] | None:
|
||||
result = self._to_json_encodable_recursive(value)
|
||||
return result if isinstance(result, Mapping) or result is None else dict(result)
|
||||
|
||||
def _to_json_encodable_recursive(self, value: Any) -> Any:
|
||||
if value is None:
|
||||
return value
|
||||
if isinstance(value, (bool, int, str, float)):
|
||||
return value
|
||||
if isinstance(value, Segment):
|
||||
return self._to_json_encodable_recursive(value.value)
|
||||
if isinstance(value, File):
|
||||
return value.to_dict()
|
||||
if isinstance(value, BaseModel):
|
||||
return value.model_dump(mode="json")
|
||||
if isinstance(value, dict):
|
||||
res = {}
|
||||
for k, v in value.items():
|
||||
res[k] = self._to_json_encodable_recursive(v)
|
||||
return res
|
||||
if isinstance(value, list):
|
||||
res_list = []
|
||||
for item in value:
|
||||
res_list.append(self._to_json_encodable_recursive(item))
|
||||
return res_list
|
||||
return value
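A quick usage sketch (illustrative; it assumes `factories.variable_factory.build_segment` wraps a plain Python value into a `Segment`, as it is used elsewhere in this PR):

from factories.variable_factory import build_segment

converter = WorkflowRuntimeTypeConverter()
outputs = {"text": build_segment("hello"), "tags": [build_segment("a"), "b"]}
print(converter.to_json_encodable(outputs))
# {'text': 'hello', 'tags': ['a', 'b']}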
|
||||
@ -0,0 +1,22 @@
|
||||
import abc
|
||||
import datetime
|
||||
from typing import Protocol
|
||||
|
||||
|
||||
class _NowFunction(Protocol):
|
||||
@abc.abstractmethod
|
||||
def __call__(self, tz: datetime.timezone | None) -> datetime.datetime:
|
||||
pass
|
||||
|
||||
|
||||
# _now_func is a callable with the _NowFunction signature.
|
||||
# Its sole purpose is to abstract time retrieval, enabling
|
||||
# developers to mock this behavior in tests and time-dependent scenarios.
|
||||
_now_func: _NowFunction = datetime.datetime.now
|
||||
|
||||
|
||||
def naive_utc_now() -> datetime.datetime:
|
||||
"""Return a naive datetime object (without timezone information)
|
||||
representing current UTC time.
|
||||
"""
|
||||
return _now_func(datetime.UTC).replace(tzinfo=None)
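Illustrative test sketch for the time-mocking hook described above (assumes pytest's `monkeypatch` fixture and that this helper lives in a module importable as `libs.datetime_utils`; the actual module path may differ):

import datetime

from libs import datetime_utils  # assumed module path


def test_naive_utc_now_is_naive(monkeypatch):
    fixed = datetime.datetime(2025, 1, 1, 12, 0, tzinfo=datetime.UTC)
    monkeypatch.setattr(datetime_utils, "_now_func", lambda tz: fixed)

    now = datetime_utils.naive_utc_now()

    assert now == datetime.datetime(2025, 1, 1, 12, 0)
    assert now.tzinfo is None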
|
||||
@ -0,0 +1,11 @@
|
||||
import json
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class PydanticModelEncoder(json.JSONEncoder):
|
||||
def default(self, o):
|
||||
if isinstance(o, BaseModel):
|
||||
return o.model_dump()
|
||||
else:
|
||||
return super().default(o)
|
||||
@ -0,0 +1,20 @@
|
||||
"""All these exceptions are not meant to be caught by callers."""
|
||||
|
||||
|
||||
class WorkflowDataError(Exception):
|
||||
"""Base class for all workflow data related exceptions.
|
||||
|
||||
This should be used to indicate issues with workflow data integrity, such as
|
||||
no `graph` configuration, missing `nodes` field in `graph` configuration, or
|
||||
similar issues.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class NodeNotFoundError(WorkflowDataError):
|
||||
"""Raised when a node with the specified ID is not found in the workflow."""
|
||||
|
||||
def __init__(self, node_id: str):
|
||||
super().__init__(f"Node with ID '{node_id}' not found in the workflow.")
|
||||
self.node_id = node_id
|
||||
@ -0,0 +1,721 @@
|
||||
import dataclasses
|
||||
import datetime
|
||||
import logging
|
||||
from collections.abc import Mapping, Sequence
|
||||
from enum import StrEnum
|
||||
from typing import Any, ClassVar
|
||||
|
||||
from sqlalchemy import Engine, orm, select
|
||||
from sqlalchemy.dialects.postgresql import insert
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy.sql.expression import and_, or_
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.file.models import File
|
||||
from core.variables import Segment, StringSegment, Variable
|
||||
from core.variables.consts import MIN_SELECTORS_LENGTH
|
||||
from core.variables.segments import ArrayFileSegment, FileSegment
|
||||
from core.variables.types import SegmentType
|
||||
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, ENVIRONMENT_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.nodes import NodeType
|
||||
from core.workflow.nodes.variable_assigner.common.helpers import get_updated_variables
|
||||
from core.workflow.variable_loader import VariableLoader
|
||||
from factories.file_factory import StorageKeyLoader
|
||||
from factories.variable_factory import build_segment, segment_to_variable
|
||||
from models import App, Conversation
|
||||
from models.enums import DraftVariableType
|
||||
from models.workflow import Workflow, WorkflowDraftVariable, WorkflowNodeExecutionModel, is_system_variable_editable
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class WorkflowDraftVariableList:
|
||||
variables: list[WorkflowDraftVariable]
|
||||
total: int | None = None
|
||||
|
||||
|
||||
class WorkflowDraftVariableError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class VariableResetError(WorkflowDraftVariableError):
|
||||
pass
|
||||
|
||||
|
||||
class UpdateNotSupportedError(WorkflowDraftVariableError):
|
||||
pass
|
||||
|
||||
|
||||
class DraftVarLoader(VariableLoader):
|
||||
# This implements the VariableLoader interface for loading draft variables.
|
||||
#
|
||||
# ref: core.workflow.variable_loader.VariableLoader
|
||||
|
||||
# Database engine used for loading variables.
|
||||
_engine: Engine
|
||||
# Application ID for which variables are being loaded.
|
||||
_app_id: str
|
||||
_tenant_id: str
|
||||
_fallback_variables: Sequence[Variable]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
engine: Engine,
|
||||
app_id: str,
|
||||
tenant_id: str,
|
||||
fallback_variables: Sequence[Variable] | None = None,
|
||||
) -> None:
|
||||
self._engine = engine
|
||||
self._app_id = app_id
|
||||
self._tenant_id = tenant_id
|
||||
self._fallback_variables = fallback_variables or []
|
||||
|
||||
def _selector_to_tuple(self, selector: Sequence[str]) -> tuple[str, str]:
|
||||
return (selector[0], selector[1])
|
||||
|
||||
def load_variables(self, selectors: list[list[str]]) -> list[Variable]:
|
||||
if not selectors:
|
||||
return []
|
||||
|
||||
# Map each selector (as a tuple via `_selector_to_tuple`) to its corresponding Variable instance.
|
||||
variable_by_selector: dict[tuple[str, str], Variable] = {}
|
||||
|
||||
with Session(bind=self._engine, expire_on_commit=False) as session:
|
||||
srv = WorkflowDraftVariableService(session)
|
||||
draft_vars = srv.get_draft_variables_by_selectors(self._app_id, selectors)
|
||||
|
||||
for draft_var in draft_vars:
|
||||
segment = draft_var.get_value()
|
||||
variable = segment_to_variable(
|
||||
segment=segment,
|
||||
selector=draft_var.get_selector(),
|
||||
id=draft_var.id,
|
||||
name=draft_var.name,
|
||||
description=draft_var.description,
|
||||
)
|
||||
selector_tuple = self._selector_to_tuple(variable.selector)
|
||||
variable_by_selector[selector_tuple] = variable
|
||||
|
||||
# Important: collect any File values from the loaded draft variables and
# reload their storage keys via StorageKeyLoader before returning.
|
||||
files: list[File] = []
|
||||
for draft_var in draft_vars:
|
||||
value = draft_var.get_value()
|
||||
if isinstance(value, FileSegment):
|
||||
files.append(value.value)
|
||||
elif isinstance(value, ArrayFileSegment):
|
||||
files.extend(value.value)
|
||||
with Session(bind=self._engine) as session:
|
||||
storage_key_loader = StorageKeyLoader(session, tenant_id=self._tenant_id)
|
||||
storage_key_loader.load_storage_keys(files)
|
||||
|
||||
return list(variable_by_selector.values())
|
||||
|
||||
|
||||
class WorkflowDraftVariableService:
|
||||
_session: Session
|
||||
|
||||
def __init__(self, session: Session) -> None:
|
||||
self._session = session
|
||||
|
||||
def get_variable(self, variable_id: str) -> WorkflowDraftVariable | None:
|
||||
return self._session.query(WorkflowDraftVariable).filter(WorkflowDraftVariable.id == variable_id).first()
|
||||
|
||||
def get_draft_variables_by_selectors(
|
||||
self,
|
||||
app_id: str,
|
||||
selectors: Sequence[list[str]],
|
||||
) -> list[WorkflowDraftVariable]:
|
||||
ors = []
|
||||
for selector in selectors:
|
||||
node_id, name = selector
|
||||
ors.append(and_(WorkflowDraftVariable.node_id == node_id, WorkflowDraftVariable.name == name))
|
||||
|
||||
# NOTE(QuantumGhost): Although the number of `or` expressions may be large, as long as
|
||||
# each expression includes conditions on both `node_id` and `name` (which are covered by the unique index),
|
||||
# PostgreSQL can efficiently retrieve the results using a bitmap index scan.
|
||||
#
|
||||
# Alternatively, a `SELECT` statement could be constructed for each selector and
|
||||
# combined using `UNION` to fetch all rows.
|
||||
# Benchmarking indicates that both approaches yield comparable performance.
|
||||
variables = (
|
||||
self._session.query(WorkflowDraftVariable).where(WorkflowDraftVariable.app_id == app_id, or_(*ors)).all()
|
||||
)
|
||||
return variables
|
||||
|
||||
def list_variables_without_values(self, app_id: str, page: int, limit: int) -> WorkflowDraftVariableList:
|
||||
criteria = WorkflowDraftVariable.app_id == app_id
|
||||
total = None
|
||||
query = self._session.query(WorkflowDraftVariable).filter(criteria)
|
||||
if page == 1:
|
||||
total = query.count()
|
||||
variables = (
|
||||
# Do not load the `value` field.
|
||||
query.options(orm.defer(WorkflowDraftVariable.value))
|
||||
.order_by(WorkflowDraftVariable.id.desc())
|
||||
.limit(limit)
|
||||
.offset((page - 1) * limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
return WorkflowDraftVariableList(variables=variables, total=total)
|
||||
|
||||
def _list_node_variables(self, app_id: str, node_id: str) -> WorkflowDraftVariableList:
|
||||
criteria = (
|
||||
WorkflowDraftVariable.app_id == app_id,
|
||||
WorkflowDraftVariable.node_id == node_id,
|
||||
)
|
||||
query = self._session.query(WorkflowDraftVariable).filter(*criteria)
|
||||
variables = query.order_by(WorkflowDraftVariable.id.desc()).all()
|
||||
return WorkflowDraftVariableList(variables=variables)
|
||||
|
||||
def list_node_variables(self, app_id: str, node_id: str) -> WorkflowDraftVariableList:
|
||||
return self._list_node_variables(app_id, node_id)
|
||||
|
||||
def list_conversation_variables(self, app_id: str) -> WorkflowDraftVariableList:
|
||||
return self._list_node_variables(app_id, CONVERSATION_VARIABLE_NODE_ID)
|
||||
|
||||
def list_system_variables(self, app_id: str) -> WorkflowDraftVariableList:
|
||||
return self._list_node_variables(app_id, SYSTEM_VARIABLE_NODE_ID)
|
||||
|
||||
def get_conversation_variable(self, app_id: str, name: str) -> WorkflowDraftVariable | None:
|
||||
return self._get_variable(app_id=app_id, node_id=CONVERSATION_VARIABLE_NODE_ID, name=name)
|
||||
|
||||
def get_system_variable(self, app_id: str, name: str) -> WorkflowDraftVariable | None:
|
||||
return self._get_variable(app_id=app_id, node_id=SYSTEM_VARIABLE_NODE_ID, name=name)
|
||||
|
||||
def get_node_variable(self, app_id: str, node_id: str, name: str) -> WorkflowDraftVariable | None:
|
||||
return self._get_variable(app_id, node_id, name)
|
||||
|
||||
def _get_variable(self, app_id: str, node_id: str, name: str) -> WorkflowDraftVariable | None:
|
||||
variable = (
|
||||
self._session.query(WorkflowDraftVariable)
|
||||
.where(
|
||||
WorkflowDraftVariable.app_id == app_id,
|
||||
WorkflowDraftVariable.node_id == node_id,
|
||||
WorkflowDraftVariable.name == name,
|
||||
)
|
||||
.first()
|
||||
)
|
||||
return variable
|
||||
|
||||
def update_variable(
|
||||
self,
|
||||
variable: WorkflowDraftVariable,
|
||||
name: str | None = None,
|
||||
value: Segment | None = None,
|
||||
) -> WorkflowDraftVariable:
|
||||
if not variable.editable:
|
||||
raise UpdateNotSupportedError(f"variable not support updating, id={variable.id}")
|
||||
if name is not None:
|
||||
variable.set_name(name)
|
||||
if value is not None:
|
||||
variable.set_value(value)
|
||||
variable.last_edited_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
|
||||
self._session.flush()
|
||||
return variable
|
||||
|
||||
def _reset_conv_var(self, workflow: Workflow, variable: WorkflowDraftVariable) -> WorkflowDraftVariable | None:
|
||||
conv_var_by_name = {i.name: i for i in workflow.conversation_variables}
|
||||
conv_var = conv_var_by_name.get(variable.name)
|
||||
|
||||
if conv_var is None:
|
||||
self._session.delete(instance=variable)
|
||||
self._session.flush()
|
||||
_logger.warning(
|
||||
"Conversation variable not found for draft variable, id=%s, name=%s", variable.id, variable.name
|
||||
)
|
||||
return None
|
||||
|
||||
variable.set_value(conv_var)
|
||||
variable.last_edited_at = None
|
||||
self._session.add(variable)
|
||||
self._session.flush()
|
||||
return variable
|
||||
|
||||
def _reset_node_var(self, workflow: Workflow, variable: WorkflowDraftVariable) -> WorkflowDraftVariable | None:
|
||||
# If a variable does not allow updating, it makes no sense to reset it.
|
||||
if not variable.editable:
|
||||
return variable
|
||||
# No execution record for this variable, delete the variable instead.
|
||||
if variable.node_execution_id is None:
|
||||
self._session.delete(instance=variable)
|
||||
self._session.flush()
|
||||
_logger.warning("draft variable has no node_execution_id, id=%s, name=%s", variable.id, variable.name)
|
||||
return None
|
||||
|
||||
query = select(WorkflowNodeExecutionModel).where(WorkflowNodeExecutionModel.id == variable.node_execution_id)
|
||||
node_exec = self._session.scalars(query).first()
|
||||
if node_exec is None:
|
||||
_logger.warning(
|
||||
"Node exectution not found for draft variable, id=%s, name=%s, node_execution_id=%s",
|
||||
variable.id,
|
||||
variable.name,
|
||||
variable.node_execution_id,
|
||||
)
|
||||
self._session.delete(instance=variable)
|
||||
self._session.flush()
|
||||
return None
|
||||
|
||||
# Get node type for proper value extraction
|
||||
node_config = workflow.get_node_config_by_id(variable.node_id)
|
||||
node_type = workflow.get_node_type_from_node_config(node_config)
|
||||
|
||||
outputs_dict = node_exec.outputs_dict or {}
|
||||
|
||||
# Note: Based on the implementation in `_build_from_variable_assigner_mapping`,
|
||||
# VariableAssignerNode (both v1 and v2) can only create conversation draft variables.
|
||||
# For consistency, we should simply return when processing VARIABLE_ASSIGNER nodes.
|
||||
#
|
||||
# This implementation must remain synchronized with the `_build_from_variable_assigner_mapping`
|
||||
# and `save` methods.
|
||||
if node_type == NodeType.VARIABLE_ASSIGNER:
|
||||
return variable
|
||||
|
||||
if variable.name not in outputs_dict:
|
||||
# If variable not found in execution data, delete the variable
|
||||
self._session.delete(instance=variable)
|
||||
self._session.flush()
|
||||
return None
|
||||
value = outputs_dict[variable.name]
|
||||
value_seg = WorkflowDraftVariable.build_segment_with_type(variable.value_type, value)
|
||||
# Extract variable value using unified logic
|
||||
variable.set_value(value_seg)
|
||||
variable.last_edited_at = None # Reset to indicate this is a reset operation
|
||||
self._session.flush()
|
||||
return variable
|
||||
|
||||
def reset_variable(self, workflow: Workflow, variable: WorkflowDraftVariable) -> WorkflowDraftVariable | None:
|
||||
variable_type = variable.get_variable_type()
|
||||
if variable_type == DraftVariableType.CONVERSATION:
|
||||
return self._reset_conv_var(workflow, variable)
|
||||
elif variable_type == DraftVariableType.NODE:
|
||||
return self._reset_node_var(workflow, variable)
|
||||
else:
|
||||
raise VariableResetError(f"cannot reset system variable, variable_id={variable.id}")
|
||||
|
||||
def delete_variable(self, variable: WorkflowDraftVariable):
|
||||
self._session.delete(variable)
|
||||
|
||||
def delete_workflow_variables(self, app_id: str):
|
||||
(
|
||||
self._session.query(WorkflowDraftVariable)
|
||||
.filter(WorkflowDraftVariable.app_id == app_id)
|
||||
.delete(synchronize_session=False)
|
||||
)
|
||||
|
||||
def delete_node_variables(self, app_id: str, node_id: str):
|
||||
return self._delete_node_variables(app_id, node_id)
|
||||
|
||||
def _delete_node_variables(self, app_id: str, node_id: str):
|
||||
self._session.query(WorkflowDraftVariable).where(
|
||||
WorkflowDraftVariable.app_id == app_id,
|
||||
WorkflowDraftVariable.node_id == node_id,
|
||||
).delete()
|
||||
|
||||
def _get_conversation_id_from_draft_variable(self, app_id: str) -> str | None:
|
||||
draft_var = self._get_variable(
|
||||
app_id=app_id,
|
||||
node_id=SYSTEM_VARIABLE_NODE_ID,
|
||||
name=str(SystemVariableKey.CONVERSATION_ID),
|
||||
)
|
||||
if draft_var is None:
|
||||
return None
|
||||
segment = draft_var.get_value()
|
||||
if not isinstance(segment, StringSegment):
|
||||
_logger.warning(
|
||||
"sys.conversation_id variable is not a string: app_id=%s, id=%s",
|
||||
app_id,
|
||||
draft_var.id,
|
||||
)
|
||||
return None
|
||||
return segment.value
|
||||
|
||||
def get_or_create_conversation(
|
||||
self,
|
||||
account_id: str,
|
||||
app: App,
|
||||
workflow: Workflow,
|
||||
) -> str:
|
||||
"""
|
||||
get_or_create_conversation creates and returns the ID of a conversation for debugging.
|
||||
|
||||
If a conversation already exists, as determined by the following criteria, its ID is returned:
|
||||
- The system variable `sys.conversation_id` exists in the draft variable table, and
|
||||
- A corresponding conversation record is found in the database.
|
||||
|
||||
If no such conversation exists, a new conversation is created and its ID is returned.
|
||||
"""
|
||||
conv_id = self._get_conversation_id_from_draft_variable(workflow.app_id)
|
||||
|
||||
if conv_id is not None:
|
||||
conversation = (
|
||||
self._session.query(Conversation)
|
||||
.filter(
|
||||
Conversation.id == conv_id,
|
||||
Conversation.app_id == workflow.app_id,
|
||||
)
|
||||
.first()
|
||||
)
|
||||
# Only return the conversation ID if it exists and is valid (has a corresponding conversation record in the DB).
|
||||
if conversation is not None:
|
||||
return conv_id
|
||||
conversation = Conversation(
|
||||
app_id=workflow.app_id,
|
||||
app_model_config_id=app.app_model_config_id,
|
||||
model_provider=None,
|
||||
model_id="",
|
||||
override_model_configs=None,
|
||||
mode=app.mode,
|
||||
name="Draft Debugging Conversation",
|
||||
inputs={},
|
||||
introduction="",
|
||||
system_instruction="",
|
||||
system_instruction_tokens=0,
|
||||
status="normal",
|
||||
invoke_from=InvokeFrom.DEBUGGER.value,
|
||||
from_source="console",
|
||||
from_end_user_id=None,
|
||||
from_account_id=account_id,
|
||||
)
|
||||
|
||||
self._session.add(conversation)
|
||||
self._session.flush()
|
||||
return conversation.id
|
||||
|
||||
def prefill_conversation_variable_default_values(self, workflow: Workflow):
|
||||
""""""
|
||||
draft_conv_vars: list[WorkflowDraftVariable] = []
|
||||
for conv_var in workflow.conversation_variables:
|
||||
draft_var = WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=workflow.app_id,
|
||||
name=conv_var.name,
|
||||
value=conv_var,
|
||||
description=conv_var.description,
|
||||
)
|
||||
draft_conv_vars.append(draft_var)
|
||||
_batch_upsert_draft_variable(
|
||||
self._session,
|
||||
draft_conv_vars,
|
||||
policy=_UpsertPolicy.IGNORE,
|
||||
)
|
||||
|
||||
|
||||
class _UpsertPolicy(StrEnum):
|
||||
IGNORE = "ignore"
|
||||
OVERWRITE = "overwrite"
|
||||
|
||||
|
||||
def _batch_upsert_draft_variable(
|
||||
session: Session,
|
||||
draft_vars: Sequence[WorkflowDraftVariable],
|
||||
policy: _UpsertPolicy = _UpsertPolicy.OVERWRITE,
|
||||
) -> None:
|
||||
if not draft_vars:
|
||||
return None
|
||||
# Although we could use SQLAlchemy ORM operations here, we choose not to for several reasons:
|
||||
#
|
||||
# 1. The variable saving process involves writing multiple rows to the
|
||||
# `workflow_draft_variables` table. Batch insertion significantly improves performance.
|
||||
# 2. Using the ORM would require either:
|
||||
#
|
||||
# a. Checking for the existence of each variable before insertion,
|
||||
# resulting in 2n SQL statements for n variables and potential concurrency issues.
|
||||
# b. Attempting insertion first, then updating if a unique index violation occurs,
|
||||
# which still results in n to 2n SQL statements.
|
||||
#
|
||||
# Both approaches are inefficient and suboptimal.
|
||||
# 3. We do not need to retrieve the results of the SQL execution or populate ORM
|
||||
# model instances with the returned values.
|
||||
# 4. Batch insertion with `ON CONFLICT DO UPDATE` allows us to insert or update all
|
||||
# variables in a single SQL statement, avoiding the issues above.
|
||||
#
|
||||
# For these reasons, we use the SQLAlchemy query builder and rely on dialect-specific
|
||||
# insert operations instead of the ORM layer.
|
||||
stmt = insert(WorkflowDraftVariable).values([_model_to_insertion_dict(v) for v in draft_vars])
|
||||
if policy == _UpsertPolicy.OVERWRITE:
|
||||
stmt = stmt.on_conflict_do_update(
|
||||
index_elements=WorkflowDraftVariable.unique_app_id_node_id_name(),
|
||||
set_={
|
||||
"updated_at": stmt.excluded.updated_at,
|
||||
"last_edited_at": stmt.excluded.last_edited_at,
|
||||
"description": stmt.excluded.description,
|
||||
"value_type": stmt.excluded.value_type,
|
||||
"value": stmt.excluded.value,
|
||||
"visible": stmt.excluded.visible,
|
||||
"editable": stmt.excluded.editable,
|
||||
"node_execution_id": stmt.excluded.node_execution_id,
|
||||
},
|
||||
)
|
||||
elif policy == _UpsertPolicy.IGNORE:
|
||||
stmt = stmt.on_conflict_do_nothing(index_elements=WorkflowDraftVariable.unique_app_id_node_id_name())
|
||||
else:
|
||||
raise Exception("Invalid value for update policy.")
|
||||
session.execute(stmt)
|
||||
|
||||
|
||||
def _model_to_insertion_dict(model: WorkflowDraftVariable) -> dict[str, Any]:
|
||||
d: dict[str, Any] = {
|
||||
"app_id": model.app_id,
|
||||
"last_edited_at": None,
|
||||
"node_id": model.node_id,
|
||||
"name": model.name,
|
||||
"selector": model.selector,
|
||||
"value_type": model.value_type,
|
||||
"value": model.value,
|
||||
"node_execution_id": model.node_execution_id,
|
||||
}
|
||||
if model.visible is not None:
|
||||
d["visible"] = model.visible
|
||||
if model.editable is not None:
|
||||
d["editable"] = model.editable
|
||||
if model.created_at is not None:
|
||||
d["created_at"] = model.created_at
|
||||
if model.updated_at is not None:
|
||||
d["updated_at"] = model.updated_at
|
||||
if model.description is not None:
|
||||
d["description"] = model.description
|
||||
return d
|
||||
|
||||
|
||||
def _build_segment_for_serialized_values(v: Any) -> Segment:
|
||||
"""
|
||||
Reconstructs Segment objects from serialized values, with special handling
|
||||
for FileSegment and ArrayFileSegment types.
|
||||
|
||||
This function should only be used when:
|
||||
1. No explicit type information is available
|
||||
2. The input value is in serialized form (dict or list)
|
||||
|
||||
It detects potential file objects in the serialized data and properly rebuilds the
|
||||
appropriate segment type.
|
||||
"""
|
||||
return build_segment(WorkflowDraftVariable.rebuild_file_types(v))
|
||||
|
||||
|
||||
class DraftVariableSaver:
|
||||
# _DUMMY_OUTPUT_IDENTITY is a placeholder output for workflow nodes.
|
||||
# Its sole possible value is `None`.
|
||||
#
|
||||
# This is used to signal the execution of a workflow node when it has no other outputs.
|
||||
_DUMMY_OUTPUT_IDENTITY: ClassVar[str] = "__dummy__"
|
||||
_DUMMY_OUTPUT_VALUE: ClassVar[None] = None
|
||||
|
||||
# _EXCLUDE_VARIABLE_NAMES_MAPPING maps node types and versions to variable names that
|
||||
# should be excluded when saving draft variables. This prevents certain internal or
|
||||
# technical variables from being exposed in the draft environment, particularly those
|
||||
# that aren't meant to be directly edited or viewed by users.
|
||||
_EXCLUDE_VARIABLE_NAMES_MAPPING: dict[NodeType, frozenset[str]] = {
|
||||
NodeType.LLM: frozenset(["finish_reason"]),
|
||||
NodeType.LOOP: frozenset(["loop_round"]),
|
||||
}
|
||||
|
||||
# Database session used for persisting draft variables.
|
||||
_session: Session
|
||||
|
||||
# The application ID associated with the draft variables.
|
||||
# This should match the `Workflow.app_id` of the workflow to which the current node belongs.
|
||||
_app_id: str
|
||||
|
||||
# The ID of the node for which DraftVariableSaver is saving output variables.
|
||||
_node_id: str
|
||||
|
||||
# The type of the current node (see NodeType).
|
||||
_node_type: NodeType
|
||||
|
||||
# Indicates how the workflow execution was triggered (see InvokeFrom).
|
||||
_invoke_from: InvokeFrom
|
||||
|
||||
# The ID of the workflow node execution that produced the variables being saved.
|
||||
_node_execution_id: str
|
||||
|
||||
# _enclosing_node_id identifies the container node that the current node belongs to.
|
||||
# For example, if the current node is an LLM node inside an Iteration node
|
||||
# or Loop node, then `_enclosing_node_id` refers to the ID of
|
||||
# the containing Iteration or Loop node.
|
||||
#
|
||||
# If the current node is not nested within another node, `_enclosing_node_id` is
|
||||
# `None`.
|
||||
_enclosing_node_id: str | None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
session: Session,
|
||||
app_id: str,
|
||||
node_id: str,
|
||||
node_type: NodeType,
|
||||
invoke_from: InvokeFrom,
|
||||
node_execution_id: str,
|
||||
enclosing_node_id: str | None = None,
|
||||
):
|
||||
self._session = session
|
||||
self._app_id = app_id
|
||||
self._node_id = node_id
|
||||
self._node_type = node_type
|
||||
self._invoke_from = invoke_from
|
||||
self._node_execution_id = node_execution_id
|
||||
self._enclosing_node_id = enclosing_node_id
|
||||
|
||||
def _create_dummy_output_variable(self):
|
||||
return WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._app_id,
|
||||
node_id=self._node_id,
|
||||
name=self._DUMMY_OUTPUT_IDENTITY,
|
||||
node_execution_id=self._node_execution_id,
|
||||
value=build_segment(self._DUMMY_OUTPUT_VALUE),
|
||||
visible=False,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
def _should_save_output_variables_for_draft(self) -> bool:
|
||||
# Only save output variables for debugging execution of workflow.
|
||||
if self._invoke_from != InvokeFrom.DEBUGGER:
|
||||
return False
|
||||
if self._enclosing_node_id is not None and self._node_type != NodeType.VARIABLE_ASSIGNER:
|
||||
# Currently we do not save output variables for nodes inside loop or iteration.
|
||||
return False
|
||||
return True
|
||||
|
||||
def _build_from_variable_assigner_mapping(self, process_data: Mapping[str, Any]) -> list[WorkflowDraftVariable]:
|
||||
draft_vars: list[WorkflowDraftVariable] = []
|
||||
updated_variables = get_updated_variables(process_data) or []
|
||||
|
||||
for item in updated_variables:
|
||||
selector = item.selector
|
||||
if len(selector) < MIN_SELECTORS_LENGTH:
|
||||
raise Exception("selector too short")
|
||||
# NOTE(QuantumGhost): only the following two kinds of variables can be updated by
# VariableAssigner: conversation variables and iteration variables.
# We only save conversation variables here.
|
||||
if selector[0] != CONVERSATION_VARIABLE_NODE_ID:
|
||||
continue
|
||||
segment = WorkflowDraftVariable.build_segment_with_type(segment_type=item.value_type, value=item.new_value)
|
||||
draft_vars.append(
|
||||
WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=self._app_id,
|
||||
name=item.name,
|
||||
value=segment,
|
||||
)
|
||||
)
|
||||
# Add a dummy output variable to indicate that this node is executed.
|
||||
draft_vars.append(self._create_dummy_output_variable())
|
||||
return draft_vars
|
||||
|
||||
    def _build_variables_from_start_mapping(self, output: Mapping[str, Any]) -> list[WorkflowDraftVariable]:
        draft_vars = []
        has_non_sys_variables = False
        for name, value in output.items():
            value_seg = _build_segment_for_serialized_values(value)
            node_id, name = self._normalize_variable_for_start_node(name)
            # If node_id is not `sys`, the variable is a user-defined input field
            # of the `Start` node.
            if node_id != SYSTEM_VARIABLE_NODE_ID:
                draft_vars.append(
                    WorkflowDraftVariable.new_node_variable(
                        app_id=self._app_id,
                        node_id=self._node_id,
                        name=name,
                        node_execution_id=self._node_execution_id,
                        value=value_seg,
                        visible=True,
                        editable=True,
                    )
                )
                has_non_sys_variables = True
            else:
                if name == SystemVariableKey.FILES:
                    # Here we know the type of the variable must be `array[file]`, so we
                    # simply build files from the value.
                    files = [File.model_validate(v) for v in value]
                    if files:
                        value_seg = WorkflowDraftVariable.build_segment_with_type(SegmentType.ARRAY_FILE, files)
                    else:
                        value_seg = ArrayFileSegment(value=[])

                draft_vars.append(
                    WorkflowDraftVariable.new_sys_variable(
                        app_id=self._app_id,
                        name=name,
                        node_execution_id=self._node_execution_id,
                        value=value_seg,
                        editable=self._should_variable_be_editable(node_id, name),
                    )
                )
        if not has_non_sys_variables:
            draft_vars.append(self._create_dummy_output_variable())
        return draft_vars

    def _normalize_variable_for_start_node(self, name: str) -> tuple[str, str]:
        if not name.startswith(f"{SYSTEM_VARIABLE_NODE_ID}."):
            return self._node_id, name
        _, name_ = name.split(".", maxsplit=1)
        return SYSTEM_VARIABLE_NODE_ID, name_

    def _build_variables_from_mapping(self, output: Mapping[str, Any]) -> list[WorkflowDraftVariable]:
        draft_vars = []
        for name, value in output.items():
            if not self._should_variable_be_saved(name):
                _logger.debug(
                    "Skip saving variable as it has been excluded by its node_type, name=%s, node_type=%s",
                    name,
                    self._node_type,
                )
                continue
            if isinstance(value, Segment):
                value_seg = value
            else:
                value_seg = _build_segment_for_serialized_values(value)
            draft_vars.append(
                WorkflowDraftVariable.new_node_variable(
                    app_id=self._app_id,
                    node_id=self._node_id,
                    name=name,
                    node_execution_id=self._node_execution_id,
                    value=value_seg,
                    visible=self._should_variable_be_visible(self._node_id, self._node_type, name),
                )
            )
        return draft_vars

    def save(
        self,
        process_data: Mapping[str, Any] | None = None,
        outputs: Mapping[str, Any] | None = None,
    ):
        draft_vars: list[WorkflowDraftVariable] = []
        if outputs is None:
            outputs = {}
        if process_data is None:
            process_data = {}
        if not self._should_save_output_variables_for_draft():
            return
        if self._node_type == NodeType.VARIABLE_ASSIGNER:
            draft_vars = self._build_from_variable_assigner_mapping(process_data=process_data)
        elif self._node_type == NodeType.START:
            draft_vars = self._build_variables_from_start_mapping(outputs)
        else:
            draft_vars = self._build_variables_from_mapping(outputs)
        _batch_upsert_draft_varaible(self._session, draft_vars)

    @staticmethod
    def _should_variable_be_editable(node_id: str, name: str) -> bool:
        if node_id in (CONVERSATION_VARIABLE_NODE_ID, ENVIRONMENT_VARIABLE_NODE_ID):
            return False
        if node_id == SYSTEM_VARIABLE_NODE_ID and not is_system_variable_editable(name):
            return False
        return True

    @staticmethod
    def _should_variable_be_visible(node_id: str, node_type: NodeType, name: str) -> bool:
        if node_type in NodeType.IF_ELSE:
            return False
        if node_id == SYSTEM_VARIABLE_NODE_ID and not is_system_variable_editable(name):
            return False
        return True

    def _should_variable_be_saved(self, name: str) -> bool:
        exclude_var_names = self._EXCLUDE_VARIABLE_NAMES_MAPPING.get(self._node_type)
        if exclude_var_names is None:
            return True
        return name not in exclude_var_names

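To make the flow above concrete, here is a minimal, hypothetical sketch of how a node-execution hook could drive the class after a debugger-triggered run. The `session`, `app`, `node_id`, `node_type`, `node_execution_id`, and `node_run_result` names are assumptions standing in for whatever the surrounding workflow code provides; only DraftVariableSaver, InvokeFrom, and NodeType come from the diff itself.

# Hedged usage sketch, not part of the diff; the caller-side names are illustrative.
saver = DraftVariableSaver(
    session=session,
    app_id=app.id,
    node_id=node_id,
    node_type=node_type,
    invoke_from=InvokeFrom.DEBUGGER,
    node_execution_id=node_execution_id,
    enclosing_node_id=None,  # the node is not nested inside an Iteration/Loop node
)
# For a VARIABLE_ASSIGNER node, `process_data` is consulted; every other node type
# persists its `outputs` mapping (Start nodes receive special handling).
saver.save(process_data=node_run_result.process_data, outputs=node_run_result.outputs)
session.commit()
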
@ -1,107 +1,217 @@
|
||||
# OpenAI API Key
|
||||
OPENAI_API_KEY=
|
||||
FLASK_APP=app.py
|
||||
FLASK_DEBUG=0
|
||||
SECRET_KEY='uhySf6a3aZuvRNfAlcr47paOw9TRYBY6j8ZHXpVw1yx5RP27Yj3w2uvI'
|
||||
|
||||
CONSOLE_API_URL=http://127.0.0.1:5001
|
||||
CONSOLE_WEB_URL=http://127.0.0.1:3000
|
||||
|
||||
# Service API base URL
|
||||
SERVICE_API_URL=http://127.0.0.1:5001
|
||||
|
||||
# Web APP base URL
|
||||
APP_WEB_URL=http://127.0.0.1:3000
|
||||
|
||||
# Files URL
|
||||
FILES_URL=http://127.0.0.1:5001
|
||||
|
||||
# The time in seconds after which the file URL signature is rejected
|
||||
FILES_ACCESS_TIMEOUT=300
|
||||
|
||||
# Access token expiration time in minutes
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES=60
|
||||
|
||||
# Refresh token expiration time in days
|
||||
REFRESH_TOKEN_EXPIRE_DAYS=30
|
||||
|
||||
# celery configuration
|
||||
CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
|
||||
|
||||
# redis configuration
|
||||
REDIS_HOST=localhost
|
||||
REDIS_PORT=6379
|
||||
REDIS_USERNAME=
|
||||
REDIS_PASSWORD=difyai123456
|
||||
REDIS_USE_SSL=false
|
||||
REDIS_DB=0
|
||||
|
||||
# PostgreSQL database configuration
|
||||
DB_USERNAME=postgres
|
||||
DB_PASSWORD=difyai123456
|
||||
DB_HOST=localhost
|
||||
DB_PORT=5432
|
||||
DB_DATABASE=dify
|
||||
|
||||
# Storage configuration
|
||||
# use for store upload files, private keys...
|
||||
# storage type: opendal, s3, aliyun-oss, azure-blob, baidu-obs, google-storage, huawei-obs, oci-storage, tencent-cos, volcengine-tos, supabase
|
||||
STORAGE_TYPE=opendal
|
||||
|
||||
# Apache OpenDAL storage configuration, refer to https://github.com/apache/opendal
|
||||
OPENDAL_SCHEME=fs
|
||||
OPENDAL_FS_ROOT=storage
|
||||
|
||||
# CORS configuration
|
||||
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
|
||||
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
|
||||
|
||||
# Vector database configuration
|
||||
# support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm, oceanbase
|
||||
VECTOR_STORE=weaviate
|
||||
# Weaviate configuration
|
||||
WEAVIATE_ENDPOINT=http://localhost:8080
|
||||
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
WEAVIATE_GRPC_ENABLED=false
|
||||
WEAVIATE_BATCH_SIZE=100
|
||||
|
||||
|
||||
# Upload configuration
|
||||
UPLOAD_FILE_SIZE_LIMIT=15
|
||||
UPLOAD_FILE_BATCH_LIMIT=5
|
||||
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
|
||||
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
|
||||
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
|
||||
|
||||
# Model configuration
|
||||
MULTIMODAL_SEND_FORMAT=base64
|
||||
PROMPT_GENERATION_MAX_TOKENS=4096
|
||||
CODE_GENERATION_MAX_TOKENS=1024
|
||||
|
||||
# Mail configuration, support: resend, smtp
|
||||
MAIL_TYPE=
|
||||
MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@example.com>
|
||||
RESEND_API_KEY=
|
||||
RESEND_API_URL=https://api.resend.com
|
||||
# smtp configuration
|
||||
SMTP_SERVER=smtp.example.com
|
||||
SMTP_PORT=465
|
||||
SMTP_USERNAME=123
|
||||
SMTP_PASSWORD=abc
|
||||
SMTP_USE_TLS=true
|
||||
SMTP_OPPORTUNISTIC_TLS=false
|
||||
|
||||
# Sentry configuration
|
||||
SENTRY_DSN=
|
||||
|
||||
# DEBUG
|
||||
DEBUG=false
|
||||
SQLALCHEMY_ECHO=false
|
||||
|
||||
# Notion import configuration, support public and internal
|
||||
NOTION_INTEGRATION_TYPE=public
|
||||
NOTION_CLIENT_SECRET=you-client-secret
|
||||
NOTION_CLIENT_ID=you-client-id
|
||||
NOTION_INTERNAL_SECRET=you-internal-secret
|
||||
|
||||
ETL_TYPE=dify
|
||||
UNSTRUCTURED_API_URL=
|
||||
UNSTRUCTURED_API_KEY=
|
||||
SCARF_NO_ANALYTICS=false
|
||||
|
||||
# SSRF proxy configuration
|
||||
SSRF_PROXY_HTTP_URL=
|
||||
SSRF_PROXY_HTTPS_URL=
|
||||
SSRF_DEFAULT_MAX_RETRIES=3
|
||||
SSRF_DEFAULT_TIME_OUT=5
|
||||
SSRF_DEFAULT_CONNECT_TIME_OUT=5
|
||||
SSRF_DEFAULT_READ_TIME_OUT=5
|
||||
SSRF_DEFAULT_WRITE_TIME_OUT=5
|
||||
|
||||
BATCH_UPLOAD_LIMIT=10
|
||||
KEYWORD_DATA_SOURCE_TYPE=database
|
||||
|
||||
# Workflow file upload limit
|
||||
WORKFLOW_FILE_UPLOAD_LIMIT=10
|
||||
|
||||
# Azure OpenAI API Base Endpoint & API Key
|
||||
AZURE_OPENAI_API_BASE=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
|
||||
# Anthropic API Key
|
||||
ANTHROPIC_API_KEY=
|
||||
|
||||
# Replicate API Key
|
||||
REPLICATE_API_KEY=
|
||||
|
||||
# Hugging Face API Key
|
||||
HUGGINGFACE_API_KEY=
|
||||
HUGGINGFACE_TEXT_GEN_ENDPOINT_URL=
|
||||
HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL=
|
||||
HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL=
|
||||
|
||||
# Minimax Credentials
|
||||
MINIMAX_API_KEY=
|
||||
MINIMAX_GROUP_ID=
|
||||
|
||||
# Spark Credentials
|
||||
SPARK_APP_ID=
|
||||
SPARK_API_KEY=
|
||||
SPARK_API_SECRET=
|
||||
|
||||
# Tongyi Credentials
|
||||
TONGYI_DASHSCOPE_API_KEY=
|
||||
|
||||
# Wenxin Credentials
|
||||
WENXIN_API_KEY=
|
||||
WENXIN_SECRET_KEY=
|
||||
|
||||
# ZhipuAI Credentials
|
||||
ZHIPUAI_API_KEY=
|
||||
|
||||
# Baichuan Credentials
|
||||
BAICHUAN_API_KEY=
|
||||
BAICHUAN_SECRET_KEY=
|
||||
|
||||
# ChatGLM Credentials
|
||||
CHATGLM_API_BASE=
|
||||
|
||||
# Xinference Credentials
|
||||
XINFERENCE_SERVER_URL=
|
||||
XINFERENCE_GENERATION_MODEL_UID=
|
||||
XINFERENCE_CHAT_MODEL_UID=
|
||||
XINFERENCE_EMBEDDINGS_MODEL_UID=
|
||||
XINFERENCE_RERANK_MODEL_UID=
|
||||
|
||||
# OpenLLM Credentials
|
||||
OPENLLM_SERVER_URL=
|
||||
|
||||
# LocalAI Credentials
|
||||
LOCALAI_SERVER_URL=
|
||||
|
||||
# Cohere Credentials
|
||||
COHERE_API_KEY=
|
||||
|
||||
# Jina Credentials
|
||||
JINA_API_KEY=
|
||||
|
||||
# Ollama Credentials
|
||||
OLLAMA_BASE_URL=
|
||||
# CODE EXECUTION CONFIGURATION
|
||||
CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
|
||||
CODE_EXECUTION_API_KEY=dify-sandbox
|
||||
CODE_MAX_NUMBER=9223372036854775807
|
||||
CODE_MIN_NUMBER=-9223372036854775808
|
||||
CODE_MAX_STRING_LENGTH=80000
|
||||
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
|
||||
CODE_MAX_STRING_ARRAY_LENGTH=30
|
||||
CODE_MAX_OBJECT_ARRAY_LENGTH=30
|
||||
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
|
||||
|
||||
# API Tool configuration
|
||||
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
|
||||
API_TOOL_DEFAULT_READ_TIMEOUT=60
|
||||
|
||||
# HTTP Node configuration
|
||||
HTTP_REQUEST_MAX_CONNECT_TIMEOUT=300
|
||||
HTTP_REQUEST_MAX_READ_TIMEOUT=600
|
||||
HTTP_REQUEST_MAX_WRITE_TIMEOUT=600
|
||||
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
|
||||
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
|
||||
|
||||
# Respect X-* headers to redirect clients
|
||||
RESPECT_XFORWARD_HEADERS_ENABLED=false
|
||||
|
||||
# Log file path
|
||||
LOG_FILE=
|
||||
# Log file max size, the unit is MB
|
||||
LOG_FILE_MAX_SIZE=20
|
||||
# Log file max backup count
|
||||
LOG_FILE_BACKUP_COUNT=5
|
||||
# Log dateformat
|
||||
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
|
||||
# Log Timezone
|
||||
LOG_TZ=UTC
|
||||
# Log format
|
||||
LOG_FORMAT=%(asctime)s,%(msecs)d %(levelname)-2s [%(filename)s:%(lineno)d] %(req_id)s %(message)s
|
||||
|
||||
# Indexing configuration
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
|
||||
|
||||
# Workflow runtime configuration
|
||||
WORKFLOW_MAX_EXECUTION_STEPS=500
|
||||
WORKFLOW_MAX_EXECUTION_TIME=1200
|
||||
WORKFLOW_CALL_MAX_DEPTH=5
|
||||
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
|
||||
MAX_VARIABLE_SIZE=204800
|
||||
|
||||
# App configuration
|
||||
APP_MAX_EXECUTION_TIME=1200
|
||||
APP_MAX_ACTIVE_REQUESTS=0
|
||||
|
||||
# Celery beat configuration
|
||||
CELERY_BEAT_SCHEDULER_TIME=1
|
||||
|
||||
# Position configuration
|
||||
POSITION_TOOL_PINS=
|
||||
POSITION_TOOL_INCLUDES=
|
||||
POSITION_TOOL_EXCLUDES=
|
||||
|
||||
POSITION_PROVIDER_PINS=
|
||||
POSITION_PROVIDER_INCLUDES=
|
||||
POSITION_PROVIDER_EXCLUDES=
|
||||
|
||||
# Together API Key
|
||||
TOGETHER_API_KEY=
|
||||
# Plugin configuration
|
||||
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
|
||||
PLUGIN_DAEMON_URL=http://127.0.0.1:5002
|
||||
PLUGIN_REMOTE_INSTALL_PORT=5003
|
||||
PLUGIN_REMOTE_INSTALL_HOST=localhost
|
||||
PLUGIN_MAX_PACKAGE_SIZE=15728640
|
||||
INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
|
||||
|
||||
# Mock Switch
|
||||
MOCK_SWITCH=false
|
||||
# Marketplace configuration
|
||||
MARKETPLACE_ENABLED=true
|
||||
MARKETPLACE_API_URL=https://marketplace.dify.ai
|
||||
|
||||
# CODE EXECUTION CONFIGURATION
|
||||
CODE_EXECUTION_ENDPOINT=
|
||||
CODE_EXECUTION_API_KEY=
|
||||
# Endpoint configuration
|
||||
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
|
||||
|
||||
# Volcengine MaaS Credentials
|
||||
VOLC_API_KEY=
|
||||
VOLC_SECRET_KEY=
|
||||
VOLC_MODEL_ENDPOINT_ID=
|
||||
VOLC_EMBEDDING_ENDPOINT_ID=
|
||||
# Reset password token expiry minutes
|
||||
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
|
||||
|
||||
# 360 AI Credentials
|
||||
ZHINAO_API_KEY=
|
||||
CREATE_TIDB_SERVICE_JOB_ENABLED=false
|
||||
|
||||
# Plugin configuration
|
||||
PLUGIN_DAEMON_KEY=
|
||||
PLUGIN_DAEMON_URL=
|
||||
# Maximum number of submitted thread count in a ThreadPool for parallel node execution
|
||||
MAX_SUBMIT_COUNT=100
|
||||
# Lockout duration in seconds
|
||||
LOGIN_LOCKOUT_DURATION=86400
|
||||
|
||||
# Marketplace configuration
|
||||
MARKETPLACE_API_URL=
|
||||
# VESSL AI Credentials
|
||||
VESSL_AI_MODEL_NAME=
|
||||
VESSL_AI_API_KEY=
|
||||
VESSL_AI_ENDPOINT_URL=
|
||||
|
||||
# GPUStack Credentials
|
||||
GPUSTACK_SERVER_URL=
|
||||
GPUSTACK_API_KEY=
|
||||
|
||||
# Gitee AI Credentials
|
||||
GITEE_AI_API_KEY=
|
||||
|
||||
# xAI Credentials
|
||||
XAI_API_KEY=
|
||||
XAI_API_BASE=
|
||||
HTTP_PROXY='http://127.0.0.1:1092'
|
||||
HTTPS_PROXY='http://127.0.0.1:1092'
|
||||
NO_PROXY='localhost,127.0.0.1'
|
||||
LOG_LEVEL=INFO
|
||||
|
||||
@ -1,19 +1,91 @@
|
||||
import os
|
||||
import pathlib
|
||||
import random
|
||||
import secrets
|
||||
from collections.abc import Generator
|
||||
|
||||
# Getting the absolute path of the current file's directory
|
||||
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
import pytest
|
||||
from flask import Flask
|
||||
from flask.testing import FlaskClient
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
# Getting the absolute path of the project's root directory
|
||||
PROJECT_DIR = os.path.abspath(os.path.join(ABS_PATH, os.pardir, os.pardir))
|
||||
from app_factory import create_app
|
||||
from models import Account, DifySetup, Tenant, TenantAccountJoin, db
|
||||
from services.account_service import AccountService, RegisterService
|
||||
|
||||
|
||||
# Loading the .env file if it exists
|
||||
def _load_env() -> None:
|
||||
dotenv_path = os.path.join(PROJECT_DIR, "tests", "integration_tests", ".env")
|
||||
if os.path.exists(dotenv_path):
|
||||
current_file_path = pathlib.Path(__file__).absolute()
|
||||
# Items later in the list have higher precedence.
|
||||
files_to_load = [".env", "vdb.env"]
|
||||
|
||||
env_file_paths = [current_file_path.parent / i for i in files_to_load]
|
||||
for path in env_file_paths:
|
||||
if not path.exists():
|
||||
continue
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv(dotenv_path)
|
||||
# Set `override=True` to ensure values from `vdb.env` take priority over values from `.env`
|
||||
load_dotenv(str(path), override=True)
|
||||
|
||||
|
||||
_load_env()
|
||||
|
||||
_CACHED_APP = create_app()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def flask_app() -> Flask:
|
||||
return _CACHED_APP
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def setup_account(request) -> Generator[Account, None, None]:
|
"""`setup_account` completes the setup process for the Dify application.

It creates `Account` and `Tenant`, and inserts a `DifySetup` record into the database.

Most tests in the `controllers` package require that Dify has been set up successfully.
"""
|
||||
with _CACHED_APP.test_request_context():
|
||||
rand_suffix = random.randint(int(1e6), int(1e7)) # noqa
|
||||
name = f"test-user-{rand_suffix}"
|
||||
email = f"{name}@example.com"
|
||||
RegisterService.setup(
|
||||
email=email,
|
||||
name=name,
|
||||
password=secrets.token_hex(16),
|
||||
ip_address="localhost",
|
||||
)
|
||||
|
||||
with _CACHED_APP.test_request_context():
|
||||
with Session(bind=db.engine, expire_on_commit=False) as session:
|
||||
account = session.query(Account).filter_by(email=email).one()
|
||||
|
||||
yield account
|
||||
|
||||
with _CACHED_APP.test_request_context():
|
||||
db.session.query(DifySetup).delete()
|
||||
db.session.query(TenantAccountJoin).delete()
|
||||
db.session.query(Account).delete()
|
||||
db.session.query(Tenant).delete()
|
||||
db.session.commit()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def flask_req_ctx():
|
||||
with _CACHED_APP.test_request_context():
|
||||
yield
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def auth_header(setup_account) -> dict[str, str]:
|
||||
token = AccountService.get_account_jwt_token(setup_account)
|
||||
return {"Authorization": f"Bearer {token}"}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_client() -> Generator[FlaskClient, None, None]:
|
||||
with _CACHED_APP.test_client() as client:
|
||||
yield client
|
||||
|
||||
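As a rough illustration of how these fixtures are meant to compose (this sketch is not part of the diff, and the endpoint path is an assumption), a console API test typically takes `test_client` and `auth_header` together:

# Hypothetical example; the URL below is illustrative only.
def test_console_api_with_auth(test_client, auth_header):
    response = test_client.get("/console/api/apps", headers=auth_header)
    # The Authorization header produced by auth_header should at least avoid a 401.
    assert response.status_code != 401
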
@ -1,25 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from app_factory import create_app
|
||||
from configs import dify_config
|
||||
|
||||
mock_user = type(
|
||||
"MockUser",
|
||||
(object,),
|
||||
{
|
||||
"is_authenticated": True,
|
||||
"id": "123",
|
||||
"is_editor": True,
|
||||
"is_dataset_editor": True,
|
||||
"status": "active",
|
||||
"get_id": "123",
|
||||
"current_tenant_id": "9d2074fc-6f86-45a9-b09d-6ecc63b9056b",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def app():
|
||||
app = create_app()
|
||||
dify_config.LOGIN_DISABLED = True
|
||||
return app
|
||||
@ -0,0 +1,47 @@
|
||||
import uuid
from unittest import mock

from controllers.console.app import workflow_draft_variable as draft_variable_api
from controllers.console.app import wraps
from factories.variable_factory import build_segment
from models import App, AppMode
from models.workflow import WorkflowDraftVariable
from services.workflow_draft_variable_service import WorkflowDraftVariableList, WorkflowDraftVariableService


def _get_mock_srv_class() -> type[WorkflowDraftVariableService]:
    return mock.create_autospec(WorkflowDraftVariableService)


class TestWorkflowDraftNodeVariableListApi:
    def test_get(self, test_client, auth_header, monkeypatch):
        srv_class = _get_mock_srv_class()
        mock_app_model: App = App()
        mock_app_model.id = str(uuid.uuid4())
        test_node_id = "test_node_id"
        mock_app_model.mode = AppMode.ADVANCED_CHAT
        mock_load_app_model = mock.Mock(return_value=mock_app_model)

        monkeypatch.setattr(draft_variable_api, "WorkflowDraftVariableService", srv_class)
        monkeypatch.setattr(wraps, "_load_app_model", mock_load_app_model)

        var1 = WorkflowDraftVariable.new_node_variable(
            app_id="test_app_1",
            node_id="test_node_1",
            name="str_var",
            value=build_segment("str_value"),
            node_execution_id=str(uuid.uuid4()),
        )
        srv_instance = mock.create_autospec(WorkflowDraftVariableService, instance=True)
        srv_class.return_value = srv_instance
        srv_instance.list_node_variables.return_value = WorkflowDraftVariableList(variables=[var1])

        response = test_client.get(
            f"/console/api/apps/{mock_app_model.id}/workflows/draft/nodes/{test_node_id}/variables",
            headers=auth_header,
        )
        assert response.status_code == 200
        response_dict = response.json
        assert isinstance(response_dict, dict)
        assert "items" in response_dict
        assert len(response_dict["items"]) == 1

@ -1,9 +0,0 @@
|
||||
from unittest.mock import patch

from app_fixture import mock_user  # type: ignore


def test_post_requires_login(app):
    with app.test_client() as client, patch("flask_login.utils._get_user", mock_user):
        response = client.get("/console/api/data-source/integrates")
        assert response.status_code == 200

@ -0,0 +1,371 @@
|
||||
import unittest
|
||||
from datetime import UTC, datetime
|
||||
from typing import Optional
|
||||
from unittest.mock import patch
|
||||
from uuid import uuid4
|
||||
|
||||
import pytest
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from core.file import File, FileTransferMethod, FileType
|
||||
from extensions.ext_database import db
|
||||
from factories.file_factory import StorageKeyLoader
|
||||
from models import ToolFile, UploadFile
|
||||
from models.enums import CreatorUserRole
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("flask_req_ctx")
|
||||
class TestStorageKeyLoader(unittest.TestCase):
|
||||
"""
|
||||
Integration tests for StorageKeyLoader class.
|
||||
|
||||
Tests the batched loading of storage keys from the database for files
|
||||
with different transfer methods: LOCAL_FILE, REMOTE_URL, and TOOL_FILE.
|
||||
"""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test data before each test method."""
|
||||
self.session = db.session()
|
||||
self.tenant_id = str(uuid4())
|
||||
self.user_id = str(uuid4())
|
||||
self.conversation_id = str(uuid4())
|
||||
|
||||
# Create test data that will be cleaned up after each test
|
||||
self.test_upload_files = []
|
||||
self.test_tool_files = []
|
||||
|
||||
# Create StorageKeyLoader instance
|
||||
self.loader = StorageKeyLoader(self.session, self.tenant_id)
|
||||
|
||||
def tearDown(self):
|
||||
"""Clean up test data after each test method."""
|
||||
self.session.rollback()
|
||||
|
||||
def _create_upload_file(
|
||||
self, file_id: Optional[str] = None, storage_key: Optional[str] = None, tenant_id: Optional[str] = None
|
||||
) -> UploadFile:
|
||||
"""Helper method to create an UploadFile record for testing."""
|
||||
if file_id is None:
|
||||
file_id = str(uuid4())
|
||||
if storage_key is None:
|
||||
storage_key = f"test_storage_key_{uuid4()}"
|
||||
if tenant_id is None:
|
||||
tenant_id = self.tenant_id
|
||||
|
||||
upload_file = UploadFile(
|
||||
tenant_id=tenant_id,
|
||||
storage_type="local",
|
||||
key=storage_key,
|
||||
name="test_file.txt",
|
||||
size=1024,
|
||||
extension=".txt",
|
||||
mime_type="text/plain",
|
||||
created_by_role=CreatorUserRole.ACCOUNT,
|
||||
created_by=self.user_id,
|
||||
created_at=datetime.now(UTC),
|
||||
used=False,
|
||||
)
|
||||
upload_file.id = file_id
|
||||
|
||||
self.session.add(upload_file)
|
||||
self.session.flush()
|
||||
self.test_upload_files.append(upload_file)
|
||||
|
||||
return upload_file
|
||||
|
||||
def _create_tool_file(
|
||||
self, file_id: Optional[str] = None, file_key: Optional[str] = None, tenant_id: Optional[str] = None
|
||||
) -> ToolFile:
|
||||
"""Helper method to create a ToolFile record for testing."""
|
||||
if file_id is None:
|
||||
file_id = str(uuid4())
|
||||
if file_key is None:
|
||||
file_key = f"test_file_key_{uuid4()}"
|
||||
if tenant_id is None:
|
||||
tenant_id = self.tenant_id
|
||||
|
||||
tool_file = ToolFile()
|
||||
tool_file.id = file_id
|
||||
tool_file.user_id = self.user_id
|
||||
tool_file.tenant_id = tenant_id
|
||||
tool_file.conversation_id = self.conversation_id
|
||||
tool_file.file_key = file_key
|
||||
tool_file.mimetype = "text/plain"
|
||||
tool_file.original_url = "http://example.com/file.txt"
|
||||
tool_file.name = "test_tool_file.txt"
|
||||
tool_file.size = 2048
|
||||
|
||||
self.session.add(tool_file)
|
||||
self.session.flush()
|
||||
self.test_tool_files.append(tool_file)
|
||||
|
||||
return tool_file
|
||||
|
||||
def _create_file(
|
||||
self, related_id: str, transfer_method: FileTransferMethod, tenant_id: Optional[str] = None
|
||||
) -> File:
|
||||
"""Helper method to create a File object for testing."""
|
||||
if tenant_id is None:
|
||||
tenant_id = self.tenant_id
|
||||
|
||||
# Set related_id for LOCAL_FILE and TOOL_FILE transfer methods
|
||||
file_related_id = None
|
||||
remote_url = None
|
||||
|
||||
if transfer_method in (FileTransferMethod.LOCAL_FILE, FileTransferMethod.TOOL_FILE):
|
||||
file_related_id = related_id
|
||||
elif transfer_method == FileTransferMethod.REMOTE_URL:
|
||||
remote_url = "https://example.com/test_file.txt"
|
||||
file_related_id = related_id
|
||||
|
||||
return File(
|
||||
id=str(uuid4()), # Generate new UUID for File.id
|
||||
tenant_id=tenant_id,
|
||||
type=FileType.DOCUMENT,
|
||||
transfer_method=transfer_method,
|
||||
related_id=file_related_id,
|
||||
remote_url=remote_url,
|
||||
filename="test_file.txt",
|
||||
extension=".txt",
|
||||
mime_type="text/plain",
|
||||
size=1024,
|
||||
storage_key="initial_key",
|
||||
)
|
||||
|
||||
def test_load_storage_keys_local_file(self):
|
||||
"""Test loading storage keys for LOCAL_FILE transfer method."""
|
||||
# Create test data
|
||||
upload_file = self._create_upload_file()
|
||||
file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
|
||||
# Load storage keys
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
# Verify storage key was loaded correctly
|
||||
assert file._storage_key == upload_file.key
|
||||
|
||||
def test_load_storage_keys_remote_url(self):
|
||||
"""Test loading storage keys for REMOTE_URL transfer method."""
|
||||
# Create test data
|
||||
upload_file = self._create_upload_file()
|
||||
file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.REMOTE_URL)
|
||||
|
||||
# Load storage keys
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
# Verify storage key was loaded correctly
|
||||
assert file._storage_key == upload_file.key
|
||||
|
||||
def test_load_storage_keys_tool_file(self):
|
||||
"""Test loading storage keys for TOOL_FILE transfer method."""
|
||||
# Create test data
|
||||
tool_file = self._create_tool_file()
|
||||
file = self._create_file(related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE)
|
||||
|
||||
# Load storage keys
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
# Verify storage key was loaded correctly
|
||||
assert file._storage_key == tool_file.file_key
|
||||
|
||||
def test_load_storage_keys_mixed_methods(self):
|
||||
"""Test batch loading with mixed transfer methods."""
|
||||
# Create test data for different transfer methods
|
||||
upload_file1 = self._create_upload_file()
|
||||
upload_file2 = self._create_upload_file()
|
||||
tool_file = self._create_tool_file()
|
||||
|
||||
file1 = self._create_file(related_id=upload_file1.id, transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
file2 = self._create_file(related_id=upload_file2.id, transfer_method=FileTransferMethod.REMOTE_URL)
|
||||
file3 = self._create_file(related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE)
|
||||
|
||||
files = [file1, file2, file3]
|
||||
|
||||
# Load storage keys
|
||||
self.loader.load_storage_keys(files)
|
||||
|
||||
# Verify all storage keys were loaded correctly
|
||||
assert file1._storage_key == upload_file1.key
|
||||
assert file2._storage_key == upload_file2.key
|
||||
assert file3._storage_key == tool_file.file_key
|
||||
|
||||
def test_load_storage_keys_empty_list(self):
|
||||
"""Test with empty file list."""
|
||||
# Should not raise any exceptions
|
||||
self.loader.load_storage_keys([])
|
||||
|
||||
def test_load_storage_keys_tenant_mismatch(self):
|
||||
"""Test tenant_id validation."""
|
||||
# Create file with different tenant_id
|
||||
upload_file = self._create_upload_file()
|
||||
file = self._create_file(
|
||||
related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=str(uuid4())
|
||||
)
|
||||
|
||||
# Should raise ValueError for tenant mismatch
|
||||
with pytest.raises(ValueError) as context:
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
assert "invalid file, expected tenant_id" in str(context.value)
|
||||
|
||||
def test_load_storage_keys_missing_file_id(self):
|
||||
"""Test with None file.related_id."""
|
||||
# Create a file with valid parameters first, then manually set related_id to None
|
||||
file = self._create_file(related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
file.related_id = None
|
||||
|
||||
# Should raise ValueError for None file related_id
|
||||
with pytest.raises(ValueError) as context:
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
assert str(context.value) == "file id should not be None."
|
||||
|
||||
def test_load_storage_keys_nonexistent_upload_file_records(self):
|
||||
"""Test with missing UploadFile database records."""
|
||||
# Create file with non-existent upload file id
|
||||
non_existent_id = str(uuid4())
|
||||
file = self._create_file(related_id=non_existent_id, transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
|
||||
# Should raise ValueError for missing record
|
||||
with pytest.raises(ValueError):
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
def test_load_storage_keys_nonexistent_tool_file_records(self):
|
||||
"""Test with missing ToolFile database records."""
|
||||
# Create file with non-existent tool file id
|
||||
non_existent_id = str(uuid4())
|
||||
file = self._create_file(related_id=non_existent_id, transfer_method=FileTransferMethod.TOOL_FILE)
|
||||
|
||||
# Should raise ValueError for missing record
|
||||
with pytest.raises(ValueError):
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
def test_load_storage_keys_invalid_uuid(self):
|
||||
"""Test with invalid UUID format."""
|
||||
# Create a file with valid parameters first, then manually set invalid related_id
|
||||
file = self._create_file(related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
file.related_id = "invalid-uuid-format"
|
||||
|
||||
# Should raise ValueError for invalid UUID
|
||||
with pytest.raises(ValueError):
|
||||
self.loader.load_storage_keys([file])
|
||||
|
||||
def test_load_storage_keys_batch_efficiency(self):
|
||||
"""Test batched operations use efficient queries."""
|
||||
# Create multiple files of different types
|
||||
upload_files = [self._create_upload_file() for _ in range(3)]
|
||||
tool_files = [self._create_tool_file() for _ in range(2)]
|
||||
|
||||
files = []
|
||||
files.extend(
|
||||
[self._create_file(related_id=uf.id, transfer_method=FileTransferMethod.LOCAL_FILE) for uf in upload_files]
|
||||
)
|
||||
files.extend(
|
||||
[self._create_file(related_id=tf.id, transfer_method=FileTransferMethod.TOOL_FILE) for tf in tool_files]
|
||||
)
|
||||
|
||||
# Mock the session to count queries
|
||||
with patch.object(self.session, "scalars", wraps=self.session.scalars) as mock_scalars:
|
||||
self.loader.load_storage_keys(files)
|
||||
|
||||
# Should make exactly 2 queries (one for upload_files, one for tool_files)
|
||||
assert mock_scalars.call_count == 2
|
||||
|
||||
# Verify all storage keys were loaded correctly
|
||||
for i, file in enumerate(files[:3]):
|
||||
assert file._storage_key == upload_files[i].key
|
||||
for i, file in enumerate(files[3:]):
|
||||
assert file._storage_key == tool_files[i].file_key
|
||||
|
||||
def test_load_storage_keys_tenant_isolation(self):
|
||||
"""Test that tenant isolation works correctly."""
|
||||
# Create files for different tenants
|
||||
other_tenant_id = str(uuid4())
|
||||
|
||||
# Create upload file for current tenant
|
||||
upload_file_current = self._create_upload_file()
|
||||
file_current = self._create_file(
|
||||
related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE
|
||||
)
|
||||
|
||||
# Create upload file for other tenant (but don't add to cleanup list)
|
||||
upload_file_other = UploadFile(
|
||||
tenant_id=other_tenant_id,
|
||||
storage_type="local",
|
||||
key="other_tenant_key",
|
||||
name="other_file.txt",
|
||||
size=1024,
|
||||
extension=".txt",
|
||||
mime_type="text/plain",
|
||||
created_by_role=CreatorUserRole.ACCOUNT,
|
||||
created_by=self.user_id,
|
||||
created_at=datetime.now(UTC),
|
||||
used=False,
|
||||
)
|
||||
upload_file_other.id = str(uuid4())
|
||||
self.session.add(upload_file_other)
|
||||
self.session.flush()
|
||||
|
||||
# Create file for other tenant but try to load with current tenant's loader
|
||||
file_other = self._create_file(
|
||||
related_id=upload_file_other.id, transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=other_tenant_id
|
||||
)
|
||||
|
||||
# Should raise ValueError due to tenant mismatch
|
||||
with pytest.raises(ValueError) as context:
|
||||
self.loader.load_storage_keys([file_other])
|
||||
|
||||
assert "invalid file, expected tenant_id" in str(context.value)
|
||||
|
||||
# Current tenant's file should still work
|
||||
self.loader.load_storage_keys([file_current])
|
||||
assert file_current._storage_key == upload_file_current.key
|
||||
|
||||
def test_load_storage_keys_mixed_tenant_batch(self):
|
||||
"""Test batch with mixed tenant files (should fail on first mismatch)."""
|
||||
# Create files for current tenant
|
||||
upload_file_current = self._create_upload_file()
|
||||
file_current = self._create_file(
|
||||
related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE
|
||||
)
|
||||
|
||||
# Create file for different tenant
|
||||
other_tenant_id = str(uuid4())
|
||||
file_other = self._create_file(
|
||||
related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=other_tenant_id
|
||||
)
|
||||
|
||||
# Should raise ValueError on tenant mismatch
|
||||
with pytest.raises(ValueError) as context:
|
||||
self.loader.load_storage_keys([file_current, file_other])
|
||||
|
||||
assert "invalid file, expected tenant_id" in str(context.value)
|
||||
|
||||
def test_load_storage_keys_duplicate_file_ids(self):
|
||||
"""Test handling of duplicate file IDs in the batch."""
|
||||
# Create upload file
|
||||
upload_file = self._create_upload_file()
|
||||
|
||||
# Create two File objects with same related_id
|
||||
file1 = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
file2 = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
|
||||
# Should handle duplicates gracefully
|
||||
self.loader.load_storage_keys([file1, file2])
|
||||
|
||||
# Both files should have the same storage key
|
||||
assert file1._storage_key == upload_file.key
|
||||
assert file2._storage_key == upload_file.key
|
||||
|
||||
def test_load_storage_keys_session_isolation(self):
|
||||
"""Test that the loader uses the provided session correctly."""
|
||||
# Create test data
|
||||
upload_file = self._create_upload_file()
|
||||
file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE)
|
||||
|
||||
# Create loader with different session (same underlying connection)
|
||||
|
||||
with Session(bind=db.engine) as other_session:
|
||||
other_loader = StorageKeyLoader(other_session, self.tenant_id)
|
||||
with pytest.raises(ValueError):
|
||||
other_loader.load_storage_keys([file])
|
||||
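Stepping back from the individual cases, the call pattern these tests exercise is roughly the following sketch; `files` and `tenant_id` are assumed to be supplied by the surrounding workflow code rather than by the diff.

# Hedged sketch of batched storage-key loading, mirroring the tests above.
# One query is issued for UploadFile-backed files and one for ToolFile-backed files.
with Session(bind=db.engine, expire_on_commit=False) as session:
    loader = StorageKeyLoader(session, tenant_id=tenant_id)
    loader.load_storage_keys(files)  # populates file._storage_key for every File in the batch
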
@ -0,0 +1,501 @@
|
||||
import json
|
||||
import unittest
|
||||
import uuid
|
||||
|
||||
import pytest
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from core.variables.variables import StringVariable
|
||||
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
|
||||
from core.workflow.nodes import NodeType
|
||||
from factories.variable_factory import build_segment
|
||||
from libs import datetime_utils
|
||||
from models import db
|
||||
from models.workflow import Workflow, WorkflowDraftVariable, WorkflowNodeExecutionModel
|
||||
from services.workflow_draft_variable_service import DraftVarLoader, VariableResetError, WorkflowDraftVariableService
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("flask_req_ctx")
|
||||
class TestWorkflowDraftVariableService(unittest.TestCase):
|
||||
_test_app_id: str
|
||||
_session: Session
|
||||
_node1_id = "test_node_1"
|
||||
_node2_id = "test_node_2"
|
||||
_node_exec_id = str(uuid.uuid4())
|
||||
|
||||
def setUp(self):
|
||||
self._test_app_id = str(uuid.uuid4())
|
||||
self._session: Session = db.session()
|
||||
sys_var = WorkflowDraftVariable.new_sys_variable(
|
||||
app_id=self._test_app_id,
|
||||
name="sys_var",
|
||||
value=build_segment("sys_value"),
|
||||
node_execution_id=self._node_exec_id,
|
||||
)
|
||||
conv_var = WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=self._test_app_id,
|
||||
name="conv_var",
|
||||
value=build_segment("conv_value"),
|
||||
)
|
||||
node2_vars = [
|
||||
WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node2_id,
|
||||
name="int_var",
|
||||
value=build_segment(1),
|
||||
visible=False,
|
||||
node_execution_id=self._node_exec_id,
|
||||
),
|
||||
WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node2_id,
|
||||
name="str_var",
|
||||
value=build_segment("str_value"),
|
||||
visible=True,
|
||||
node_execution_id=self._node_exec_id,
|
||||
),
|
||||
]
|
||||
node1_var = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node1_id,
|
||||
name="str_var",
|
||||
value=build_segment("str_value"),
|
||||
visible=True,
|
||||
node_execution_id=self._node_exec_id,
|
||||
)
|
||||
_variables = list(node2_vars)
|
||||
_variables.extend(
|
||||
[
|
||||
node1_var,
|
||||
sys_var,
|
||||
conv_var,
|
||||
]
|
||||
)
|
||||
|
||||
db.session.add_all(_variables)
|
||||
db.session.flush()
|
||||
self._variable_ids = [v.id for v in _variables]
|
||||
self._node1_str_var_id = node1_var.id
|
||||
self._sys_var_id = sys_var.id
|
||||
self._conv_var_id = conv_var.id
|
||||
self._node2_var_ids = [v.id for v in node2_vars]
|
||||
|
||||
def _get_test_srv(self) -> WorkflowDraftVariableService:
|
||||
return WorkflowDraftVariableService(session=self._session)
|
||||
|
||||
def tearDown(self):
|
||||
self._session.rollback()
|
||||
|
||||
def test_list_variables(self):
|
||||
srv = self._get_test_srv()
|
||||
var_list = srv.list_variables_without_values(self._test_app_id, page=1, limit=2)
|
||||
assert var_list.total == 5
|
||||
assert len(var_list.variables) == 2
|
||||
page1_var_ids = {v.id for v in var_list.variables}
|
||||
assert page1_var_ids.issubset(self._variable_ids)
|
||||
|
||||
var_list_2 = srv.list_variables_without_values(self._test_app_id, page=2, limit=2)
|
||||
assert var_list_2.total is None
|
||||
assert len(var_list_2.variables) == 2
|
||||
page2_var_ids = {v.id for v in var_list_2.variables}
|
||||
assert page2_var_ids.isdisjoint(page1_var_ids)
|
||||
assert page2_var_ids.issubset(self._variable_ids)
|
||||
|
||||
def test_get_node_variable(self):
|
||||
srv = self._get_test_srv()
|
||||
node_var = srv.get_node_variable(self._test_app_id, self._node1_id, "str_var")
|
||||
assert node_var is not None
|
||||
assert node_var.id == self._node1_str_var_id
|
||||
assert node_var.name == "str_var"
|
||||
assert node_var.get_value() == build_segment("str_value")
|
||||
|
||||
def test_get_system_variable(self):
|
||||
srv = self._get_test_srv()
|
||||
sys_var = srv.get_system_variable(self._test_app_id, "sys_var")
|
||||
assert sys_var is not None
|
||||
assert sys_var.id == self._sys_var_id
|
||||
assert sys_var.name == "sys_var"
|
||||
assert sys_var.get_value() == build_segment("sys_value")
|
||||
|
||||
def test_get_conversation_variable(self):
|
||||
srv = self._get_test_srv()
|
||||
conv_var = srv.get_conversation_variable(self._test_app_id, "conv_var")
|
||||
assert conv_var is not None
|
||||
assert conv_var.id == self._conv_var_id
|
||||
assert conv_var.name == "conv_var"
|
||||
assert conv_var.get_value() == build_segment("conv_value")
|
||||
|
||||
def test_delete_node_variables(self):
|
||||
srv = self._get_test_srv()
|
||||
srv.delete_node_variables(self._test_app_id, self._node2_id)
|
||||
node2_var_count = (
|
||||
self._session.query(WorkflowDraftVariable)
|
||||
.where(
|
||||
WorkflowDraftVariable.app_id == self._test_app_id,
|
||||
WorkflowDraftVariable.node_id == self._node2_id,
|
||||
)
|
||||
.count()
|
||||
)
|
||||
assert node2_var_count == 0
|
||||
|
||||
def test_delete_variable(self):
|
||||
srv = self._get_test_srv()
|
||||
node_1_var = (
|
||||
self._session.query(WorkflowDraftVariable).where(WorkflowDraftVariable.id == self._node1_str_var_id).one()
|
||||
)
|
||||
srv.delete_variable(node_1_var)
|
||||
exists = bool(
|
||||
self._session.query(WorkflowDraftVariable).where(WorkflowDraftVariable.id == self._node1_str_var_id).first()
|
||||
)
|
||||
assert exists is False
|
||||
|
||||
def test__list_node_variables(self):
|
||||
srv = self._get_test_srv()
|
||||
node_vars = srv._list_node_variables(self._test_app_id, self._node2_id)
|
||||
assert len(node_vars.variables) == 2
|
||||
assert {v.id for v in node_vars.variables} == set(self._node2_var_ids)
|
||||
|
||||
def test_get_draft_variables_by_selectors(self):
|
||||
srv = self._get_test_srv()
|
||||
selectors = [
|
||||
[self._node1_id, "str_var"],
|
||||
[self._node2_id, "str_var"],
|
||||
[self._node2_id, "int_var"],
|
||||
]
|
||||
variables = srv.get_draft_variables_by_selectors(self._test_app_id, selectors)
|
||||
assert len(variables) == 3
|
||||
assert {v.id for v in variables} == {self._node1_str_var_id} | set(self._node2_var_ids)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("flask_req_ctx")
|
||||
class TestDraftVariableLoader(unittest.TestCase):
|
||||
_test_app_id: str
|
||||
_test_tenant_id: str
|
||||
|
||||
_node1_id = "test_loader_node_1"
|
||||
_node_exec_id = str(uuid.uuid4())
|
||||
|
||||
def setUp(self):
|
||||
self._test_app_id = str(uuid.uuid4())
|
||||
self._test_tenant_id = str(uuid.uuid4())
|
||||
sys_var = WorkflowDraftVariable.new_sys_variable(
|
||||
app_id=self._test_app_id,
|
||||
name="sys_var",
|
||||
value=build_segment("sys_value"),
|
||||
node_execution_id=self._node_exec_id,
|
||||
)
|
||||
conv_var = WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=self._test_app_id,
|
||||
name="conv_var",
|
||||
value=build_segment("conv_value"),
|
||||
)
|
||||
node_var = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node1_id,
|
||||
name="str_var",
|
||||
value=build_segment("str_value"),
|
||||
visible=True,
|
||||
node_execution_id=self._node_exec_id,
|
||||
)
|
||||
_variables = [
|
||||
node_var,
|
||||
sys_var,
|
||||
conv_var,
|
||||
]
|
||||
|
||||
with Session(bind=db.engine, expire_on_commit=False) as session:
|
||||
session.add_all(_variables)
|
||||
session.flush()
|
||||
session.commit()
|
||||
self._variable_ids = [v.id for v in _variables]
|
||||
self._node_var_id = node_var.id
|
||||
self._sys_var_id = sys_var.id
|
||||
self._conv_var_id = conv_var.id
|
||||
|
||||
def tearDown(self):
|
||||
with Session(bind=db.engine, expire_on_commit=False) as session:
|
||||
session.query(WorkflowDraftVariable).filter(WorkflowDraftVariable.app_id == self._test_app_id).delete(
|
||||
synchronize_session=False
|
||||
)
|
||||
session.commit()
|
||||
|
||||
def test_variable_loader_with_empty_selector(self):
|
||||
var_loader = DraftVarLoader(engine=db.engine, app_id=self._test_app_id, tenant_id=self._test_tenant_id)
|
||||
variables = var_loader.load_variables([])
|
||||
assert len(variables) == 0
|
||||
|
||||
def test_variable_loader_with_non_empty_selector(self):
|
||||
var_loader = DraftVarLoader(engine=db.engine, app_id=self._test_app_id, tenant_id=self._test_tenant_id)
|
||||
variables = var_loader.load_variables(
|
||||
[
|
||||
[SYSTEM_VARIABLE_NODE_ID, "sys_var"],
|
||||
[CONVERSATION_VARIABLE_NODE_ID, "conv_var"],
|
||||
[self._node1_id, "str_var"],
|
||||
]
|
||||
)
|
||||
assert len(variables) == 3
|
||||
conv_var = next(v for v in variables if v.selector[0] == CONVERSATION_VARIABLE_NODE_ID)
|
||||
assert conv_var.id == self._conv_var_id
|
||||
sys_var = next(v for v in variables if v.selector[0] == SYSTEM_VARIABLE_NODE_ID)
|
||||
assert sys_var.id == self._sys_var_id
|
||||
node1_var = next(v for v in variables if v.selector[0] == self._node1_id)
|
||||
assert node1_var.id == self._node_var_id
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("flask_req_ctx")
|
||||
class TestWorkflowDraftVariableServiceResetVariable(unittest.TestCase):
|
||||
"""Integration tests for reset_variable functionality using real database"""
|
||||
|
||||
_test_app_id: str
|
||||
_test_tenant_id: str
|
||||
_test_workflow_id: str
|
||||
_session: Session
|
||||
_node_id = "test_reset_node"
|
||||
_node_exec_id: str
|
||||
_workflow_node_exec_id: str
|
||||
|
||||
def setUp(self):
|
||||
self._test_app_id = str(uuid.uuid4())
|
||||
self._test_tenant_id = str(uuid.uuid4())
|
||||
self._test_workflow_id = str(uuid.uuid4())
|
||||
self._node_exec_id = str(uuid.uuid4())
|
||||
self._workflow_node_exec_id = str(uuid.uuid4())
|
||||
self._session: Session = db.session()
|
||||
|
||||
# Create a workflow node execution record with outputs
|
||||
# Note: The WorkflowNodeExecutionModel.id should match the node_execution_id in WorkflowDraftVariable
|
||||
self._workflow_node_execution = WorkflowNodeExecutionModel(
|
||||
id=self._node_exec_id, # This should match the node_execution_id in the variable
|
||||
tenant_id=self._test_tenant_id,
|
||||
app_id=self._test_app_id,
|
||||
workflow_id=self._test_workflow_id,
|
||||
triggered_from="workflow-run",
|
||||
workflow_run_id=str(uuid.uuid4()),
|
||||
index=1,
|
||||
node_execution_id=self._node_exec_id,
|
||||
node_id=self._node_id,
|
||||
node_type=NodeType.LLM.value,
|
||||
title="Test Node",
|
||||
inputs='{"input": "test input"}',
|
||||
process_data='{"test_var": "process_value", "other_var": "other_process"}',
|
||||
outputs='{"test_var": "output_value", "other_var": "other_output"}',
|
||||
status="succeeded",
|
||||
elapsed_time=1.5,
|
||||
created_by_role="account",
|
||||
created_by=str(uuid.uuid4()),
|
||||
)
|
||||
|
||||
# Create conversation variables for the workflow
|
||||
self._conv_variables = [
|
||||
StringVariable(
|
||||
id=str(uuid.uuid4()),
|
||||
name="conv_var_1",
|
||||
description="Test conversation variable 1",
|
||||
value="default_value_1",
|
||||
),
|
||||
StringVariable(
|
||||
id=str(uuid.uuid4()),
|
||||
name="conv_var_2",
|
||||
description="Test conversation variable 2",
|
||||
value="default_value_2",
|
||||
),
|
||||
]
|
||||
|
||||
# Create test variables
|
||||
self._node_var_with_exec = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node_id,
|
||||
name="test_var",
|
||||
value=build_segment("old_value"),
|
||||
node_execution_id=self._node_exec_id,
|
||||
)
|
||||
self._node_var_with_exec.last_edited_at = datetime_utils.naive_utc_now()
|
||||
|
||||
self._node_var_without_exec = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node_id,
|
||||
name="no_exec_var",
|
||||
value=build_segment("some_value"),
|
||||
node_execution_id="temp_exec_id",
|
||||
)
|
||||
# Manually set node_execution_id to None after creation
|
||||
self._node_var_without_exec.node_execution_id = None
|
||||
|
||||
self._node_var_missing_exec = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=self._test_app_id,
|
||||
node_id=self._node_id,
|
||||
name="missing_exec_var",
|
||||
value=build_segment("some_value"),
|
||||
node_execution_id=str(uuid.uuid4()), # Use a valid UUID that doesn't exist in database
|
||||
)
|
||||
|
||||
self._conv_var = WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=self._test_app_id,
|
||||
name="conv_var_1",
|
||||
value=build_segment("old_conv_value"),
|
||||
)
|
||||
self._conv_var.last_edited_at = datetime_utils.naive_utc_now()
|
||||
|
||||
# Add all to database
|
||||
db.session.add_all(
|
||||
[
|
||||
self._workflow_node_execution,
|
||||
self._node_var_with_exec,
|
||||
self._node_var_without_exec,
|
||||
self._node_var_missing_exec,
|
||||
self._conv_var,
|
||||
]
|
||||
)
|
||||
db.session.flush()
|
||||
|
||||
# Store IDs for assertions
|
||||
self._node_var_with_exec_id = self._node_var_with_exec.id
|
||||
self._node_var_without_exec_id = self._node_var_without_exec.id
|
||||
self._node_var_missing_exec_id = self._node_var_missing_exec.id
|
||||
self._conv_var_id = self._conv_var.id
|
||||
|
||||
def _get_test_srv(self) -> WorkflowDraftVariableService:
|
||||
return WorkflowDraftVariableService(session=self._session)
|
||||
|
||||
def _create_mock_workflow(self) -> Workflow:
|
||||
"""Create a real workflow with conversation variables and graph"""
|
||||
conversation_vars = self._conv_variables
|
||||
|
||||
# Create a simple graph with the test node
|
||||
graph = {
|
||||
"nodes": [{"id": "test_reset_node", "type": "llm", "title": "Test Node", "data": {"type": "llm"}}],
|
||||
"edges": [],
|
||||
}
|
||||
|
||||
workflow = Workflow.new(
|
||||
tenant_id=str(uuid.uuid4()),
|
||||
app_id=self._test_app_id,
|
||||
type="workflow",
|
||||
version="1.0",
|
||||
graph=json.dumps(graph),
|
||||
features="{}",
|
||||
created_by=str(uuid.uuid4()),
|
||||
environment_variables=[],
|
||||
conversation_variables=conversation_vars,
|
||||
)
|
||||
return workflow
|
||||
|
||||
def tearDown(self):
|
||||
self._session.rollback()
|
||||
|
||||
def test_reset_node_variable_with_valid_execution_record(self):
|
||||
"""Test resetting a node variable with valid execution record - should restore from execution"""
|
||||
srv = self._get_test_srv()
|
||||
mock_workflow = self._create_mock_workflow()
|
||||
|
||||
# Get the variable before reset
|
||||
variable = srv.get_variable(self._node_var_with_exec_id)
|
||||
assert variable is not None
|
||||
assert variable.get_value().value == "old_value"
|
||||
assert variable.last_edited_at is not None
|
||||
|
||||
# Reset the variable
|
||||
result = srv.reset_variable(mock_workflow, variable)
|
||||
|
||||
# Should return the updated variable
|
||||
assert result is not None
|
||||
assert result.id == self._node_var_with_exec_id
|
||||
assert result.node_execution_id == self._workflow_node_execution.id
|
||||
assert result.last_edited_at is None # Should be reset to None
|
||||
|
||||
# The returned variable should have the updated value from execution record
|
||||
assert result.get_value().value == "output_value"
|
||||
|
||||
# Verify the variable was updated in database
|
||||
updated_variable = srv.get_variable(self._node_var_with_exec_id)
|
||||
assert updated_variable is not None
|
||||
# The value should be updated from the execution record's outputs
|
||||
assert updated_variable.get_value().value == "output_value"
|
||||
assert updated_variable.last_edited_at is None
|
||||
assert updated_variable.node_execution_id == self._workflow_node_execution.id
|
||||
|
||||
def test_reset_node_variable_with_no_execution_id(self):
|
||||
"""Test resetting a node variable with no execution ID - should delete variable"""
|
||||
srv = self._get_test_srv()
|
||||
mock_workflow = self._create_mock_workflow()
|
||||
|
||||
# Get the variable before reset
|
||||
variable = srv.get_variable(self._node_var_without_exec_id)
|
||||
assert variable is not None
|
||||
|
||||
# Reset the variable
|
||||
result = srv.reset_variable(mock_workflow, variable)
|
||||
|
||||
# Should return None (variable deleted)
|
||||
assert result is None
|
||||
|
||||
# Verify the variable was deleted
|
||||
deleted_variable = srv.get_variable(self._node_var_without_exec_id)
|
||||
assert deleted_variable is None
|
||||
|
||||
def test_reset_node_variable_with_missing_execution_record(self):
|
||||
"""Test resetting a node variable when execution record doesn't exist"""
|
||||
srv = self._get_test_srv()
|
||||
mock_workflow = self._create_mock_workflow()
|
||||
|
||||
# Get the variable before reset
|
||||
variable = srv.get_variable(self._node_var_missing_exec_id)
|
||||
assert variable is not None
|
||||
|
||||
# Reset the variable
|
||||
result = srv.reset_variable(mock_workflow, variable)
|
||||
|
||||
# Should return None (variable deleted)
|
||||
assert result is None
|
||||
|
||||
# Verify the variable was deleted
|
||||
deleted_variable = srv.get_variable(self._node_var_missing_exec_id)
|
||||
assert deleted_variable is None
|
||||
|
||||
def test_reset_conversation_variable(self):
|
||||
"""Test resetting a conversation variable"""
|
||||
srv = self._get_test_srv()
|
||||
mock_workflow = self._create_mock_workflow()
|
||||
|
||||
# Get the variable before reset
|
||||
variable = srv.get_variable(self._conv_var_id)
|
||||
assert variable is not None
|
||||
assert variable.get_value().value == "old_conv_value"
|
||||
assert variable.last_edited_at is not None
|
||||
|
||||
# Reset the variable
|
||||
result = srv.reset_variable(mock_workflow, variable)
|
||||
|
||||
# Should return the updated variable
|
||||
assert result is not None
|
||||
assert result.id == self._conv_var_id
|
||||
assert result.last_edited_at is None # Should be reset to None
|
||||
|
||||
# Verify the variable was updated with default value from workflow
|
||||
updated_variable = srv.get_variable(self._conv_var_id)
|
||||
assert updated_variable is not None
|
||||
# The value should be updated from the workflow's conversation variable default
|
||||
assert updated_variable.get_value().value == "default_value_1"
|
||||
assert updated_variable.last_edited_at is None
|
||||
|
||||
def test_reset_system_variable_raises_error(self):
|
||||
"""Test that resetting a system variable raises an error"""
|
||||
srv = self._get_test_srv()
|
||||
mock_workflow = self._create_mock_workflow()
|
||||
|
||||
# Create a system variable
|
||||
sys_var = WorkflowDraftVariable.new_sys_variable(
|
||||
app_id=self._test_app_id,
|
||||
name="sys_var",
|
||||
value=build_segment("sys_value"),
|
||||
node_execution_id=self._node_exec_id,
|
||||
)
|
||||
db.session.add(sys_var)
|
||||
db.session.flush()
|
||||
|
||||
# Attempt to reset the system variable
|
||||
with pytest.raises(VariableResetError) as exc_info:
|
||||
srv.reset_variable(mock_workflow, sys_var)
|
||||
|
||||
assert "cannot reset system variable" in str(exc_info.value)
|
||||
assert sys_var.id in str(exc_info.value)
|
||||
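Taken together, these cases suggest the following hedged sketch of how the reset flow is driven from the service layer; `session`, `workflow`, and `variable_id` are assumptions about the calling code, not part of the diff.

# Hypothetical caller of reset_variable, based on the behaviour the tests above verify.
srv = WorkflowDraftVariableService(session=session)
variable = srv.get_variable(variable_id)
result = srv.reset_variable(workflow, variable)  # None means the variable was deleted rather than reset
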
@ -0,0 +1,302 @@
|
||||
import datetime
|
||||
import uuid
|
||||
from collections import OrderedDict
|
||||
from typing import Any, NamedTuple
|
||||
|
||||
from flask_restful import marshal
|
||||
|
||||
from controllers.console.app.workflow_draft_variable import (
|
||||
_WORKFLOW_DRAFT_VARIABLE_FIELDS,
|
||||
_WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS,
|
||||
_WORKFLOW_DRAFT_VARIABLE_LIST_WITHOUT_VALUE_FIELDS,
|
||||
_WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS,
|
||||
)
|
||||
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
|
||||
from factories.variable_factory import build_segment
|
||||
from models.workflow import WorkflowDraftVariable
|
||||
from services.workflow_draft_variable_service import WorkflowDraftVariableList
|
||||
|
||||
_TEST_APP_ID = "test_app_id"
|
||||
_TEST_NODE_EXEC_ID = str(uuid.uuid4())
|
||||
|
||||
|
||||
class TestWorkflowDraftVariableFields:
|
||||
def test_conversation_variable(self):
|
||||
conv_var = WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=_TEST_APP_ID, name="conv_var", value=build_segment(1)
|
||||
)
|
||||
|
||||
conv_var.id = str(uuid.uuid4())
|
||||
conv_var.visible = True
|
||||
|
||||
expected_without_value: OrderedDict[str, Any] = OrderedDict(
|
||||
{
|
||||
"id": str(conv_var.id),
|
||||
"type": conv_var.get_variable_type().value,
|
||||
"name": "conv_var",
|
||||
"description": "",
|
||||
"selector": [CONVERSATION_VARIABLE_NODE_ID, "conv_var"],
|
||||
"value_type": "number",
|
||||
"edited": False,
|
||||
"visible": True,
|
||||
}
|
||||
)
|
||||
|
||||
assert marshal(conv_var, _WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS) == expected_without_value
|
||||
expected_with_value = expected_without_value.copy()
|
||||
expected_with_value["value"] = 1
|
||||
assert marshal(conv_var, _WORKFLOW_DRAFT_VARIABLE_FIELDS) == expected_with_value
|
||||
|
||||
def test_create_sys_variable(self):
|
||||
sys_var = WorkflowDraftVariable.new_sys_variable(
|
||||
app_id=_TEST_APP_ID,
|
||||
name="sys_var",
|
||||
value=build_segment("a"),
|
||||
editable=True,
|
||||
node_execution_id=_TEST_NODE_EXEC_ID,
|
||||
)
|
||||
|
||||
sys_var.id = str(uuid.uuid4())
|
||||
sys_var.last_edited_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
|
||||
sys_var.visible = True
|
||||
|
||||
expected_without_value = OrderedDict(
|
||||
{
|
||||
"id": str(sys_var.id),
|
||||
"type": sys_var.get_variable_type().value,
|
||||
"name": "sys_var",
|
||||
"description": "",
|
||||
"selector": [SYSTEM_VARIABLE_NODE_ID, "sys_var"],
|
||||
"value_type": "string",
|
||||
"edited": True,
|
||||
"visible": True,
|
||||
}
|
||||
)
|
||||
assert marshal(sys_var, _WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS) == expected_without_value
|
||||
expected_with_value = expected_without_value.copy()
|
||||
expected_with_value["value"] = "a"
|
||||
assert marshal(sys_var, _WORKFLOW_DRAFT_VARIABLE_FIELDS) == expected_with_value
|
||||
|
||||
def test_node_variable(self):
|
||||
node_var = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=_TEST_APP_ID,
|
||||
node_id="test_node",
|
||||
name="node_var",
|
||||
value=build_segment([1, "a"]),
|
||||
visible=False,
|
||||
node_execution_id=_TEST_NODE_EXEC_ID,
|
||||
)
|
||||
|
||||
node_var.id = str(uuid.uuid4())
|
||||
node_var.last_edited_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
|
||||
|
||||
expected_without_value: OrderedDict[str, Any] = OrderedDict(
|
||||
{
|
||||
"id": str(node_var.id),
|
||||
"type": node_var.get_variable_type().value,
|
||||
"name": "node_var",
|
||||
"description": "",
|
||||
"selector": ["test_node", "node_var"],
|
||||
"value_type": "array[any]",
|
||||
"edited": True,
|
||||
"visible": False,
|
||||
}
|
||||
)
|
||||
|
||||
assert marshal(node_var, _WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS) == expected_without_value
|
||||
expected_with_value = expected_without_value.copy()
|
||||
expected_with_value["value"] = [1, "a"]
|
||||
assert marshal(node_var, _WORKFLOW_DRAFT_VARIABLE_FIELDS) == expected_with_value
|
||||
|
||||
|
||||
class TestWorkflowDraftVariableList:
|
||||
def test_workflow_draft_variable_list(self):
|
||||
class TestCase(NamedTuple):
|
||||
name: str
|
||||
var_list: WorkflowDraftVariableList
|
||||
expected: dict
|
||||
|
||||
node_var = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=_TEST_APP_ID,
|
||||
node_id="test_node",
|
||||
name="test_var",
|
||||
value=build_segment("a"),
|
||||
visible=True,
|
||||
node_execution_id=_TEST_NODE_EXEC_ID,
|
||||
)
|
||||
node_var.id = str(uuid.uuid4())
|
||||
node_var_dict = OrderedDict(
|
||||
{
|
||||
"id": str(node_var.id),
|
||||
"type": node_var.get_variable_type().value,
|
||||
"name": "test_var",
|
||||
"description": "",
|
||||
"selector": ["test_node", "test_var"],
|
||||
"value_type": "string",
|
||||
"edited": False,
|
||||
"visible": True,
|
||||
}
|
||||
)
|
||||
|
||||
cases = [
|
||||
TestCase(
|
||||
name="empty variable list",
|
||||
var_list=WorkflowDraftVariableList(variables=[]),
|
||||
expected=OrderedDict(
|
||||
{
|
||||
"items": [],
|
||||
"total": None,
|
||||
}
|
||||
),
|
||||
),
|
||||
TestCase(
|
||||
name="empty variable list with total",
|
||||
var_list=WorkflowDraftVariableList(variables=[], total=10),
|
||||
expected=OrderedDict(
|
||||
{
|
||||
"items": [],
|
||||
"total": 10,
|
||||
}
|
||||
),
|
||||
),
|
||||
TestCase(
|
||||
name="non-empty variable list",
|
||||
var_list=WorkflowDraftVariableList(variables=[node_var], total=None),
|
||||
expected=OrderedDict(
|
||||
{
|
||||
"items": [node_var_dict],
|
||||
"total": None,
|
||||
}
|
||||
),
|
||||
),
|
||||
TestCase(
|
||||
name="non-empty variable list with total",
|
||||
var_list=WorkflowDraftVariableList(variables=[node_var], total=10),
|
||||
expected=OrderedDict(
|
||||
{
|
||||
"items": [node_var_dict],
|
||||
"total": 10,
|
||||
}
|
||||
),
|
||||
),
|
||||
]
|
||||
|
||||
for idx, case in enumerate(cases, 1):
|
||||
assert marshal(case.var_list, _WORKFLOW_DRAFT_VARIABLE_LIST_WITHOUT_VALUE_FIELDS) == case.expected, (
|
||||
f"Test case {idx} failed, {case.name=}"
|
||||
)
|
||||
|
||||
|
||||
def test_workflow_node_variables_fields():
|
||||
conv_var = WorkflowDraftVariable.new_conversation_variable(
|
||||
app_id=_TEST_APP_ID, name="conv_var", value=build_segment(1)
|
||||
)
|
||||
resp = marshal(WorkflowDraftVariableList(variables=[conv_var]), _WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS)
|
||||
assert isinstance(resp, dict)
|
||||
assert len(resp["items"]) == 1
|
||||
item_dict = resp["items"][0]
|
||||
assert item_dict["name"] == "conv_var"
|
||||
assert item_dict["value"] == 1
|
||||
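
# Illustrative sketch (not part of this diff): flask_restful.marshal only emits the keys
# declared in the field mapping, which is why the *_WITHOUT_VALUE field sets in this module
# simply omit a "value" entry. The field mapping below is made up for illustration.
from flask_restful import fields as _fields

_EXAMPLE_FIELDS = {"name": _fields.String, "value_type": _fields.String}
assert marshal({"name": "conv_var", "value_type": "number", "value": 1}, _EXAMPLE_FIELDS) == {
    "name": "conv_var",
    "value_type": "number",
}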
|
||||
|
||||
def test_workflow_file_variable_with_signed_url():
|
||||
"""Test that File type variables include signed URLs in API responses."""
|
||||
from core.file.enums import FileTransferMethod, FileType
|
||||
from core.file.models import File
|
||||
|
||||
# Create a File object with LOCAL_FILE transfer method (which generates signed URLs)
|
||||
test_file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.LOCAL_FILE,
|
||||
related_id="test_upload_file_id",
|
||||
filename="test.jpg",
|
||||
extension=".jpg",
|
||||
mime_type="image/jpeg",
|
||||
size=12345,
|
||||
)
|
||||
|
||||
# Create a WorkflowDraftVariable with the File
|
||||
file_var = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=_TEST_APP_ID,
|
||||
node_id="test_node",
|
||||
name="file_var",
|
||||
value=build_segment(test_file),
|
||||
node_execution_id=_TEST_NODE_EXEC_ID,
|
||||
)
|
||||
|
||||
# Marshal the variable using the API fields
|
||||
resp = marshal(WorkflowDraftVariableList(variables=[file_var]), _WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS)
|
||||
|
||||
# Verify the response structure
|
||||
assert isinstance(resp, dict)
|
||||
assert len(resp["items"]) == 1
|
||||
item_dict = resp["items"][0]
|
||||
assert item_dict["name"] == "file_var"
|
||||
|
||||
# Verify the value is a dict (File.to_dict() result) and contains expected fields
|
||||
value = item_dict["value"]
|
||||
assert isinstance(value, dict)
|
||||
|
||||
# Verify the File fields are preserved
|
||||
assert value["id"] == test_file.id
|
||||
assert value["filename"] == test_file.filename
|
||||
assert value["type"] == test_file.type.value
|
||||
assert value["transfer_method"] == test_file.transfer_method.value
|
||||
assert value["size"] == test_file.size
|
||||
|
||||
# Verify the URL is present (it should be a signed URL for LOCAL_FILE transfer method)
|
||||
remote_url = value["remote_url"]
|
||||
assert remote_url is not None
|
||||
|
||||
assert isinstance(remote_url, str)
|
||||
# For LOCAL_FILE, the URL should contain signature parameters
|
||||
assert "timestamp=" in remote_url
|
||||
assert "nonce=" in remote_url
|
||||
assert "sign=" in remote_url
|
||||
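
# Hedged sketch (not part of the diff): one way to inspect the signature query parameters
# asserted above, using only the standard library. The parameter names (timestamp, nonce,
# sign) come from the assertions; the helper name is made up for illustration.
from urllib.parse import parse_qs, urlparse

def _signature_params(url: str) -> dict[str, str]:
    # parse_qs returns lists of values; keep the first value for each key we care about.
    query = parse_qs(urlparse(url).query)
    return {key: query[key][0] for key in ("timestamp", "nonce", "sign") if key in query}

# Usage: assert set(_signature_params(remote_url)) == {"timestamp", "nonce", "sign"}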
|
||||
|
||||
def test_workflow_file_variable_remote_url():
|
||||
"""Test that File type variables with REMOTE_URL transfer method return the remote URL."""
|
||||
from core.file.enums import FileTransferMethod, FileType
|
||||
from core.file.models import File
|
||||
|
||||
# Create a File object with REMOTE_URL transfer method
|
||||
test_file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://example.com/test.jpg",
|
||||
filename="test.jpg",
|
||||
extension=".jpg",
|
||||
mime_type="image/jpeg",
|
||||
size=12345,
|
||||
)
|
||||
|
||||
# Create a WorkflowDraftVariable with the File
|
||||
file_var = WorkflowDraftVariable.new_node_variable(
|
||||
app_id=_TEST_APP_ID,
|
||||
node_id="test_node",
|
||||
name="file_var",
|
||||
value=build_segment(test_file),
|
||||
node_execution_id=_TEST_NODE_EXEC_ID,
|
||||
)
|
||||
|
||||
# Marshal the variable using the API fields
|
||||
resp = marshal(WorkflowDraftVariableList(variables=[file_var]), _WORKFLOW_DRAFT_VARIABLE_LIST_FIELDS)
|
||||
|
||||
# Verify the response structure
|
||||
assert isinstance(resp, dict)
|
||||
assert len(resp["items"]) == 1
|
||||
item_dict = resp["items"][0]
|
||||
assert item_dict["name"] == "file_var"
|
||||
|
||||
# Verify the value is a dict (File.to_dict() result) and contains expected fields
|
||||
value = item_dict["value"]
|
||||
assert isinstance(value, dict)
|
||||
remote_url = value["remote_url"]
|
||||
|
||||
# For REMOTE_URL, the URL should be the original remote URL
|
||||
assert remote_url == test_file.remote_url
|
||||
@ -1,165 +0,0 @@
|
||||
from uuid import uuid4
|
||||
|
||||
import pytest
|
||||
|
||||
from core.variables import (
|
||||
ArrayNumberVariable,
|
||||
ArrayObjectVariable,
|
||||
ArrayStringVariable,
|
||||
FloatVariable,
|
||||
IntegerVariable,
|
||||
ObjectSegment,
|
||||
SecretVariable,
|
||||
StringVariable,
|
||||
)
|
||||
from core.variables.exc import VariableError
|
||||
from core.variables.segments import ArrayAnySegment
|
||||
from factories import variable_factory
|
||||
|
||||
|
||||
def test_string_variable():
|
||||
test_data = {"value_type": "string", "name": "test_text", "value": "Hello, World!"}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, StringVariable)
|
||||
|
||||
|
||||
def test_integer_variable():
|
||||
test_data = {"value_type": "number", "name": "test_int", "value": 42}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, IntegerVariable)
|
||||
|
||||
|
||||
def test_float_variable():
|
||||
test_data = {"value_type": "number", "name": "test_float", "value": 3.14}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, FloatVariable)
|
||||
|
||||
|
||||
def test_secret_variable():
|
||||
test_data = {"value_type": "secret", "name": "test_secret", "value": "secret_value"}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, SecretVariable)
|
||||
|
||||
|
||||
def test_invalid_value_type():
|
||||
test_data = {"value_type": "unknown", "name": "test_invalid", "value": "value"}
|
||||
with pytest.raises(VariableError):
|
||||
variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
|
||||
|
||||
def test_build_a_blank_string():
|
||||
result = variable_factory.build_conversation_variable_from_mapping(
|
||||
{
|
||||
"value_type": "string",
|
||||
"name": "blank",
|
||||
"value": "",
|
||||
}
|
||||
)
|
||||
assert isinstance(result, StringVariable)
|
||||
assert result.value == ""
|
||||
|
||||
|
||||
def test_build_a_object_variable_with_none_value():
|
||||
var = variable_factory.build_segment(
|
||||
{
|
||||
"key1": None,
|
||||
}
|
||||
)
|
||||
assert isinstance(var, ObjectSegment)
|
||||
assert var.value["key1"] is None
|
||||
|
||||
|
||||
def test_object_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "object",
|
||||
"name": "test_object",
|
||||
"description": "Description of the variable.",
|
||||
"value": {
|
||||
"key1": "text",
|
||||
"key2": 2,
|
||||
},
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ObjectSegment)
|
||||
assert isinstance(variable.value["key1"], str)
|
||||
assert isinstance(variable.value["key2"], int)
|
||||
|
||||
|
||||
def test_array_string_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "array[string]",
|
||||
"name": "test_array",
|
||||
"description": "Description of the variable.",
|
||||
"value": [
|
||||
"text",
|
||||
"text",
|
||||
],
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ArrayStringVariable)
|
||||
assert isinstance(variable.value[0], str)
|
||||
assert isinstance(variable.value[1], str)
|
||||
|
||||
|
||||
def test_array_number_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "array[number]",
|
||||
"name": "test_array",
|
||||
"description": "Description of the variable.",
|
||||
"value": [
|
||||
1,
|
||||
2.0,
|
||||
],
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ArrayNumberVariable)
|
||||
assert isinstance(variable.value[0], int)
|
||||
assert isinstance(variable.value[1], float)
|
||||
|
||||
|
||||
def test_array_object_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "array[object]",
|
||||
"name": "test_array",
|
||||
"description": "Description of the variable.",
|
||||
"value": [
|
||||
{
|
||||
"key1": "text",
|
||||
"key2": 1,
|
||||
},
|
||||
{
|
||||
"key1": "text",
|
||||
"key2": 1,
|
||||
},
|
||||
],
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ArrayObjectVariable)
|
||||
assert isinstance(variable.value[0], dict)
|
||||
assert isinstance(variable.value[1], dict)
|
||||
assert isinstance(variable.value[0]["key1"], str)
|
||||
assert isinstance(variable.value[0]["key2"], int)
|
||||
assert isinstance(variable.value[1]["key1"], str)
|
||||
assert isinstance(variable.value[1]["key2"], int)
|
||||
|
||||
|
||||
def test_variable_cannot_large_than_200_kb():
|
||||
with pytest.raises(VariableError):
|
||||
variable_factory.build_conversation_variable_from_mapping(
|
||||
{
|
||||
"id": str(uuid4()),
|
||||
"value_type": "string",
|
||||
"name": "test_text",
|
||||
"value": "a" * 1024 * 201,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def test_array_none_variable():
|
||||
var = variable_factory.build_segment([None, None, None, None])
|
||||
assert isinstance(var, ArrayAnySegment)
|
||||
assert var.value == [None, None, None, None]
|
||||
@ -0,0 +1,25 @@
from core.file import File, FileTransferMethod, FileType


def test_file():
    file = File(
        id="test-file",
        tenant_id="test-tenant-id",
        type=FileType.IMAGE,
        transfer_method=FileTransferMethod.TOOL_FILE,
        related_id="test-related-id",
        filename="image.png",
        extension=".png",
        mime_type="image/png",
        size=67,
        storage_key="test-storage-key",
        url="https://example.com/image.png",
    )
    assert file.tenant_id == "test-tenant-id"
    assert file.type == FileType.IMAGE
    assert file.transfer_method == FileTransferMethod.TOOL_FILE
    assert file.related_id == "test-related-id"
    assert file.filename == "image.png"
    assert file.extension == ".png"
    assert file.mime_type == "image/png"
    assert file.size == 67
@ -0,0 +1,865 @@
import math
from dataclasses import dataclass
from typing import Any
from uuid import uuid4

import pytest
from hypothesis import given
from hypothesis import strategies as st

from core.file import File, FileTransferMethod, FileType
from core.variables import (
    ArrayNumberVariable,
    ArrayObjectVariable,
    ArrayStringVariable,
    FloatVariable,
    IntegerVariable,
    ObjectSegment,
    SecretVariable,
    SegmentType,
    StringVariable,
)
from core.variables.exc import VariableError
from core.variables.segments import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArrayNumberSegment,
    ArrayObjectSegment,
    ArrayStringSegment,
    FileSegment,
    FloatSegment,
    IntegerSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.variables.types import SegmentType
from factories import variable_factory
from factories.variable_factory import TypeMismatchError, build_segment_with_type


def test_string_variable():
|
||||
test_data = {"value_type": "string", "name": "test_text", "value": "Hello, World!"}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, StringVariable)
|
||||
|
||||
|
||||
def test_integer_variable():
|
||||
test_data = {"value_type": "number", "name": "test_int", "value": 42}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, IntegerVariable)
|
||||
|
||||
|
||||
def test_float_variable():
|
||||
test_data = {"value_type": "number", "name": "test_float", "value": 3.14}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, FloatVariable)
|
||||
|
||||
|
||||
def test_secret_variable():
|
||||
test_data = {"value_type": "secret", "name": "test_secret", "value": "secret_value"}
|
||||
result = variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
assert isinstance(result, SecretVariable)
|
||||
|
||||
|
||||
def test_invalid_value_type():
|
||||
test_data = {"value_type": "unknown", "name": "test_invalid", "value": "value"}
|
||||
with pytest.raises(VariableError):
|
||||
variable_factory.build_conversation_variable_from_mapping(test_data)
|
||||
|
||||
|
||||
def test_build_a_blank_string():
|
||||
result = variable_factory.build_conversation_variable_from_mapping(
|
||||
{
|
||||
"value_type": "string",
|
||||
"name": "blank",
|
||||
"value": "",
|
||||
}
|
||||
)
|
||||
assert isinstance(result, StringVariable)
|
||||
assert result.value == ""
|
||||
|
||||
|
||||
def test_build_a_object_variable_with_none_value():
|
||||
var = variable_factory.build_segment(
|
||||
{
|
||||
"key1": None,
|
||||
}
|
||||
)
|
||||
assert isinstance(var, ObjectSegment)
|
||||
assert var.value["key1"] is None
|
||||
|
||||
|
||||
def test_object_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "object",
|
||||
"name": "test_object",
|
||||
"description": "Description of the variable.",
|
||||
"value": {
|
||||
"key1": "text",
|
||||
"key2": 2,
|
||||
},
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ObjectSegment)
|
||||
assert isinstance(variable.value["key1"], str)
|
||||
assert isinstance(variable.value["key2"], int)
|
||||
|
||||
|
||||
def test_array_string_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "array[string]",
|
||||
"name": "test_array",
|
||||
"description": "Description of the variable.",
|
||||
"value": [
|
||||
"text",
|
||||
"text",
|
||||
],
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ArrayStringVariable)
|
||||
assert isinstance(variable.value[0], str)
|
||||
assert isinstance(variable.value[1], str)
|
||||
|
||||
|
||||
def test_array_number_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "array[number]",
|
||||
"name": "test_array",
|
||||
"description": "Description of the variable.",
|
||||
"value": [
|
||||
1,
|
||||
2.0,
|
||||
],
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ArrayNumberVariable)
|
||||
assert isinstance(variable.value[0], int)
|
||||
assert isinstance(variable.value[1], float)
|
||||
|
||||
|
||||
def test_array_object_variable():
|
||||
mapping = {
|
||||
"id": str(uuid4()),
|
||||
"value_type": "array[object]",
|
||||
"name": "test_array",
|
||||
"description": "Description of the variable.",
|
||||
"value": [
|
||||
{
|
||||
"key1": "text",
|
||||
"key2": 1,
|
||||
},
|
||||
{
|
||||
"key1": "text",
|
||||
"key2": 1,
|
||||
},
|
||||
],
|
||||
}
|
||||
variable = variable_factory.build_conversation_variable_from_mapping(mapping)
|
||||
assert isinstance(variable, ArrayObjectVariable)
|
||||
assert isinstance(variable.value[0], dict)
|
||||
assert isinstance(variable.value[1], dict)
|
||||
assert isinstance(variable.value[0]["key1"], str)
|
||||
assert isinstance(variable.value[0]["key2"], int)
|
||||
assert isinstance(variable.value[1]["key1"], str)
|
||||
assert isinstance(variable.value[1]["key2"], int)
|
||||
|
||||
|
||||
def test_variable_cannot_large_than_200_kb():
|
||||
with pytest.raises(VariableError):
|
||||
variable_factory.build_conversation_variable_from_mapping(
|
||||
{
|
||||
"id": str(uuid4()),
|
||||
"value_type": "string",
|
||||
"name": "test_text",
|
||||
"value": "a" * 1024 * 201,
|
||||
}
|
||||
)
|
||||
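
# Quick arithmetic behind the test above (not part of the diff): 201 KiB of ASCII text is
# just over the 200 KiB ceiling implied by the test name, hence the expected VariableError.
assert len("a" * 1024 * 201) == 205_824  # 205,824 bytes > 200 * 1024 = 204,800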
|
||||
|
||||
def test_array_none_variable():
|
||||
var = variable_factory.build_segment([None, None, None, None])
|
||||
assert isinstance(var, ArrayAnySegment)
|
||||
assert var.value == [None, None, None, None]
|
||||
|
||||
|
||||
def test_build_segment_none_type():
|
||||
"""Test building NoneSegment from None value."""
|
||||
segment = variable_factory.build_segment(None)
|
||||
assert isinstance(segment, NoneSegment)
|
||||
assert segment.value is None
|
||||
assert segment.value_type == SegmentType.NONE
|
||||
|
||||
|
||||
def test_build_segment_none_type_properties():
|
||||
"""Test NoneSegment properties and methods."""
|
||||
segment = variable_factory.build_segment(None)
|
||||
assert segment.text == ""
|
||||
assert segment.log == ""
|
||||
assert segment.markdown == ""
|
||||
assert segment.to_object() is None
|
||||
|
||||
|
||||
def test_build_segment_array_file_single_file():
|
||||
"""Test building ArrayFileSegment from list with single file."""
|
||||
file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file.png",
|
||||
filename="test-file",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
)
|
||||
segment = variable_factory.build_segment([file])
|
||||
assert isinstance(segment, ArrayFileSegment)
|
||||
assert len(segment.value) == 1
|
||||
assert segment.value[0] == file
|
||||
assert segment.value_type == SegmentType.ARRAY_FILE
|
||||
|
||||
|
||||
def test_build_segment_array_file_multiple_files():
|
||||
"""Test building ArrayFileSegment from list with multiple files."""
|
||||
file1 = File(
|
||||
id="test_file_id_1",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file1.png",
|
||||
filename="test-file1",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
)
|
||||
file2 = File(
|
||||
id="test_file_id_2",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.DOCUMENT,
|
||||
transfer_method=FileTransferMethod.LOCAL_FILE,
|
||||
related_id="test_relation_id",
|
||||
filename="test-file2",
|
||||
extension=".txt",
|
||||
mime_type="text/plain",
|
||||
size=500,
|
||||
)
|
||||
segment = variable_factory.build_segment([file1, file2])
|
||||
assert isinstance(segment, ArrayFileSegment)
|
||||
assert len(segment.value) == 2
|
||||
assert segment.value[0] == file1
|
||||
assert segment.value[1] == file2
|
||||
assert segment.value_type == SegmentType.ARRAY_FILE
|
||||
|
||||
|
||||
def test_build_segment_array_file_empty_list():
|
||||
"""Test building ArrayFileSegment from empty list should create ArrayAnySegment."""
|
||||
segment = variable_factory.build_segment([])
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == []
|
||||
assert segment.value_type == SegmentType.ARRAY_ANY
|
||||
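
# Side note (not part of the diff): an empty list carries no element type to infer, so
# build_segment falls back to array[any]; build_segment_with_type (exercised further below)
# lets the caller pin a concrete array type instead.
_pinned = build_segment_with_type(SegmentType.ARRAY_STRING, [])
assert isinstance(_pinned, ArrayStringSegment)
assert _pinned.value_type == SegmentType.ARRAY_STRING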
|
||||
|
||||
def test_build_segment_array_any_mixed_types():
|
||||
"""Test building ArrayAnySegment from list with mixed types."""
|
||||
mixed_values = ["string", 42, 3.14, {"key": "value"}, None]
|
||||
segment = variable_factory.build_segment(mixed_values)
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == mixed_values
|
||||
assert segment.value_type == SegmentType.ARRAY_ANY
|
||||
|
||||
|
||||
def test_build_segment_array_any_with_nested_arrays():
|
||||
"""Test building ArrayAnySegment from list containing arrays."""
|
||||
nested_values = [["nested", "array"], [1, 2, 3], "string"]
|
||||
segment = variable_factory.build_segment(nested_values)
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == nested_values
|
||||
assert segment.value_type == SegmentType.ARRAY_ANY
|
||||
|
||||
|
||||
def test_build_segment_array_any_mixed_with_files():
|
||||
"""Test building ArrayAnySegment from list with files and other types."""
|
||||
file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file.png",
|
||||
filename="test-file",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
)
|
||||
mixed_values = [file, "string", 42]
|
||||
segment = variable_factory.build_segment(mixed_values)
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == mixed_values
|
||||
assert segment.value_type == SegmentType.ARRAY_ANY
|
||||
|
||||
|
||||
def test_build_segment_array_any_all_none_values():
|
||||
"""Test building ArrayAnySegment from list with all None values."""
|
||||
none_values = [None, None, None]
|
||||
segment = variable_factory.build_segment(none_values)
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == none_values
|
||||
assert segment.value_type == SegmentType.ARRAY_ANY
|
||||
|
||||
|
||||
def test_build_segment_array_file_properties():
|
||||
"""Test ArrayFileSegment properties and methods."""
|
||||
file1 = File(
|
||||
id="test_file_id_1",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file1.png",
|
||||
filename="test-file1",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
)
|
||||
file2 = File(
|
||||
id="test_file_id_2",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.DOCUMENT,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file2.txt",
|
||||
filename="test-file2",
|
||||
extension=".txt",
|
||||
mime_type="text/plain",
|
||||
size=500,
|
||||
)
|
||||
segment = variable_factory.build_segment([file1, file2])
|
||||
|
||||
# Test properties
|
||||
assert segment.text == "" # ArrayFileSegment text property returns empty string
|
||||
assert segment.log == "" # ArrayFileSegment log property returns empty string
|
||||
assert segment.markdown == f"{file1.markdown}\n{file2.markdown}"
|
||||
assert segment.to_object() == [file1, file2]
|
||||
|
||||
|
||||
def test_build_segment_array_any_properties():
|
||||
"""Test ArrayAnySegment properties and methods."""
|
||||
mixed_values = ["string", 42, None]
|
||||
segment = variable_factory.build_segment(mixed_values)
|
||||
|
||||
# Test properties
|
||||
assert segment.text == str(mixed_values)
|
||||
assert segment.log == str(mixed_values)
|
||||
assert segment.markdown == "string\n42\nNone"
|
||||
assert segment.to_object() == mixed_values
|
||||
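
# Illustrative cross-check (not part of the diff): the markdown assertion above is
# consistent with joining the string form of each element with newlines.
assert "\n".join(str(v) for v in ["string", 42, None]) == "string\n42\nNone"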
|
||||
|
||||
def test_build_segment_edge_cases():
|
||||
"""Test edge cases for build_segment function."""
|
||||
# Test with complex nested structures
|
||||
complex_structure = [{"nested": {"deep": [1, 2, 3]}}, [{"inner": "value"}], "mixed"]
|
||||
segment = variable_factory.build_segment(complex_structure)
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == complex_structure
|
||||
|
||||
# Test with single None in list
|
||||
single_none = [None]
|
||||
segment = variable_factory.build_segment(single_none)
|
||||
assert isinstance(segment, ArrayAnySegment)
|
||||
assert segment.value == single_none
|
||||
|
||||
|
||||
def test_build_segment_file_array_with_different_file_types():
|
||||
"""Test ArrayFileSegment with different file types."""
|
||||
image_file = File(
|
||||
id="image_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/image.png",
|
||||
filename="image",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
)
|
||||
|
||||
video_file = File(
|
||||
id="video_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.VIDEO,
|
||||
transfer_method=FileTransferMethod.LOCAL_FILE,
|
||||
related_id="video_relation_id",
|
||||
filename="video",
|
||||
extension=".mp4",
|
||||
mime_type="video/mp4",
|
||||
size=5000,
|
||||
)
|
||||
|
||||
audio_file = File(
|
||||
id="audio_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.AUDIO,
|
||||
transfer_method=FileTransferMethod.LOCAL_FILE,
|
||||
related_id="audio_relation_id",
|
||||
filename="audio",
|
||||
extension=".mp3",
|
||||
mime_type="audio/mpeg",
|
||||
size=3000,
|
||||
)
|
||||
|
||||
segment = variable_factory.build_segment([image_file, video_file, audio_file])
|
||||
assert isinstance(segment, ArrayFileSegment)
|
||||
assert len(segment.value) == 3
|
||||
assert segment.value[0].type == FileType.IMAGE
|
||||
assert segment.value[1].type == FileType.VIDEO
|
||||
assert segment.value[2].type == FileType.AUDIO
|
||||
|
||||
|
||||
@st.composite
|
||||
def _generate_file(draw) -> File:
|
||||
file_id = draw(st.text(min_size=1, max_size=10))
|
||||
tenant_id = draw(st.text(min_size=1, max_size=10))
|
||||
file_type, mime_type, extension = draw(
|
||||
st.sampled_from(
|
||||
[
|
||||
(FileType.IMAGE, "image/png", ".png"),
|
||||
(FileType.VIDEO, "video/mp4", ".mp4"),
|
||||
(FileType.DOCUMENT, "text/plain", ".txt"),
|
||||
(FileType.AUDIO, "audio/mpeg", ".mp3"),
|
||||
]
|
||||
)
|
||||
)
|
||||
filename = "test-file"
|
||||
size = draw(st.integers(min_value=0, max_value=1024 * 1024))
|
||||
|
||||
transfer_method = draw(st.sampled_from(list(FileTransferMethod)))
|
||||
if transfer_method == FileTransferMethod.REMOTE_URL:
|
||||
url = "https://test.example.com/test-file"
|
||||
file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=file_type,
|
||||
transfer_method=transfer_method,
|
||||
remote_url=url,
|
||||
related_id=None,
|
||||
filename=filename,
|
||||
extension=extension,
|
||||
mime_type=mime_type,
|
||||
size=size,
|
||||
)
|
||||
else:
|
||||
relation_id = draw(st.uuids(version=4))
|
||||
|
||||
file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=file_type,
|
||||
transfer_method=transfer_method,
|
||||
related_id=str(relation_id),
|
||||
filename=filename,
|
||||
extension=extension,
|
||||
mime_type=mime_type,
|
||||
size=size,
|
||||
)
|
||||
return file
|
||||
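
# Usage note (not part of the diff): @st.composite turns _generate_file into a strategy
# factory, so _generate_file() can be drawn directly or embedded in other strategies,
# e.g. lists of files when exercising ArrayFileSegment cases.
_files_strategy = st.lists(_generate_file(), min_size=1, max_size=3)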
|
||||
|
||||
def _scalar_value() -> st.SearchStrategy[int | float | str | File | None]:
|
||||
return st.one_of(
|
||||
st.none(),
|
||||
st.integers(),
|
||||
st.floats(),
|
||||
st.text(),
|
||||
_generate_file(),
|
||||
)
|
||||
|
||||
|
||||
@given(_scalar_value())
|
||||
def test_build_segment_and_extract_values_for_scalar_types(value):
|
||||
seg = variable_factory.build_segment(value)
|
||||
# nan == nan yields false, so we need to use `math.isnan` to check `seg.value` here.
|
||||
if isinstance(value, float) and math.isnan(value):
|
||||
assert math.isnan(seg.value)
|
||||
else:
|
||||
assert seg.value == value
|
||||
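
# Why the NaN special-case above is needed (illustrative, not part of the diff):
# IEEE-754 NaN never compares equal to itself, so a plain `seg.value == value`
# assertion would fail whenever hypothesis draws a NaN float.
_nan = float("nan")
assert _nan != _nan
assert math.isnan(_nan)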
|
||||
|
||||
@given(st.lists(_scalar_value()))
|
||||
def test_build_segment_and_extract_values_for_array_types(values):
|
||||
seg = variable_factory.build_segment(values)
|
||||
assert seg.value == values
|
||||
|
||||
|
||||
def test_build_segment_type_for_scalar():
|
||||
@dataclass(frozen=True)
|
||||
class TestCase:
|
||||
value: int | float | str | File
|
||||
expected_type: SegmentType
|
||||
|
||||
file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file.png",
|
||||
filename="test-file",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
)
|
||||
cases = [
|
||||
TestCase(0, SegmentType.NUMBER),
|
||||
TestCase(0.0, SegmentType.NUMBER),
|
||||
TestCase("", SegmentType.STRING),
|
||||
TestCase(file, SegmentType.FILE),
|
||||
]
|
||||
|
||||
for idx, c in enumerate(cases, 1):
|
||||
segment = variable_factory.build_segment(c.value)
|
||||
assert segment.value_type == c.expected_type, f"test case {idx} failed."
|
||||
|
||||
|
||||
class TestBuildSegmentWithType:
|
||||
"""Test cases for build_segment_with_type function."""
|
||||
|
||||
def test_string_type(self):
|
||||
"""Test building a string segment with correct type."""
|
||||
result = build_segment_with_type(SegmentType.STRING, "hello")
|
||||
assert isinstance(result, StringSegment)
|
||||
assert result.value == "hello"
|
||||
assert result.value_type == SegmentType.STRING
|
||||
|
||||
def test_number_type_integer(self):
|
||||
"""Test building a number segment with integer value."""
|
||||
result = build_segment_with_type(SegmentType.NUMBER, 42)
|
||||
assert isinstance(result, IntegerSegment)
|
||||
assert result.value == 42
|
||||
assert result.value_type == SegmentType.NUMBER
|
||||
|
||||
def test_number_type_float(self):
|
||||
"""Test building a number segment with float value."""
|
||||
result = build_segment_with_type(SegmentType.NUMBER, 3.14)
|
||||
assert isinstance(result, FloatSegment)
|
||||
assert result.value == 3.14
|
||||
assert result.value_type == SegmentType.NUMBER
|
||||
|
||||
def test_object_type(self):
|
||||
"""Test building an object segment with correct type."""
|
||||
test_obj = {"key": "value", "nested": {"inner": 123}}
|
||||
result = build_segment_with_type(SegmentType.OBJECT, test_obj)
|
||||
assert isinstance(result, ObjectSegment)
|
||||
assert result.value == test_obj
|
||||
assert result.value_type == SegmentType.OBJECT
|
||||
|
||||
def test_file_type(self):
|
||||
"""Test building a file segment with correct type."""
|
||||
test_file = File(
|
||||
id="test_file_id",
|
||||
tenant_id="test_tenant_id",
|
||||
type=FileType.IMAGE,
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url="https://test.example.com/test-file.png",
|
||||
filename="test-file",
|
||||
extension=".png",
|
||||
mime_type="image/png",
|
||||
size=1000,
|
||||
storage_key="test_storage_key",
|
||||
)
|
||||
result = build_segment_with_type(SegmentType.FILE, test_file)
|
||||
assert isinstance(result, FileSegment)
|
||||
assert result.value == test_file
|
||||
assert result.value_type == SegmentType.FILE
|
||||
|
||||
def test_none_type(self):
|
||||
"""Test building a none segment with None value."""
|
||||
result = build_segment_with_type(SegmentType.NONE, None)
|
||||
assert isinstance(result, NoneSegment)
|
||||
assert result.value is None
|
||||
assert result.value_type == SegmentType.NONE
|
||||
|
||||
def test_empty_array_string(self):
|
||||
"""Test building an empty array[string] segment."""
|
||||
result = build_segment_with_type(SegmentType.ARRAY_STRING, [])
|
||||
assert isinstance(result, ArrayStringSegment)
|
||||
assert result.value == []
|
||||
assert result.value_type == SegmentType.ARRAY_STRING
|
||||
|
||||
def test_empty_array_number(self):
|
||||
"""Test building an empty array[number] segment."""
|
||||
result = build_segment_with_type(SegmentType.ARRAY_NUMBER, [])
|
||||
assert isinstance(result, ArrayNumberSegment)
|
||||
assert result.value == []
|
||||
assert result.value_type == SegmentType.ARRAY_NUMBER
|
||||
|
||||
def test_empty_array_object(self):
|
||||
"""Test building an empty array[object] segment."""
|
||||
result = build_segment_with_type(SegmentType.ARRAY_OBJECT, [])
|
||||
assert isinstance(result, ArrayObjectSegment)
|
||||
assert result.value == []
|
||||
assert result.value_type == SegmentType.ARRAY_OBJECT
|
||||
|
||||
def test_empty_array_file(self):
|
||||
"""Test building an empty array[file] segment."""
|
||||
result = build_segment_with_type(SegmentType.ARRAY_FILE, [])
|
||||
assert isinstance(result, ArrayFileSegment)
|
||||
assert result.value == []
|
||||
assert result.value_type == SegmentType.ARRAY_FILE
|
||||
|
||||
def test_empty_array_any(self):
|
||||
"""Test building an empty array[any] segment."""
|
||||
result = build_segment_with_type(SegmentType.ARRAY_ANY, [])
|
||||
assert isinstance(result, ArrayAnySegment)
|
||||
assert result.value == []
|
||||
assert result.value_type == SegmentType.ARRAY_ANY
|
||||
|
||||
def test_array_with_values(self):
|
||||
"""Test building array segments with actual values."""
|
||||
# Array of strings
|
||||
result = build_segment_with_type(SegmentType.ARRAY_STRING, ["hello", "world"])
|
||||
assert isinstance(result, ArrayStringSegment)
|
||||
assert result.value == ["hello", "world"]
|
||||
assert result.value_type == SegmentType.ARRAY_STRING
|
||||
|
||||
# Array of numbers
|
||||
result = build_segment_with_type(SegmentType.ARRAY_NUMBER, [1, 2, 3.14])
|
||||
assert isinstance(result, ArrayNumberSegment)
|
||||
assert result.value == [1, 2, 3.14]
|
||||
assert result.value_type == SegmentType.ARRAY_NUMBER
|
||||
|
||||
# Array of objects
|
||||
result = build_segment_with_type(SegmentType.ARRAY_OBJECT, [{"a": 1}, {"b": 2}])
|
||||
assert isinstance(result, ArrayObjectSegment)
|
||||
assert result.value == [{"a": 1}, {"b": 2}]
|
||||
assert result.value_type == SegmentType.ARRAY_OBJECT
|
||||
|
||||
def test_type_mismatch_string_to_number(self):
|
||||
"""Test type mismatch when expecting number but getting string."""
|
||||
with pytest.raises(TypeMismatchError) as exc_info:
|
||||
build_segment_with_type(SegmentType.NUMBER, "not_a_number")
|
||||
|
||||
assert "Type mismatch" in str(exc_info.value)
|
||||
assert "expected number" in str(exc_info.value)
|
||||
assert "str" in str(exc_info.value)
|
||||
|
||||
def test_type_mismatch_number_to_string(self):
|
||||
"""Test type mismatch when expecting string but getting number."""
|
||||
with pytest.raises(TypeMismatchError) as exc_info:
|
||||
build_segment_with_type(SegmentType.STRING, 123)
|
||||
|
||||
assert "Type mismatch" in str(exc_info.value)
|
||||
assert "expected string" in str(exc_info.value)
|
||||
assert "int" in str(exc_info.value)
|
||||
|
||||
def test_type_mismatch_none_to_string(self):
|
||||
"""Test type mismatch when expecting string but getting None."""
|
||||
with pytest.raises(TypeMismatchError) as exc_info:
|
||||
build_segment_with_type(SegmentType.STRING, None)
|
||||
|
||||
assert "Expected string, but got None" in str(exc_info.value)
|
||||
|
||||
def test_type_mismatch_empty_list_to_non_array(self):
|
||||
"""Test type mismatch when expecting non-array type but getting empty list."""
|
||||
with pytest.raises(TypeMismatchError) as exc_info:
|
||||
build_segment_with_type(SegmentType.STRING, [])
|
||||
|
||||
assert "Expected string, but got empty list" in str(exc_info.value)
|
||||
|
||||
def test_type_mismatch_object_to_array(self):
|
||||
"""Test type mismatch when expecting array but getting object."""
|
||||
with pytest.raises(TypeMismatchError) as exc_info:
|
||||
build_segment_with_type(SegmentType.ARRAY_STRING, {"key": "value"})
|
||||
|
||||
assert "Type mismatch" in str(exc_info.value)
|
||||
assert "expected array[string]" in str(exc_info.value)
|
||||
|
||||
def test_compatible_number_types(self):
|
||||
"""Test that int and float are both compatible with NUMBER type."""
|
||||
# Integer should work
|
||||
result_int = build_segment_with_type(SegmentType.NUMBER, 42)
|
||||
assert isinstance(result_int, IntegerSegment)
|
||||
assert result_int.value_type == SegmentType.NUMBER
|
||||
|
||||
# Float should work
|
||||
result_float = build_segment_with_type(SegmentType.NUMBER, 3.14)
|
||||
assert isinstance(result_float, FloatSegment)
|
||||
assert result_float.value_type == SegmentType.NUMBER
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("segment_type", "value", "expected_class"),
|
||||
[
|
||||
(SegmentType.STRING, "test", StringSegment),
|
||||
(SegmentType.NUMBER, 42, IntegerSegment),
|
||||
(SegmentType.NUMBER, 3.14, FloatSegment),
|
||||
(SegmentType.OBJECT, {}, ObjectSegment),
|
||||
(SegmentType.NONE, None, NoneSegment),
|
||||
(SegmentType.ARRAY_STRING, [], ArrayStringSegment),
|
||||
(SegmentType.ARRAY_NUMBER, [], ArrayNumberSegment),
|
||||
(SegmentType.ARRAY_OBJECT, [], ArrayObjectSegment),
|
||||
(SegmentType.ARRAY_ANY, [], ArrayAnySegment),
|
||||
],
|
||||
)
|
||||
def test_parametrized_valid_types(self, segment_type, value, expected_class):
|
||||
"""Parametrized test for valid type combinations."""
|
||||
result = build_segment_with_type(segment_type, value)
|
||||
assert isinstance(result, expected_class)
|
||||
assert result.value == value
|
||||
assert result.value_type == segment_type
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("segment_type", "value"),
|
||||
[
|
||||
(SegmentType.STRING, 123),
|
||||
(SegmentType.NUMBER, "not_a_number"),
|
||||
(SegmentType.OBJECT, "not_an_object"),
|
||||
(SegmentType.ARRAY_STRING, "not_an_array"),
|
||||
(SegmentType.STRING, None),
|
||||
(SegmentType.NUMBER, None),
|
||||
],
|
||||
)
|
||||
def test_parametrized_type_mismatches(self, segment_type, value):
|
||||
"""Parametrized test for type mismatches that should raise TypeMismatchError."""
|
||||
with pytest.raises(TypeMismatchError):
|
||||
build_segment_with_type(segment_type, value)
|
||||
|
||||
|
||||
# Test cases for ValueError scenarios in build_segment function
|
||||
class TestBuildSegmentValueErrors:
|
||||
"""Test cases for ValueError scenarios in the build_segment function."""
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ValueErrorTestCase:
|
||||
"""Test case data for ValueError scenarios."""
|
||||
|
||||
name: str
|
||||
description: str
|
||||
test_value: Any
|
||||
|
||||
def _get_test_cases(self):
|
||||
"""Get all test cases for ValueError scenarios."""
|
||||
|
||||
# Define inline classes for complex test cases
|
||||
class CustomType:
|
||||
pass
|
||||
|
||||
def unsupported_function():
|
||||
return "test"
|
||||
|
||||
def gen():
|
||||
yield 1
|
||||
yield 2
|
||||
|
||||
return [
|
||||
self.ValueErrorTestCase(
|
||||
name="unsupported_custom_type",
|
||||
description="custom class that doesn't match any supported type",
|
||||
test_value=CustomType(),
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="unsupported_set_type",
|
||||
description="set (unsupported collection type)",
|
||||
test_value={1, 2, 3},
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="unsupported_tuple_type", description="tuple (unsupported type)", test_value=(1, 2, 3)
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="unsupported_bytes_type",
|
||||
description="bytes (unsupported type)",
|
||||
test_value=b"hello world",
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="unsupported_function_type",
|
||||
description="function (unsupported type)",
|
||||
test_value=unsupported_function,
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="unsupported_module_type", description="module (unsupported type)", test_value=math
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="array_with_unsupported_element_types",
|
||||
description="array with unsupported element types",
|
||||
test_value=[CustomType()],
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="mixed_array_with_unsupported_types",
|
||||
description="array with mix of supported and unsupported types",
|
||||
test_value=["valid_string", 42, CustomType()],
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="nested_unsupported_types",
|
||||
description="nested structures containing unsupported types",
|
||||
test_value=[{"valid": "data"}, CustomType()],
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="complex_number_type",
|
||||
description="complex number (unsupported type)",
|
||||
test_value=3 + 4j,
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="range_type", description="range object (unsupported type)", test_value=range(10)
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="generator_type",
|
||||
description="generator (unsupported type)",
|
||||
test_value=gen(),
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="exception_message_contains_value",
|
||||
description="set to verify error message contains the actual unsupported value",
|
||||
test_value={1, 2, 3},
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="array_with_mixed_unsupported_segment_types",
|
||||
description="array processing with unsupported segment types in match",
|
||||
test_value=[CustomType()],
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="frozenset_type",
|
||||
description="frozenset (unsupported type)",
|
||||
test_value=frozenset([1, 2, 3]),
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="memoryview_type",
|
||||
description="memoryview (unsupported type)",
|
||||
test_value=memoryview(b"hello"),
|
||||
),
|
||||
self.ValueErrorTestCase(
|
||||
name="slice_type", description="slice object (unsupported type)", test_value=slice(1, 10, 2)
|
||||
),
|
||||
self.ValueErrorTestCase(name="type_object", description="type object (unsupported type)", test_value=type),
|
||||
self.ValueErrorTestCase(
|
||||
name="generic_object", description="generic object (unsupported type)", test_value=object()
|
||||
),
|
||||
]
|
||||
|
||||
def test_build_segment_unsupported_types(self):
|
||||
"""Table-driven test for all ValueError scenarios in build_segment function."""
|
||||
test_cases = self._get_test_cases()
|
||||
|
||||
for index, test_case in enumerate(test_cases, 1):
|
||||
# Use test value directly
|
||||
test_value = test_case.test_value
|
||||
|
||||
with pytest.raises(ValueError) as exc_info: # noqa: PT012
|
||||
segment = variable_factory.build_segment(test_value)
|
||||
pytest.fail(f"Test case {index} ({test_case.name}) should raise ValueError but not, result={segment}")
|
||||
|
||||
error_message = str(exc_info.value)
|
||||
assert "not supported value" in error_message, (
|
||||
f"Test case {index} ({test_case.name}): Expected 'not supported value' in error message, "
|
||||
f"but got: {error_message}"
|
||||
)
|
||||
|
||||
    def test_build_segment_boolean_type_note(self):
        """Note: Boolean values are actually handled as integers in Python, so they don't raise ValueError."""
        # Boolean values in Python are subclasses of int, so they get processed as integers:
        # True becomes IntegerSegment(value=1) and False becomes IntegerSegment(value=0).
        true_segment = variable_factory.build_segment(True)
        false_segment = variable_factory.build_segment(False)

        # Verify they are processed as integers, not as errors
        assert true_segment.value == 1, "Test case 1 (boolean_true): Expected True to be processed as integer 1"
        assert false_segment.value == 0, "Test case 2 (boolean_false): Expected False to be processed as integer 0"
        assert true_segment.value_type == SegmentType.NUMBER
        assert false_segment.value_type == SegmentType.NUMBER
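
    # Hedged illustration (not part of the diff): the note above follows from bool being a
    # subclass of int in Python, so True/False pass integer checks and build_segment treats
    # them as numbers. The method name below is made up for illustration.
    def test_bool_is_int_subclass_note(self):
        assert issubclass(bool, int)
        assert isinstance(True, int)
        assert int(True) == 1 and int(False) == 0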