semantics

This commit is contained in:
2026-04-01 21:57:51 +03:00
parent 3bc4c8f885
commit 2b8e3831ef
71 changed files with 333 additions and 319 deletions

View File

@@ -7,6 +7,7 @@ os.environ["ENCRYPTION_KEY"] = "OnrCzomBWbIjTf7Y-fnhL2adlU55bHZQjp8zX5zBC5w="
# @PURPOSE: Verify assistant confirmation ownership, expiration, and deny behavior for restricted users.
# @LAYER: UI (API Tests)
# @RELATION: DEPENDS_ON -> backend.src.api.routes.assistant
# @RELATION: DEPENDS_ON -> AssistantApi
# @INVARIANT: Security-sensitive flows fail closed for unauthorized actors.
import os
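The hunk above pins ENCRYPTION_KEY at import time and states a fail-closed invariant for restricted users. A minimal sketch of what such a test can look like, assuming a FastAPI TestClient fixture named client and a token fixture named restricted_user_token (both hypothetical, as is the endpoint path):

import os
os.environ.setdefault("ENCRYPTION_KEY", "test-only-key")  # must be set before app modules read settings

def test_confirmation_rejected_for_non_owner(client, restricted_user_token):
    # Fail closed: a restricted user acting on a confirmation they do not own
    # receives an explicit denial, never a silent success.
    response = client.post(
        "/api/assistant/confirmations/some-id/confirm",
        headers={"Authorization": f"Bearer {restricted_user_token}"},
    )
    assert response.status_code in (403, 404)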

View File

@@ -1025,6 +1025,7 @@ def _has_any_permission(current_user: User, checks: List[Tuple[str, str]]) -> bo
# @PURPOSE: Build current-user tool catalog for LLM planner with operation contracts and defaults.
# @PRE: current_user is authenticated; config/db are available.
# @POST: Returns list of executable tools filtered by permission and runtime availability.
# @RELATION: CALLS -> LLMProviderService
def _build_tool_catalog(
current_user: User,
config_manager: ConfigManager,
@@ -1264,6 +1265,9 @@ _DATASET_REVIEW_OPS = {
# @COMPLEXITY: 4
# @PURPOSE: Build assistant-safe dataset-review context snapshot with masked imported-filter payloads for session-scoped assistant routing.
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
# @PRE: session is a valid, active DatasetReviewSession for the current review.
# @POST: Returns a serializable dictionary with the review context and masked imported-filter payloads.
# @SIDE_EFFECT: Reads session data from the database.
def _serialize_dataset_review_context(session: DatasetReviewSession) -> Dict[str, Any]:
with belief_scope('_serialize_dataset_review_context'):
logger.reason('Belief protocol reasoning checkpoint for _serialize_dataset_review_context')
@@ -1280,6 +1284,9 @@ def _serialize_dataset_review_context(session: DatasetReviewSession) -> Dict[str
# @COMPLEXITY: 4
# @PURPOSE: Load owner-scoped dataset-review context for assistant planning and grounded response generation.
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository]
# @PRE: dataset_review_session_id, when provided, references an active review session owned by the current user.
# @POST: Returns a context dictionary with session data and findings, or None when no session is resolved.
# @SIDE_EFFECT: Reads session data from the database.
def _load_dataset_review_context(dataset_review_session_id: Optional[str], current_user: User, db: Session) -> Optional[Dict[str, Any]]:
with belief_scope('_load_dataset_review_context'):
if not dataset_review_session_id:
@@ -1378,6 +1385,7 @@ def _dataset_review_conflict_http_exception(
# [DEF:_plan_dataset_review_intent:Function]
# @COMPLEXITY: 3
# @PURPOSE: Parse session-scoped dataset-review assistant commands before falling back to generic assistant tool routing.
# @RELATION: CALLS -> DatasetReviewOrchestrator
def _plan_dataset_review_intent(
message: str,
dataset_context: Dict[str, Any],
@@ -1532,6 +1540,10 @@ def _plan_dataset_review_intent(
# [DEF:_dispatch_dataset_review_intent:Function]
# @COMPLEXITY: 4
# @PURPOSE: Route confirmed dataset-review assistant intents through existing backend dataset-review APIs and orchestration boundaries.
# @RELATION: CALLS -> DatasetReviewOrchestrator
# @PRE: context contains valid session data and user intent.
# @POST: Returns a structured response with planned actions and confirmations.
# @SIDE_EFFECT: May update session state and enqueue tasks.
async def _dispatch_dataset_review_intent(
intent: Dict[str, Any],
current_user: User,
@@ -1757,8 +1769,10 @@ async def _dispatch_dataset_review_intent(
# [DEF:_confirmation_summary:Function]
# @COMPLEXITY: 4
# @PURPOSE: Build human-readable confirmation prompt for an intent before execution.
# @PRE: intent contains operation and entities fields describing the planned review actions.
# @POST: Returns descriptive Russian-language text ending with a confirmation prompt.
# @RELATION: CALLS -> DatasetReviewOrchestrator
# @SIDE_EFFECT: None - pure formatting function.
async def _async_confirmation_summary(intent: Dict[str, Any], config_manager: ConfigManager, db: Session) -> str:
with belief_scope('_confirmation_summary'):
logger.reason('Belief protocol reasoning checkpoint for _confirmation_summary')
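The @PURPOSE and @POST above describe a human-readable confirmation prompt built from a planned intent. A minimal synchronous sketch of that shape, with hypothetical intent keys and rendered in English for brevity (the real prompt is Russian per the @POST):

from typing import Any, Dict

def confirmation_summary_sketch(intent: Dict[str, Any]) -> str:
    # Hypothetical field names; the real intent structure is defined by the planner.
    operation = intent.get("operation", "unknown operation")
    entities = intent.get("entities", [])
    targets = ", ".join(str(e) for e in entities) or "no entities"
    return f"About to run '{operation}' on {targets}. Confirm? (yes/no)"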

View File

@@ -3,8 +3,8 @@
# @PURPOSE: Defines the FastAPI router for managing external database connections.
# @COMPLEXITY: 3
# @LAYER: UI (API)
# @RELATION: DEPENDS_ON -> Session
# @CONSTRAINT: Must use belief_scope for logging.
# @RELATION: DEPENDS_ON -> [get_db]
# @RELATION: DEPENDS_ON -> [ConnectionConfig]
# [SECTION: IMPORTS]
from typing import List, Optional

View File

@@ -4,8 +4,8 @@
# @SEMANTICS: api, environments, superset, databases
# @PURPOSE: API endpoints for listing environments and their databases.
# @LAYER: API
# @RELATION: DEPENDS_ON -> backend.src.dependencies
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
# @RELATION: DEPENDS_ON -> [AppDependencies]
# @RELATION: DEPENDS_ON -> [SupersetClient]
#
# @INVARIANT: Environment IDs must exist in the configuration.

View File

@@ -768,6 +768,7 @@ async def delete_gitea_repository(
# @POST: Repository is initialized on disk and a GitRepository record is saved in DB.
# @PARAM: dashboard_ref (str)
# @PARAM: init_data (RepoInitRequest)
# @RELATION: CALLS -> GitService.init_repo
@router.post("/repositories/{dashboard_ref}/init")
async def init_repository(
dashboard_ref: str,
@@ -1089,6 +1090,7 @@ async def push_changes(
# @PRE: `dashboard_ref` repository exists and has a remote configured.
# @POST: Remote changes are fetched and merged into the local branch.
# @PARAM: dashboard_ref (str)
# @RELATION: CALLS -> GitService.pull
@router.post("/repositories/{dashboard_ref}/pull")
async def pull_changes(
dashboard_ref: str,
@@ -1217,6 +1219,7 @@ async def get_merge_conflicts(
# @PURPOSE: Apply mine/theirs/manual conflict resolutions from WebUI and stage files.
# @PRE: `dashboard_ref` resolves; request contains at least one resolution item.
# @POST: Resolved files are staged in index.
# @RELATION: CALLS -> GitService.resolve_conflicts
@router.post("/repositories/{dashboard_ref}/merge/resolve")
async def resolve_merge_conflicts(
dashboard_ref: str,
@@ -1276,6 +1279,7 @@ async def abort_merge(
# @PURPOSE: Finalize unfinished merge from WebUI flow.
# @PRE: All conflicts are resolved and staged.
# @POST: Merge commit is created.
# @RELATION: CALLS -> GitService.continue_merge
@router.post("/repositories/{dashboard_ref}/merge/continue")
async def continue_merge(
dashboard_ref: str,
@@ -1306,6 +1310,7 @@ async def continue_merge(
# @POST: Dashboard YAMLs are exported from Superset and committed to Git.
# @PARAM: dashboard_ref (str)
# @PARAM: source_env_id (Optional[str])
# @RELATION: CALLS -> GitPlugin.execute
@router.post("/repositories/{dashboard_ref}/sync")
async def sync_dashboard(
dashboard_ref: str,
@@ -1343,6 +1348,7 @@ async def sync_dashboard(
# @PURPOSE: Promote changes between branches via MR or direct merge.
# @PRE: dashboard repository is initialized and Git config is valid.
# @POST: Returns promotion result metadata.
# @RELATION: CALLS -> GitPlugin.execute
@router.post("/repositories/{dashboard_ref}/promote", response_model=PromoteResponse)
async def promote_dashboard(
dashboard_ref: str,
@@ -1493,6 +1499,7 @@ async def get_environments(
# @POST: Dashboard YAMLs are read from Git and imported into the target Superset.
# @PARAM: dashboard_ref (str)
# @PARAM: deploy_data (DeployRequest)
# @RELATION: CALLS -> GitPlugin.execute
@router.post("/repositories/{dashboard_ref}/deploy")
async def deploy_dashboard(
dashboard_ref: str,
@@ -1670,6 +1677,7 @@ async def get_repository_diff(
# @PURPOSE: Generate a suggested commit message using LLM.
# @PRE: Repository for `dashboard_ref` is initialized.
# @POST: Returns a suggested commit message string.
# @RELATION: CALLS -> GitService.generate_commit_message
@router.post("/repositories/{dashboard_ref}/generate-message")
async def generate_commit_message(
dashboard_ref: str,

View File

@@ -4,9 +4,9 @@
# @SEMANTICS: api, mappings, database, fuzzy-matching
# @PURPOSE: API endpoints for managing database mappings and getting suggestions.
# @LAYER: API
# @RELATION: DEPENDS_ON -> backend.src.dependencies
# @RELATION: DEPENDS_ON -> backend.src.core.database
# @RELATION: DEPENDS_ON -> backend.src.services.mapping_service
# @RELATION: DEPENDS_ON -> [AppDependencies]
# @RELATION: DEPENDS_ON -> [DatabaseModule]
# @RELATION: DEPENDS_ON -> [mapping_service]
#
# @INVARIANT: Mappings are persisted in the SQLite database.

View File

@@ -4,7 +4,8 @@
# @SEMANTICS: storage, files, upload, download, backup, repository
# @PURPOSE: API endpoints for file storage management (backups and repositories).
# @LAYER: API
# @RELATION: DEPENDS_ON -> [backend.src.models.storage]
# @RELATION: DEPENDS_ON -> [StorageModels]
# @RELATION: DEPENDS_ON -> [StoragePlugin]
#
# @INVARIANT: All paths must be validated against path traversal.
@@ -31,8 +32,7 @@ router = APIRouter(tags=["storage"])
# @PARAM: category (Optional[FileCategory]) - Filter by category.
# @PARAM: path (Optional[str]) - Subpath within the category.
# @RETURN: List[StoredFile] - List of files/directories.
#
# @RELATION: CALLS -> [backend.src.plugins.storage.plugin.StoragePlugin.list_files]
# @RELATION: DEPENDS_ON -> [StoragePlugin]
@router.get("/files", response_model=List[StoredFile])
async def list_files(
category: Optional[FileCategory] = None,
@@ -63,7 +63,7 @@ async def list_files(
#
# @SIDE_EFFECT: Writes file to the filesystem.
#
# @RELATION: CALLS -> [backend.src.plugins.storage.plugin.StoragePlugin.save_file]
# @RELATION: DEPENDS_ON -> [StoragePlugin]
@router.post("/upload", response_model=StoredFile, status_code=201)
async def upload_file(
category: FileCategory = Form(...),
@@ -95,7 +95,7 @@ async def upload_file(
#
# @SIDE_EFFECT: Deletes item from the filesystem.
#
# @RELATION: CALLS -> [backend.src.plugins.storage.plugin.StoragePlugin.delete_file]
# @RELATION: DEPENDS_ON -> [StoragePlugin]
@router.delete("/files/{category}/{path:path}", status_code=204)
async def delete_file(
category: FileCategory,
@@ -126,7 +126,7 @@ async def delete_file(
# @PARAM: path (str) - Relative path of the file.
# @RETURN: FileResponse - The file content.
#
# @RELATION: CALLS -> [backend.src.plugins.storage.plugin.StoragePlugin.get_file_path]
# @RELATION: DEPENDS_ON -> [StoragePlugin]
@router.get("/download/{category}/{path:path}")
async def download_file(
category: FileCategory,
@@ -158,8 +158,7 @@ async def download_file(
# @PARAM: path (str) - Absolute or storage-root-relative file path.
# @RETURN: FileResponse - The file content.
#
# @RELATION: CALLS -> [backend.src.plugins.storage.plugin.StoragePlugin.get_storage_root]
# @RELATION: CALLS -> [backend.src.plugins.storage.plugin.StoragePlugin.validate_path]
# @RELATION: DEPENDS_ON -> [StoragePlugin]
@router.get("/file")
async def get_file_by_path(
path: str,

View File

@@ -73,6 +73,7 @@ app = FastAPI(
# [DEF:ensure_initial_admin_user:Function]
# @COMPLEXITY: 3
# @PURPOSE: Ensures initial admin user exists when bootstrap env flags are enabled.
# @RELATION: DEPENDS_ON -> AuthRepository
def ensure_initial_admin_user() -> None:
raw_flag = os.getenv("INITIAL_ADMIN_CREATE", "false").strip().lower()
if raw_flag not in {"1", "true", "yes", "on"}:

View File

@@ -3,17 +3,14 @@
# @SEMANTICS: auth, repository, database, user, role, permission
# @PURPOSE: Data access layer for authentication and user preference entities.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> [Session]
# @RELATION: DEPENDS_ON -> [User]
# @RELATION: DEPENDS_ON -> [Role]
# @RELATION: DEPENDS_ON -> [Permission]
# @RELATION: DEPENDS_ON -> [UserDashboardPreference]
# @RELATION: DEPENDS_ON -> [AuthModels]
# @RELATION: DEPENDS_ON -> [ProfileModels]
# @RELATION: DEPENDS_ON -> [belief_scope]
# @INVARIANT: All database read/write operations must execute via the injected SQLAlchemy session boundary.
# @DATA_CONTRACT: Session -> [User | Role | Permission | UserDashboardPreference]
# @DATA_CONTRACT: Input[sqlalchemy.orm.Session] -> Output[User|Role|Permission|UserDashboardPreference access]
# @PRE: Database connection is active.
# @POST: Provides valid access to identity data.
# @SIDE_EFFECT: None at module level.
# @SIDE_EFFECT: Executes database read queries through the injected SQLAlchemy session boundary.
# [SECTION: IMPORTS]
from typing import List, Optional
@@ -29,9 +26,8 @@ from ..logger import belief_scope, logger
# @PRE: Database session is bound.
# @POST: Entity instances returned safely.
# @SIDE_EFFECT: Performs database reads.
# @RELATION: DEPENDS_ON -> [Session]
# @RELATION: DEPENDS_ON -> [AuthModels]
class AuthRepository:
# @PURPOSE: Initialize repository with database session.
def __init__(self, db: Session):
self.db = db
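A minimal sketch of the session-boundary contract stated above; the stand-in User model and the method shown are illustrative, not the module's actual definitions:

from typing import Optional
from sqlalchemy import String
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):  # stand-in for the real auth User model
    __tablename__ = "users"
    id: Mapped[int] = mapped_column(primary_key=True)
    username: Mapped[str] = mapped_column(String(64), unique=True)

class AuthRepositorySketch:
    # Every read/write flows through the injected Session, per the module invariant.
    def __init__(self, db: Session):
        self.db = db

    def get_user_by_username(self, username: str) -> Optional[User]:
        return self.db.query(User).filter(User.username == username).first()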

View File

@@ -1,14 +1,17 @@
# [DEF:backend.src.core.migration_engine:Module]
# [DEF:MigrationEngineModule:Module]
#
# @COMPLEXITY: 5
# @SEMANTICS: migration, engine, zip, yaml, transformation, cross-filter, id-mapping
# @PURPOSE: Transforms Superset export ZIP archives while preserving archive integrity and patching mapped identifiers.
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[src.core.logger]
# @RELATION: [DEPENDS_ON] ->[src.core.mapping_service.IdMappingService]
# @RELATION: [DEPENDS_ON] ->[src.models.mapping.ResourceType]
# @RELATION: [DEPENDS_ON] ->[LoggerModule]
# @RELATION: [DEPENDS_ON] ->[IdMappingService]
# @RELATION: [DEPENDS_ON] ->[ResourceType]
# @RELATION: [DEPENDS_ON] ->[yaml]
#
# @PRE: Input archives are readable Superset exports and optional mapping collaborators expose remote id lookup APIs.
# @POST: Migration engine contracts preserve ZIP integrity while exposing transformation entrypoints for import pipelines.
# @SIDE_EFFECT: Reads and writes temporary archive contents during transformation workflows and emits structured belief-state logs.
# @DATA_CONTRACT: Input[zip_path, output_path, db_mapping, target_env_id?, fix_cross_filters?] -> Output[Transformed Superset archive]
# @INVARIANT: ZIP structure and non-targeted metadata must remain valid after transformation.
# [SECTION: IMPORTS]
@@ -296,7 +299,6 @@ class MigrationEngine:
# [/DEF:_patch_dashboard_metadata:Function]
# [/DEF:MigrationEngine:Class]
# [/DEF:backend.src.core.migration_engine:Module]
# [/DEF:MigrationEngineModule:Module]

View File

@@ -19,6 +19,7 @@ from datetime import datetime, time, timedelta, date
# @COMPLEXITY: 3
# @SEMANTICS: scheduler, service, apscheduler
# @PURPOSE: Provides a service to manage scheduled backup tasks.
# @RELATION: DEPENDS_ON -> ThrottledSchedulerConfigurator
# @RELATION: CALLS -> asyncio
class SchedulerService:
# [DEF:__init__:Function]
# @PURPOSE: Initializes the scheduler service with task and config managers.

View File

@@ -19,6 +19,7 @@ def task_logger(mock_add_log):
# @TEST_CONTRACT: TaskLoggerModel -> Invariants
# [DEF:test_task_logger_initialization:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
# @PURPOSE: Verify TaskLogger initializes with correct task_id and state.
def test_task_logger_initialization(task_logger):
"""Verify TaskLogger is bound to specific task_id and source."""
assert task_logger._task_id == "test_123"
@@ -29,6 +30,7 @@ def test_task_logger_initialization(task_logger):
# [DEF:test_log_methods_delegation:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
# @PURPOSE: Verify TaskLogger delegates log method calls to the underlying persistence service.
def test_log_methods_delegation(task_logger, mock_add_log):
"""Verify info, error, warning, debug delegate to internal _log."""
task_logger.info("info message", metadata={"k": "v"})
@@ -72,6 +74,7 @@ def test_log_methods_delegation(task_logger, mock_add_log):
# [DEF:test_with_source:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
# @PURPOSE: Verify TaskLogger.with_source returns a new logger with the correct source attribution.
def test_with_source(task_logger):
"""Verify with_source returns a new instance with updated default source."""
new_logger = task_logger.with_source("new_source")
@@ -85,6 +88,7 @@ def test_with_source(task_logger):
# [DEF:test_missing_task_id:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
# @PURPOSE: Verify TaskLogger raises or handles missing task_id gracefully.
def test_missing_task_id():
with pytest.raises(TypeError):
TaskLogger(add_log_fn=lambda x: x)
@@ -95,6 +99,7 @@ def test_missing_task_id():
# [DEF:test_invalid_add_log_fn:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
# @PURPOSE: Verify TaskLogger raises ValueError for invalid add_log_fn parameter.
def test_invalid_add_log_fn():
logger = TaskLogger(task_id="msg", add_log_fn=None)
with pytest.raises(TypeError):
@@ -105,6 +110,7 @@ def test_invalid_add_log_fn():
# [DEF:test_progress_log:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
# @PURPOSE: Verify TaskLogger correctly logs progress updates with percentage and message.
def test_progress_log(task_logger, mock_add_log):
"""Verify progress method correctly formats metadata."""
task_logger.progress("Step 1", 45.5)

View File

@@ -13,6 +13,8 @@ from ..config_manager import ConfigManager
# [DEF:TaskCleanupService:Class]
# @PURPOSE: Provides methods to clean up old task records and their associated logs.
# @COMPLEXITY: 3
# @RELATION: DEPENDS_ON -> TaskManager
# @RELATION: DEPENDS_ON -> ThrottledSchedulerConfigurator
class TaskCleanupService:
# [DEF:__init__:Function]
# @PURPOSE: Initializes the cleanup service with dependencies.

View File

@@ -15,7 +15,6 @@
# @RELATION: [DEPENDS_ON] ->[JobLifecycle]
# @RELATION: [DEPENDS_ON] ->[EventBus]
# @INVARIANT: Task IDs are unique.
# @CONSTRAINT: Must use belief_scope for logging.
# @TEST_CONTRACT: TaskManagerRuntime -> {
# required_fields: {plugin_loader: PluginLoader},
# optional_fields: {},

View File

@@ -1,3 +1,3 @@
# [DEF:src.core.utils:Package]
# [DEF:CoreUtils:Package]
# @PURPOSE: Shared utility package root.
# [/DEF:src.core.utils:Package]
# [/DEF:CoreUtils:Package]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.core.utils.dataset_mapper:Module]
# [DEF:DatasetMapperModule:Module]
#
# @SEMANTICS: dataset, mapping, postgresql, xlsx, superset
# @PURPOSE: This module updates dataset metadata (verbose_map) in Superset, extracting it from PostgreSQL or XLSX files.
@@ -234,4 +234,4 @@ class DatasetMapper:
# [/DEF:run_mapping:Function]
# [/DEF:DatasetMapper:Class]
# [/DEF:backend.core.utils.dataset_mapper:Module]
# [/DEF:DatasetMapperModule:Module]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.core.utils.matching:Module]
# [DEF:FuzzyMatching:Module]
#
# @SEMANTICS: fuzzy, matching, rapidfuzz, database, mapping
# @PURPOSE: Provides utility functions for fuzzy matching database names.
@@ -52,4 +52,4 @@ def suggest_mappings(source_databases: List[Dict], target_databases: List[Dict],
return suggestions
# [/DEF:suggest_mappings:Function]
# [/DEF:backend.src.core.utils.matching:Module]
# [/DEF:FuzzyMatching:Module]
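The module purpose above ("fuzzy matching database names") can be illustrated with a small rapidfuzz-based sketch; the threshold and the "database_name" key are assumptions, not the module's actual contract:

from typing import Dict, List
from rapidfuzz import fuzz, process

def suggest_mappings_sketch(source_databases: List[Dict], target_databases: List[Dict],
                            threshold: int = 80) -> List[Dict]:
    # Assumes each database dict carries a "database_name" key (hypothetical).
    target_names = [t["database_name"] for t in target_databases]
    suggestions = []
    for source in source_databases:
        match = process.extractOne(source["database_name"], target_names, scorer=fuzz.WRatio)
        if match and match[1] >= threshold:
            suggestions.append({"source": source["database_name"],
                                "target": match[0],
                                "score": match[1]})
    return suggestions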

View File

@@ -42,6 +42,7 @@ class TaskType(str, Enum):
# @INVARIANT: TaskStatus enum mapping logic holds.
# @SEMANTICS: enum, status, task
# @PURPOSE: Supported normalized report status values.
# @RELATION: DEPENDS_ON -> ReportModels
class ReportStatus(str, Enum):
SUCCESS = "success"
FAILED = "failed"
@@ -70,6 +71,7 @@ class ReportStatus(str, Enum):
# }
# @TEST_FIXTURE: basic_error -> {"message": "Connection timeout", "code": "ERR_504", "next_actions": ["retry"]}
# @TEST_EDGE: missing_message -> {"code": "ERR_504"}
# @RELATION: DEPENDS_ON -> ReportModels
class ErrorContext(BaseModel):
code: Optional[str] = None
message: str
@@ -114,6 +116,7 @@ class ErrorContext(BaseModel):
# @TEST_EDGE: empty_summary -> {"report_id": "rep-123", "task_id": "task-456", "task_type": "migration", "status": "success", "updated_at": "2026-02-26T12:00:00Z", "summary": ""}
# @TEST_EDGE: invalid_task_type -> {"report_id": "rep-123", "task_id": "task-456", "task_type": "invalid_type", "status": "success", "updated_at": "2026-02-26T12:00:00Z", "summary": "Done"}
# @TEST_INVARIANT: non_empty_validators -> verifies: [empty_report_id, empty_summary]
# @RELATION: DEPENDS_ON -> ReportModels
class TaskReport(BaseModel):
report_id: str
task_id: str
@@ -162,6 +165,7 @@ class TaskReport(BaseModel):
# @TEST_EDGE: invalid_sort_by -> {"sort_by": "unknown_field"}
# @TEST_EDGE: invalid_time_range -> {"time_from": "2026-02-26T12:00:00Z", "time_to": "2026-02-25T12:00:00Z"}
# @TEST_INVARIANT: attribute_constraints_enforced -> verifies: [invalid_page_size_large, invalid_sort_by, invalid_time_range]
# @RELATION: DEPENDS_ON -> ReportModels
class ReportQuery(BaseModel):
page: int = Field(default=1, ge=1)
page_size: int = Field(default=20, ge=1, le=100)
@@ -213,6 +217,7 @@ class ReportQuery(BaseModel):
# }
# @TEST_FIXTURE: empty_collection -> {"items": [], "total": 0, "page": 1, "page_size": 20, "has_next": False, "applied_filters": {}}
# @TEST_EDGE: negative_total -> {"items": [], "total": -5, "page": 1, "page_size": 20, "has_next": False, "applied_filters": {}}
# @RELATION: DEPENDS_ON -> ReportModels
class ReportCollection(BaseModel):
items: List[TaskReport]
total: int = Field(ge=0)
@@ -238,6 +243,7 @@ class ReportCollection(BaseModel):
# }
# @TEST_FIXTURE: valid_detail -> {"report": {"report_id": "rep-1", "task_id": "task-1", "task_type": "backup", "status": "success", "updated_at": "2026-02-26T12:00:00Z", "summary": "Done"}}
# @TEST_EDGE: missing_report -> {}
# @RELATION: DEPENDS_ON -> ReportModels
class ReportDetailView(BaseModel):
report: TaskReport
timeline: List[Dict[str, Any]] = Field(default_factory=list)
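The field constraints shown above (page >= 1, page_size between 1 and 100) are what @TEST_EDGE fixtures such as invalid_page_size_large exercise. A self-contained sketch of the same constraint style and the failure it produces (assuming pydantic v2):

from pydantic import BaseModel, Field, ValidationError

class ReportQuerySketch(BaseModel):
    # Mirrors only the two constrained fields from the diff; everything else is omitted.
    page: int = Field(default=1, ge=1)
    page_size: int = Field(default=20, ge=1, le=100)

ReportQuerySketch(page=1, page_size=20)   # valid
try:
    ReportQuerySketch(page_size=500)      # violates le=100
except ValidationError as exc:
    print(exc.errors()[0]["type"])        # "less_than_equal" on pydantic v2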

View File

@@ -4,7 +4,6 @@
# @LAYER: Plugins
# @RELATION: Inherits from PluginBase. Uses SupersetClient from core.
# @RELATION: USES -> TaskContext
# @CONSTRAINT: Must use belief_scope for logging.
# [SECTION: IMPORTS]
from typing import Dict, Any, Optional

View File

@@ -1,3 +1,3 @@
# [DEF:src.plugins.git:Package]
# [DEF:GitPluginExt:Package]
# @PURPOSE: Git plugin extension package root.
# [/DEF:src.plugins.git:Package]
# [/DEF:GitPluginExt:Package]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.plugins.git_plugin:Module]
# [DEF:GitPluginModule:Module]
#
# @SEMANTICS: git, plugin, dashboard, version_control, sync, deploy
# @PURPOSE: Provides a plugin for versioning and deploying Superset dashboards.
@@ -398,4 +398,4 @@ class GitPlugin(PluginBase):
# [/DEF:initialize:Function]
# [/DEF:GitPlugin:Class]
# [/DEF:backend.src.plugins.git_plugin:Module]
# [/DEF:GitPluginModule:Module]

View File

@@ -1,8 +1,11 @@
# [DEF:backend/src/plugins/llm_analysis/models.py:Module]
# [DEF:LLMAnalysisModels:Module]
# @COMPLEXITY: 3
# @SEMANTICS: pydantic, models, llm
# @PURPOSE: Define Pydantic models for LLM Analysis plugin.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> pydantic
from typing import List, Optional
from pydantic import BaseModel, Field
@@ -59,4 +62,4 @@ class ValidationResult(BaseModel):
raw_response: Optional[str] = None
# [/DEF:ValidationResult:Class]
# [/DEF:backend/src/plugins/llm_analysis/models.py:Module]
# [/DEF:LLMAnalysisModels:Module]

View File

@@ -4,7 +4,6 @@
# @LAYER: Plugins
# @RELATION: Inherits from PluginBase. Uses DatasetMapper from superset_tool.
# @RELATION: USES -> TaskContext
# @CONSTRAINT: Must use belief_scope for logging.
# [SECTION: IMPORTS]
from typing import Dict, Any, Optional

View File

@@ -26,12 +26,14 @@ from ..core.task_manager.context import TaskContext
# [DEF:MigrationPlugin:Class]
# @PURPOSE: Implementation of the migration plugin workflow and transformation orchestration.
# @PRE: Plugin loader must register this instance; SupersetClient is authenticated and a database session is active.
# @POST: Provides the migration UI schema and executes atomic dashboard transfers, returning a MigrationResult with success/failure status and an artifact list.
# @TEST_FIXTURE: superset_export_zip -> file:backend/tests/fixtures/migration/dashboard_export.zip
# @TEST_FIXTURE: db_mapping_payload -> INLINE_JSON: {"db_mappings": {"source_uuid_1": "target_uuid_2"}}
# @TEST_FIXTURE: password_inject_payload -> INLINE_JSON: {"passwords": {"PostgreSQL": "secret123"}}
# @TEST_INVARIANT: strict_db_isolation -> VERIFIED_BY: [successful_dashboard_transfer, missing_mapping_resolution]
# @SIDE_EFFECT: Writes migration artifacts to database, triggers dashboard imports
# @DATA_CONTRACT: MigrationPlan AST, DryRunResult, RiskAssessment
class MigrationPlugin(PluginBase):
"""
A plugin to migrate Superset dashboards between environments.

View File

@@ -4,7 +4,6 @@
# @LAYER: Plugins
# @RELATION: Inherits from PluginBase. Uses SupersetClient from core.
# @RELATION: USES -> TaskContext
# @CONSTRAINT: Must use belief_scope for logging.
# [SECTION: IMPORTS]
import re

View File

@@ -3,6 +3,7 @@
# @SEMANTICS: health, schemas, pydantic
# @PURPOSE: Pydantic schemas for dashboard health summary.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> pydantic
from pydantic import BaseModel, Field
from typing import List, Optional

View File

@@ -18,6 +18,7 @@ from pydantic import BaseModel, Field
# [DEF:ProfilePermissionState:Class]
# @COMPLEXITY: 3
# @PURPOSE: Represents one permission badge state for profile read-only security view.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class ProfilePermissionState(BaseModel):
key: str
allowed: bool
@@ -29,6 +30,7 @@ class ProfilePermissionState(BaseModel):
# [DEF:ProfileSecuritySummary:Class]
# @COMPLEXITY: 3
# @PURPOSE: Read-only security and access snapshot for current user.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class ProfileSecuritySummary(BaseModel):
read_only: bool = True
auth_source: Optional[str] = None
@@ -44,6 +46,7 @@ class ProfileSecuritySummary(BaseModel):
# [DEF:ProfilePreference:Class]
# @COMPLEXITY: 3
# @PURPOSE: Represents persisted profile preference for a single authenticated user.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class ProfilePreference(BaseModel):
user_id: str
superset_username: Optional[str] = None
@@ -77,6 +80,7 @@ class ProfilePreference(BaseModel):
# [DEF:ProfilePreferenceUpdateRequest:Class]
# @COMPLEXITY: 3
# @PURPOSE: Request payload for updating current user's profile settings.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class ProfilePreferenceUpdateRequest(BaseModel):
superset_username: Optional[str] = Field(
default=None,
@@ -138,6 +142,7 @@ class ProfilePreferenceUpdateRequest(BaseModel):
# [DEF:ProfilePreferenceResponse:Class]
# @COMPLEXITY: 3
# @PURPOSE: Response envelope for profile preference read/update endpoints.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class ProfilePreferenceResponse(BaseModel):
status: Literal["success", "error"] = "success"
message: Optional[str] = None
@@ -152,6 +157,7 @@ class ProfilePreferenceResponse(BaseModel):
# [DEF:SupersetAccountLookupRequest:Class]
# @COMPLEXITY: 3
# @PURPOSE: Query contract for Superset account lookup by selected environment.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class SupersetAccountLookupRequest(BaseModel):
environment_id: str
search: Optional[str] = None
@@ -167,6 +173,7 @@ class SupersetAccountLookupRequest(BaseModel):
# [DEF:SupersetAccountCandidate:Class]
# @COMPLEXITY: 3
# @PURPOSE: Canonical account candidate projected from Superset users payload.
# @RELATION: DEPENDS_ON -> ProfileSchemas
class SupersetAccountCandidate(BaseModel):
environment_id: str
username: str
@@ -181,6 +188,7 @@ class SupersetAccountCandidate(BaseModel):
# [DEF:SupersetAccountLookupResponse:Class]
# @COMPLEXITY: 3
# @PURPOSE: Response envelope for Superset account lookup (success or degraded mode).
# @RELATION: DEPENDS_ON -> ProfileSchemas
class SupersetAccountLookupResponse(BaseModel):
status: Literal["success", "degraded"]
environment_id: str

View File

@@ -3,6 +3,7 @@
# @SEMANTICS: settings, schemas, pydantic, validation
# @PURPOSE: Pydantic schemas for application settings and automation policies.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> pydantic
from pydantic import BaseModel, Field
from typing import List, Optional

View File

@@ -3,6 +3,7 @@
# @SEMANTICS: cli, clean-release, candidate, artifacts, manifest
# @PURPOSE: Provide headless CLI commands for candidate registration, artifact import and manifest build.
# @LAYER: Scripts
# @RELATION: CALLS -> ComplianceOrchestrator
from __future__ import annotations

View File

@@ -164,6 +164,7 @@ async def test_get_health_summary_reuses_dashboard_metadata_cache_across_service
# [DEF:test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks:Function]
# @RELATION: BINDS_TO ->[test_health_service]
# @PURPOSE: Verify that deleting a validation report also removes dashboard scope and linked tasks.
def test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks():
db = MagicMock()
config_manager = MagicMock()
@@ -239,6 +240,7 @@ def test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks():
# [DEF:test_delete_validation_report_returns_false_for_unknown_record:Function]
# @RELATION: BINDS_TO ->[test_health_service]
# @PURPOSE: Verify delete returns False when validation record does not exist.
def test_delete_validation_report_returns_false_for_unknown_record():
db = MagicMock()
db.query.return_value.filter.return_value.first.return_value = None
@@ -253,6 +255,7 @@ def test_delete_validation_report_returns_false_for_unknown_record():
# [DEF:test_delete_validation_report_swallows_linked_task_cleanup_failure:Function]
# @RELATION: BINDS_TO ->[test_health_service]
# @PURPOSE: Verify delete swallows exceptions when cleaning up linked tasks.
def test_delete_validation_report_swallows_linked_task_cleanup_failure():
db = MagicMock()
config_manager = MagicMock()

View File

@@ -81,6 +81,7 @@ async def test_get_dashboards_with_status():
# @TEST: get_datasets_with_status returns datasets with task status
# @PRE: SupersetClient returns dataset list
# @POST: Each dataset has last_task field
# @PURPOSE: Verify ResourceService.get_datasets_with_status returns datasets grouped by validation status.
@pytest.mark.asyncio
async def test_get_datasets_with_status():
with patch("src.services.resource_service.SupersetClient") as mock_client:
@@ -121,6 +122,7 @@ async def test_get_datasets_with_status():
# @TEST: get_activity_summary returns active count and recent tasks
# @PRE: tasks list provided
# @POST: Returns dict with active_count and recent_tasks
# @PURPOSE: Verify ResourceService.get_activity_summary returns recent task activity.
def test_get_activity_summary():
from src.services.resource_service import ResourceService
@@ -159,6 +161,7 @@ def test_get_activity_summary():
# @TEST: _get_git_status_for_dashboard returns None when no repo exists
# @PRE: GitService returns None for repo
# @POST: Returns None
# @PURPOSE: Verify get_git_status_for_dashboard returns None when no repo exists.
def test_get_git_status_for_dashboard_no_repo():
with patch("src.services.resource_service.GitService") as mock_git:
from src.services.resource_service import ResourceService
@@ -181,6 +184,7 @@ def test_get_git_status_for_dashboard_no_repo():
# @TEST: _get_last_task_for_resource returns most recent task for resource
# @PRE: tasks list with matching resource_id
# @POST: Returns task summary with task_id and status
# @PURPOSE: Verify get_last_task_for_resource returns the most recent task for a given resource.
def test_get_last_task_for_resource():
from src.services.resource_service import ResourceService
@@ -214,6 +218,7 @@ def test_get_last_task_for_resource():
# @TEST: _extract_resource_name_from_task extracts name from params
# @PRE: task has resource_name in params
# @POST: Returns resource name or fallback
# @PURPOSE: Verify extract_resource_name_from_task correctly parses resource names from task identifiers.
def test_extract_resource_name_from_task():
from src.services.resource_service import ResourceService
@@ -244,6 +249,7 @@ def test_extract_resource_name_from_task():
# @TEST: _get_last_task_for_resource returns None for empty tasks list
# @PRE: tasks is empty list
# @POST: Returns None
# @PURPOSE: Verify get_last_task_for_resource returns None when tasks list is empty.
def test_get_last_task_for_resource_empty_tasks():
from src.services.resource_service import ResourceService
@@ -261,6 +267,7 @@ def test_get_last_task_for_resource_empty_tasks():
# @TEST: _get_last_task_for_resource returns None when no tasks match resource_id
# @PRE: tasks list has no matching resource_id
# @POST: Returns None
# @PURPOSE: Verify get_last_task_for_resource returns None when no task matches the resource.
def test_get_last_task_for_resource_no_match():
from src.services.resource_service import ResourceService
@@ -284,6 +291,7 @@ def test_get_last_task_for_resource_no_match():
# @TEST: get_dashboards_with_status handles mixed naive/aware datetimes without comparison errors.
# @PRE: Task list includes both timezone-aware and timezone-naive timestamps.
# @POST: Latest task is selected deterministically and no exception is raised.
# @PURPOSE: Verify get_dashboards_with_status handles mixed naive and aware datetimes without crashing.
@pytest.mark.asyncio
async def test_get_dashboards_with_status_handles_mixed_naive_and_aware_task_datetimes():
with (
@@ -327,6 +335,7 @@ async def test_get_dashboards_with_status_handles_mixed_naive_and_aware_task_dat
# @TEST: get_dashboards_with_status keeps latest task identity while falling back to older decisive validation status.
# @PRE: Same dashboard has older WARN and newer UNKNOWN validation tasks.
# @POST: Returned last_task points to newest task but preserves WARN as last meaningful validation state.
# @PURPOSE: Verify status ranking prefers decisive validation over newer unknown status.
@pytest.mark.anyio
async def test_get_dashboards_with_status_prefers_latest_decisive_validation_status_over_newer_unknown():
with (
@@ -376,6 +385,7 @@ async def test_get_dashboards_with_status_prefers_latest_decisive_validation_sta
# @TEST: get_dashboards_with_status still returns newest UNKNOWN when no decisive validation exists.
# @PRE: Same dashboard has only UNKNOWN validation tasks.
# @POST: Returned last_task keeps newest UNKNOWN task.
# @PURPOSE: Verify fallback to latest unknown status when no decisive history exists.
@pytest.mark.anyio
async def test_get_dashboards_with_status_falls_back_to_latest_unknown_without_decisive_history():
with (
@@ -424,6 +434,7 @@ async def test_get_dashboards_with_status_falls_back_to_latest_unknown_without_d
# @TEST: _get_last_task_for_resource handles mixed naive/aware created_at values.
# @PRE: Matching tasks include naive and aware created_at timestamps.
# @POST: Latest task is returned without raising datetime comparison errors.
# @PURPOSE: Verify get_last_task_for_resource correctly sorts mixed naive and aware created_at timestamps.
def test_get_last_task_for_resource_handles_mixed_naive_and_aware_created_at():
from src.services.resource_service import ResourceService
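The tests above assert that mixing timezone-naive and timezone-aware timestamps never raises comparison errors. One common way to meet that contract (an assumption about the implementation, not a quote of it) is to coerce naive values to UTC before sorting:

from datetime import datetime, timezone

def as_aware_utc(value: datetime) -> datetime:
    # Naive timestamps are assumed to already be UTC; aware ones are converted.
    if value.tzinfo is None:
        return value.replace(tzinfo=timezone.utc)
    return value.astimezone(timezone.utc)

tasks = [
    {"task_id": "a", "created_at": datetime(2026, 2, 1, 12, 0)},                       # naive
    {"task_id": "b", "created_at": datetime(2026, 2, 2, 12, 0, tzinfo=timezone.utc)},  # aware
]
latest = max(tasks, key=lambda t: as_aware_utc(t["created_at"]))
assert latest["task_id"] == "b"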

View File

@@ -16,6 +16,7 @@ from src.services.clean_release.audit_service import (
@patch("src.services.clean_release.audit_service.logger")
# [DEF:test_audit_preparation:Function]
# @RELATION: BINDS_TO -> TestAuditService
# @PURPOSE: Verify audit preparation stage correctly initializes and validates candidate state.
def test_audit_preparation(mock_logger):
audit_preparation("cand-1", "PREPARED")
mock_logger.info.assert_called_with(
@@ -29,6 +30,7 @@ def test_audit_preparation(mock_logger):
@patch("src.services.clean_release.audit_service.logger")
# [DEF:test_audit_check_run:Function]
# @RELATION: BINDS_TO -> TestAuditService
# @PURPOSE: Verify audit check run executes all checks and collects results.
def test_audit_check_run(mock_logger):
audit_check_run("check-1", "COMPLIANT")
mock_logger.info.assert_called_with(
@@ -42,6 +44,7 @@ def test_audit_check_run(mock_logger):
@patch("src.services.clean_release.audit_service.logger")
# [DEF:test_audit_report:Function]
# @RELATION: BINDS_TO -> TestAuditService
# @PURPOSE: Verify audit report generation aggregates check results into a structured report.
def test_audit_report(mock_logger):
audit_report("rep-1", "cand-1")
mock_logger.info.assert_called_with(

View File

@@ -5,6 +5,10 @@
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[ManifestBuilder]
# @INVARIANT: Same input artifacts produce identical deterministic hash.
# @PRE: Test fixtures are properly initialized
# @POST: All test assertions pass
# @SIDE_EFFECT: None - test isolation
# @DATA_CONTRACT: TestInput -> TestOutput
from src.services.clean_release.manifest_builder import build_distribution_manifest
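The invariant above ("same input artifacts produce identical deterministic hash") is typically achieved by hashing a canonical serialization. A minimal sketch under that assumption; the artifact field names are hypothetical and the real build_distribution_manifest contract is not shown here:

import hashlib
import json
from typing import Dict, List

def manifest_digest_sketch(artifacts: List[Dict[str, str]]) -> str:
    # Sort artifacts and serialize with sorted keys so the digest is independent
    # of insertion order and dict key order.
    canonical = json.dumps(sorted(artifacts, key=lambda a: a["path"]),
                           sort_keys=True, separators=(",", ":"))
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()

a = [{"path": "dash/x.yaml", "sha256": "abc"}, {"path": "dash/y.yaml", "sha256": "def"}]
b = list(reversed(a))
assert manifest_digest_sketch(a) == manifest_digest_sketch(b)  # order-independent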

View File

@@ -50,6 +50,7 @@ def enterprise_clean_setup():
# @TEST_SCENARIO: policy_valid
# [DEF:test_policy_valid:Function]
# @RELATION: BINDS_TO -> TestPolicyEngine
# @PURPOSE: Verify policy validation passes when all required fields are present and valid.
def test_policy_valid(enterprise_clean_setup):
policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry)
@@ -64,6 +65,7 @@ def test_policy_valid(enterprise_clean_setup):
# [DEF:test_missing_registry_ref:Function]
# @RELATION: BINDS_TO -> TestPolicyEngine
# @PURPOSE: Verify policy validation fails when registry_ref is missing.
def test_missing_registry_ref(enterprise_clean_setup):
policy, registry = enterprise_clean_setup
policy.internal_source_registry_ref = " "
@@ -79,6 +81,7 @@ def test_missing_registry_ref(enterprise_clean_setup):
# [DEF:test_conflicting_registry:Function]
# @RELATION: BINDS_TO -> TestPolicyEngine
# @PURPOSE: Verify policy engine rejects conflicting registry references.
def test_conflicting_registry(enterprise_clean_setup):
policy, registry = enterprise_clean_setup
registry.registry_id = "WRONG-REG"
@@ -97,6 +100,7 @@ def test_conflicting_registry(enterprise_clean_setup):
# [DEF:test_classify_artifact:Function]
# @RELATION: BINDS_TO -> TestPolicyEngine
# @PURPOSE: Verify policy engine correctly classifies artifacts based on source and type.
def test_classify_artifact(enterprise_clean_setup):
policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry)
@@ -121,6 +125,7 @@ def test_classify_artifact(enterprise_clean_setup):
# [DEF:test_validate_resource_source:Function]
# @RELATION: BINDS_TO -> TestPolicyEngine
# @PURPOSE: Verify validate_resource_source correctly validates or rejects resource source identifiers.
def test_validate_resource_source(enterprise_clean_setup):
policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry)
@@ -141,6 +146,7 @@ def test_validate_resource_source(enterprise_clean_setup):
# [DEF:test_evaluate_candidate:Function]
# @RELATION: BINDS_TO -> TestPolicyEngine
# @PURPOSE: Verify policy engine evaluates release candidates against configured policies.
def test_evaluate_candidate(enterprise_clean_setup):
policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry)

View File

@@ -45,6 +45,7 @@ def _registry() -> ResourceSourceRegistry:
# [DEF:test_validate_internal_sources_all_internal_ok:Function]
# @RELATION: BINDS_TO -> TestSourceIsolation
# @PURPOSE: Verify validate_internal_sources passes when all sources are internal and allowed.
def test_validate_internal_sources_all_internal_ok():
result = validate_internal_sources(
registry=_registry(),
@@ -59,6 +60,7 @@ def test_validate_internal_sources_all_internal_ok():
# [DEF:test_validate_internal_sources_external_blocked:Function]
# @RELATION: BINDS_TO -> TestSourceIsolation
# @PURPOSE: Verify validate_internal_sources blocks external sources when policy requires internal-only.
def test_validate_internal_sources_external_blocked():
result = validate_internal_sources(
registry=_registry(),

View File

@@ -16,6 +16,7 @@ from src.services.clean_release.stages import derive_final_status, MANDATORY_STA
# [DEF:test_derive_final_status_compliant:Function]
# @RELATION: BINDS_TO -> TestStages
# @PURPOSE: Verify derive_final_status returns compliant when all stages pass.
def test_derive_final_status_compliant():
results = [
CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok")
@@ -29,6 +30,7 @@ def test_derive_final_status_compliant():
# [DEF:test_derive_final_status_blocked:Function]
# @RELATION: BINDS_TO -> TestStages
# @PURPOSE: Verify derive_final_status returns blocked when any stage fails.
def test_derive_final_status_blocked():
results = [
CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok")
@@ -43,6 +45,7 @@ def test_derive_final_status_blocked():
# [DEF:test_derive_final_status_failed_missing:Function]
# @RELATION: BINDS_TO -> TestStages
# @PURPOSE: Verify derive_final_status returns failed when required stages are missing.
def test_derive_final_status_failed_missing():
results = [
CheckStageResult(
@@ -57,6 +60,7 @@ def test_derive_final_status_failed_missing():
# [DEF:test_derive_final_status_failed_skipped:Function]
# @RELATION: BINDS_TO -> TestStages
# @PURPOSE: Verify derive_final_status returns failed when critical stages are skipped.
def test_derive_final_status_failed_skipped():
results = [
CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok")
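The four tests above pin down the derivation rules: COMPLIANT only when every mandatory stage passes, BLOCKED on any failure, FAILED when mandatory stages are missing or skipped. A minimal sketch of that decision logic with stand-in names (the real CheckStageStatus, CheckStageResult and stage identifiers differ):

from enum import Enum
from typing import Dict

class StageStatus(str, Enum):  # stand-in for CheckStageStatus
    PASS = "pass"
    FAIL = "fail"
    SKIPPED = "skipped"

MANDATORY = {"source_isolation", "policy", "manifest"}  # hypothetical stage names

def derive_final_status_sketch(results: Dict[str, StageStatus]) -> str:
    if any(status is StageStatus.FAIL for status in results.values()):
        return "BLOCKED"
    missing = MANDATORY - set(results)
    skipped = {name for name, status in results.items()
               if status is StageStatus.SKIPPED and name in MANDATORY}
    if missing or skipped:
        return "FAILED"
    return "COMPLIANT"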

View File

@@ -52,8 +52,10 @@ class ComplianceExecutionResult:
# [DEF:ComplianceExecutionService:Class]
# @PURPOSE: Execute clean-release compliance lifecycle over trusted snapshots and immutable evidence.
# @PRE: repository and config_manager are initialized; the database session is active and the candidate is registered.
# @POST: Run state, stage records, violations and an optional ComplianceReport with pass/fail status and violation details are persisted consistently.
# @SIDE_EFFECT: Updates compliance status in database, logs violations
# @DATA_CONTRACT: ComplianceCheckResult, ComplianceReport, Violation
class ComplianceExecutionService:
TASK_PLUGIN_ID = "clean-release-compliance"

View File

@@ -13,6 +13,10 @@
# @TEST_EDGE: missing_stage_result -> Finalization with incomplete/empty mandatory stage set must not produce COMPLIANT
# @TEST_EDGE: report_generation_error -> Downstream reporting failure does not alter orchestrator status derivation contract
# @TEST_INVARIANT: compliant_requires_all_mandatory_pass -> VERIFIED_BY: [stage_failure_blocks_release]
# @PRE: ManifestService and PolicyEngine are available
# @POST: OrchestrationResult with compliance status
# @SIDE_EFFECT: Triggers compliance checks; may modify manifest state
# @DATA_CONTRACT: Manifest -> ComplianceReport
from __future__ import annotations

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Data Transfer Objects for clean release compliance subsystem.
# @LAYER: Application
# @RELATION: DEPENDS_ON -> pydantic
from datetime import datetime
from typing import List, Optional, Dict, Any

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Canonical enums for clean release lifecycle and compliance.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> enum
from enum import Enum

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Domain exceptions for clean release compliance subsystem.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> Exception
class CleanReleaseError(Exception):
"""Base exception for clean release subsystem."""

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Unified entry point for clean release operations.
# @LAYER: Application
# @RELATION: DEPENDS_ON -> ComplianceOrchestrator
from typing import List, Optional
from src.services.clean_release.repositories import (

View File

@@ -9,6 +9,8 @@
# @PRE: Candidate exists and is PREPARED or MANIFEST_BUILT; artifacts are present.
# @POST: New immutable manifest is persisted with incremented version and deterministic digest.
# @INVARIANT: Existing manifests are never mutated.
# @SIDE_EFFECT: Persists new manifest records during processing; existing manifests are never modified.
# @DATA_CONTRACT: Manifest -> ManifestRecord; Candidate -> ManifestRecord
from __future__ import annotations

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Map between domain entities (SQLAlchemy models) and DTOs.
# @LAYER: Application
# @RELATION: DEPENDS_ON -> clean_release_dto
from typing import List
from src.models.clean_release import (

View File

@@ -6,6 +6,10 @@
# @RELATION: [DEPENDS_ON] ->[CleanReleaseModels]
# @RELATION: [DEPENDS_ON] ->[LoggerModule]
# @INVARIANT: Enterprise-clean policy always treats non-registry sources as violations.
# @DATA_CONTRACT: Candidate -> PolicyDecision
# @PRE: PolicyRepository is accessible
# @POST: PolicyDecision returned with approval status
# @SIDE_EFFECT: Read-only policy evaluation; no state changes
from __future__ import annotations

View File

@@ -7,6 +7,10 @@
# @RELATION: [DEPENDS_ON] ->[RepositoryRelations]
# @RELATION: [DEPENDS_ON] ->[clean_release_exceptions]
# @INVARIANT: Trusted snapshot resolution is based only on ConfigManager active identifiers.
# @DATA_CONTRACT: PolicyRequest -> ResolutionResult
# @PRE: PolicyRepository and Manifest are available
# @POST: ResolutionResult with matched policies
# @SIDE_EFFECT: Read-only policy evaluation; logs resolution decisions
from __future__ import annotations

View File

@@ -12,6 +12,10 @@
# @TEST_EDGE: counter_mismatch -> blocking counter cannot exceed total violations counter
# @TEST_EDGE: missing_operator_summary -> non-terminal run prevents report creation and summary generation
# @TEST_INVARIANT: blocking_count_le_total_count -> VERIFIED_BY: [counter_mismatch, empty_violations_for_blocked]
# @DATA_CONTRACT: Manifest -> Report
# @PRE: ManifestService and compliance data are available
# @POST: Report with generated operator summary is produced.
# @SIDE_EFFECT: Writes report artifacts to database, generates audit trail
from __future__ import annotations

View File

@@ -1,6 +1,7 @@
# [DEF:clean_release_repositories:Module]
# @COMPLEXITY: 3
# @PURPOSE: Export all clean release repositories.
# @RELATION: DEPENDS_ON -> sqlalchemy
from .candidate_repository import CandidateRepository
from .artifact_repository import ArtifactRepository

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query approval decisions.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query candidate artifacts.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query audit logs for clean release operations.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query release candidates.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query compliance runs, stage runs, and violations.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -1,53 +1,93 @@
# [DEF:manifest_repository:Module]
# [DEF:ManifestRepositoryModule:Module]
# @COMPLEXITY: 3
# @PURPOSE: Persist and query distribution manifests.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> DistributionManifest
# @RELATION: DEPENDS_ON -> sqlalchemy
# @RELATION: DEPENDS_ON -> belief_scope
from typing import Optional, List
from sqlalchemy.orm import Session
from src.models.clean_release import DistributionManifest
from src.core.logger import belief_scope
# [DEF:ManifestRepository:Class]
# @COMPLEXITY: 3
# @PURPOSE: Encapsulates database CRUD operations for DistributionManifest entities.
# @RELATION: DEPENDS_ON -> DistributionManifest
# @RELATION: DEPENDS_ON -> sqlalchemy.Session
class ManifestRepository:
"""
@PURPOSE: Encapsulates database operations for DistributionManifest.
"""
"""Repository for distribution manifest persistence."""
# [DEF:ManifestRepository.__init__:Function]
# @COMPLEXITY: 1
# @PURPOSE: Initialize repository with an active SQLAlchemy session.
# @PRE: db is a valid SQLAlchemy Session instance.
# @POST: Repository is ready for database operations.
def __init__(self, db: Session):
self.db = db
# [/DEF:ManifestRepository.__init__:Function]
# [DEF:ManifestRepository.save:Function]
# @COMPLEXITY: 3
# @PURPOSE: Persist a DistributionManifest to the database.
# @PRE: manifest is a valid DistributionManifest instance with required fields populated.
# @POST: Manifest is committed to database and refreshed with generated ID.
# @SIDE_EFFECT: Database commit via session.commit().
# @RELATION: DEPENDS_ON -> DistributionManifest
def save(self, manifest: DistributionManifest) -> DistributionManifest:
"""
@PURPOSE: Persist a manifest.
@POST: Manifest is committed and refreshed.
"""
with belief_scope("ManifestRepository.save"):
self.db.add(manifest)
self.db.commit()
self.db.refresh(manifest)
return manifest
# [/DEF:ManifestRepository.save:Function]
# [DEF:ManifestRepository.get_by_id:Function]
# @COMPLEXITY: 2
# @PURPOSE: Retrieve a single DistributionManifest by its primary key.
# @PRE: manifest_id is a valid string identifier.
# @POST: Returns DistributionManifest if found, None otherwise.
# @RELATION: DEPENDS_ON -> DistributionManifest
def get_by_id(self, manifest_id: str) -> Optional[DistributionManifest]:
"""
@PURPOSE: Retrieve a manifest by ID.
"""
with belief_scope("ManifestRepository.get_by_id"):
return self.db.query(DistributionManifest).filter(DistributionManifest.id == manifest_id).first()
return self.db.query(DistributionManifest).filter(
DistributionManifest.id == manifest_id
).first()
# [/DEF:ManifestRepository.get_by_id:Function]
# [DEF:ManifestRepository.get_latest_for_candidate:Function]
# @COMPLEXITY: 3
# @PURPOSE: Retrieve the most recent manifest version for a given candidate.
# @PRE: candidate_id is a valid string identifier.
# @POST: Returns the highest manifest_version manifest for the candidate, or None.
# @RELATION: DEPENDS_ON -> DistributionManifest
def get_latest_for_candidate(self, candidate_id: str) -> Optional[DistributionManifest]:
"""
@PURPOSE: Retrieve the latest manifest for a candidate.
"""
with belief_scope("ManifestRepository.get_latest_for_candidate"):
return self.db.query(DistributionManifest)\
.filter(DistributionManifest.candidate_id == candidate_id)\
.order_by(DistributionManifest.manifest_version.desc())\
return (
self.db.query(DistributionManifest)
.filter(DistributionManifest.candidate_id == candidate_id)
.order_by(DistributionManifest.manifest_version.desc())
.first()
)
# [/DEF:ManifestRepository.get_latest_for_candidate:Function]
# [DEF:ManifestRepository.list_by_candidate:Function]
# @COMPLEXITY: 2
# @PURPOSE: List all manifests for a specific candidate, ordered by version.
# @PRE: candidate_id is a valid string identifier.
# @POST: Returns a list of DistributionManifest instances (may be empty).
# @RELATION: DEPENDS_ON -> DistributionManifest
def list_by_candidate(self, candidate_id: str) -> List[DistributionManifest]:
"""
@PURPOSE: List all manifests for a specific candidate.
"""
with belief_scope("ManifestRepository.list_by_candidate"):
return self.db.query(DistributionManifest).filter(DistributionManifest.candidate_id == candidate_id).all()
return (
self.db.query(DistributionManifest)
.filter(DistributionManifest.candidate_id == candidate_id)
.all()
)
# [/DEF:ManifestRepository.list_by_candidate:Function]
# [/DEF:manifest_repository:Module]
# [/DEF:ManifestRepository:Class]
# [/DEF:ManifestRepositoryModule:Module]
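A short usage sketch of the repository above. SessionLocal appears elsewhere in this commit (src.core.database); the repository import path is assumed from the package layout and may differ:

from src.core.database import SessionLocal
from src.services.clean_release.repositories import ManifestRepository  # assumed export path

db = SessionLocal()
try:
    repo = ManifestRepository(db)
    latest = repo.get_latest_for_candidate("cand-1")   # None when no manifest exists yet
    history = repo.list_by_candidate("cand-1")         # possibly empty list
finally:
    db.close()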

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query policy and registry snapshots.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query publication records.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -2,6 +2,7 @@
# @COMPLEXITY: 3
# @PURPOSE: Persist and query compliance reports.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> sqlalchemy
from typing import Optional, List
from sqlalchemy.orm import Session

View File

@@ -69,6 +69,10 @@ class DatasetReviewSessionRepository:
# [DEF:__init__:Function]
# @COMPLEXITY: 4
# @PURPOSE: Bind one live SQLAlchemy session to the repository instance.
# @RELATION: DEPENDS_ON -> DatasetReviewSessionRepository
# @RELATION: CALLS -> sqlalchemy
# @PRE: db_session is not None
# @POST: Repository instance initialized with valid session
# @SIDE_EFFECT: None - pure initialization
def __init__(self, db: Session):
self.db = db
self.event_logger = SessionEventLogger(db)
@@ -208,6 +212,9 @@ class DatasetReviewSessionRepository:
# @PURPOSE: Return the full session aggregate for API and frontend resume flows.
# @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
# @RELATION: [DEPENDS_ON] -> [SessionCollaborator]
# @PRE: session_id is a valid UUID; db_session is active
# @POST: Returns SessionDetail with all fields populated
# @SIDE_EFFECT: Read-only database operation
def load_session_detail(
self, session_id: str, user_id: str
) -> Optional[DatasetReviewSession]:
@@ -334,6 +341,9 @@ class DatasetReviewSessionRepository:
# @RELATION: [DEPENDS_ON] -> [ImportedFilter]
# @RELATION: [DEPENDS_ON] -> [TemplateVariable]
# @RELATION: [DEPENDS_ON] -> [ExecutionMapping]
# @PRE: session_id is a valid UUID; recovery_state is a valid dict
# @POST: Recovery state persisted to database
# @SIDE_EFFECT: Writes to database
def save_recovery_state(
self,
session_id: str,

View File

@@ -34,6 +34,7 @@ from src.core.database import SessionLocal
# [DEF:GitService:Class]
# @COMPLEXITY: 3
# @PURPOSE: Wrapper for GitPython operations with semantic logging and error handling.
# @RELATION: DEPENDS_ON -> git
class GitService:
"""
Wrapper for GitPython operations.

View File

@@ -87,6 +87,7 @@ DEFAULT_LLM_ASSISTANT_SETTINGS: Dict[str, str] = {
# @PURPOSE: Ensure llm settings contain stable schema with prompts section and default templates.
# @PRE: llm_settings is dictionary-like value or None.
# @POST: Returned dict contains prompts with all required template keys.
# @RELATION: DEPENDS_ON -> LLMProviderService
def normalize_llm_settings(llm_settings: Any) -> Dict[str, Any]:
normalized: Dict[str, Any] = {
"providers": [],
@@ -131,6 +132,7 @@ def normalize_llm_settings(llm_settings: Any) -> Dict[str, Any]:
# @PURPOSE: Heuristically determine whether model supports image input required for dashboard validation.
# @PRE: model_name may be empty or mixed-case.
# @POST: Returns True when model likely supports multimodal input.
# @RELATION: DEPENDS_ON -> LLMProviderService
def is_multimodal_model(model_name: str, provider_type: Optional[str] = None) -> bool:
token = (model_name or "").strip().lower()
if not token:
@@ -173,6 +175,7 @@ def is_multimodal_model(model_name: str, provider_type: Optional[str] = None) ->
# @PURPOSE: Resolve provider id configured for a task binding with fallback to default provider.
# @PRE: llm_settings is normalized or raw dict from config.
# @POST: Returns configured provider id or fallback id/empty string when not defined.
# @RELATION: DEPENDS_ON -> LLMProviderService
def resolve_bound_provider_id(llm_settings: Any, task_key: str) -> str:
normalized = normalize_llm_settings(llm_settings)
bindings = normalized.get("provider_bindings", {})
@@ -189,6 +192,7 @@ def resolve_bound_provider_id(llm_settings: Any, task_key: str) -> str:
# @PURPOSE: Render prompt template using deterministic placeholder replacement with graceful fallback.
# @PRE: template is a string and variables values are already stringifiable.
# @POST: Returns rendered prompt text with known placeholders substituted.
# @RELATION: DEPENDS_ON -> LLMProviderService
def render_prompt(template: str, variables: Dict[str, Any]) -> str:
rendered = template
for key, value in variables.items():
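The truncated render_prompt body above iterates over variables; a minimal sketch of "deterministic placeholder replacement with graceful fallback" consistent with that @PURPOSE (the {name} placeholder syntax is an assumption):

from typing import Any, Dict

def render_prompt_sketch(template: str, variables: Dict[str, Any]) -> str:
    # Replace known placeholders; unknown placeholders are left in place
    # instead of raising, which is the graceful-fallback behavior.
    rendered = template
    for key, value in variables.items():
        rendered = rendered.replace("{" + key + "}", str(value))
    return rendered

print(render_prompt_sketch("Validate {dashboard} in {env}", {"dashboard": "Sales"}))
# -> "Validate Sales in {env}"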

View File

@@ -12,8 +12,11 @@
# @RELATION: [DEPENDS_ON] ->[ValidationPolicy]
# @RELATION: [DEPENDS_ON] ->[UserDashboardPreference]
#
# @INVARIANT: Notifications are dispatched asynchronously via BackgroundTasks.
# @INVARIANT: Missing profile or provider config must not crash the pipeline.
# @INVARIANT: NotificationService maintains singleton pattern for per-channel notifications
# @DATA_CONTRACT: NotificationChannelConfig -> NotificationRecipient
# @PRE: channel_config is loaded
# @POST: Notification dispatched via configured providers
# @SIDE_EFFECT: Sends notifications via configured providers
from typing import Any, Dict, List, Optional
from fastapi import BackgroundTasks

View File

@@ -11,8 +11,7 @@
# @RELATION: DEPENDS_ON -> [User]
# @RELATION: DEPENDS_ON -> [sqlalchemy.orm.Session]
#
# @INVARIANT: Preference mutations are always scoped to authenticated user identity.
# @INVARIANT: Username normalization is trim+lower and shared by save and matching paths.
# @INVARIANT: Profile ID must be unique per user session
#
# @TEST_CONTRACT: ProfilePreferenceUpdateRequest -> ProfilePreferenceResponse
# @TEST_FIXTURE: valid_profile_update -> {"user_id":"u-1","superset_username":"John_Doe","show_only_my_dashboards":true}
@@ -20,6 +19,10 @@
# @TEST_EDGE: cross_user_mutation -> attempt to update another user preference returns forbidden
# @TEST_EDGE: lookup_env_not_found -> unknown environment_id returns not found
# @TEST_INVARIANT: normalization_consistency -> VERIFIED_BY: [valid_profile_update, enable_without_username]
# @DATA_CONTRACT: Profile_id -> ProfileInfo; session_id -> valid UUID
# @PRE: Session is active and valid
# @POST: Profile with updated fields is populated and returned.
# @SIDE_EFFECT: Database read/write operations
# [SECTION: IMPORTS]
from datetime import datetime
@@ -98,6 +101,7 @@ class ProfileAuthorizationError(Exception):
# @POST: Preference operations remain user-scoped and return normalized profile/lookup responses.
# @SIDE_EFFECT: Writes preference records and encrypted tokens; performs external account lookups when requested.
# @DATA_CONTRACT: Input[User,ProfilePreferenceUpdateRequest|SupersetAccountLookupRequest] -> Output[ProfilePreferenceResponse|SupersetAccountLookupResponse|bool]
# @INVARIANT: Profile data integrity maintained, cache consistency with database state
class ProfileService:
# [DEF:init:Function]
# @RELATION: BINDS_TO -> ProfileService

View File

@@ -11,6 +11,7 @@ from src.services.reports.type_profiles import resolve_task_type, get_type_profi
# @TEST_INVARIANT: fallback_to_unknown
# [DEF:test_resolve_task_type_fallbacks:Function]
# @RELATION: BINDS_TO -> __tests__/test_report_type_profiles
# @PURPOSE: Verify resolve_task_type_fallbacks returns correct fallback type when primary is missing.
def test_resolve_task_type_fallbacks():
"""Verify missing/unmapped plugin_id returns TaskType.UNKNOWN."""
assert resolve_task_type(None) == TaskType.UNKNOWN
@@ -23,6 +24,7 @@ def test_resolve_task_type_fallbacks():
# [DEF:test_resolve_task_type_valid:Function]
# @RELATION: BINDS_TO -> __tests__/test_report_type_profiles
# @PURPOSE: Verify resolve_task_type_valid returns the correct type when valid input is provided.
def test_resolve_task_type_valid():
"""Verify known plugin IDs map correctly."""
assert resolve_task_type("superset-migration") == TaskType.MIGRATION
@@ -35,6 +37,7 @@ def test_resolve_task_type_valid():
# [DEF:test_get_type_profile_valid:Function]
# @RELATION: BINDS_TO -> __tests__/test_report_type_profiles
# @PURPOSE: Verify get_type_profile_valid returns the correct profile for a valid task type.
def test_get_type_profile_valid():
"""Verify known task types return correct profile metadata."""
profile = get_type_profile(TaskType.MIGRATION)
@@ -48,6 +51,7 @@ def test_get_type_profile_valid():
# [DEF:test_get_type_profile_fallback:Function]
# @RELATION: BINDS_TO -> __tests__/test_report_type_profiles
# @PURPOSE: Verify get_type_profile_fallback returns default profile when type is unknown.
def test_get_type_profile_fallback():
"""Verify unknown task type returns fallback profile."""
# Assuming TaskType.UNKNOWN or any non-mapped value

View File

@@ -6,7 +6,11 @@
# @RELATION: DEPENDS_ON ->[backend.src.core.task_manager.models.Task:Function]
# @RELATION: DEPENDS_ON ->[backend.src.models.report:Function]
# @RELATION: DEPENDS_ON ->[backend.src.services.reports.type_profiles:Function]
# @INVARIANT: Unknown task types and partial payloads remain visible via fallback mapping.
# @INVARIANT: Normalizer instance maintains consistent field order
# @DATA_CONTRACT: ReportRow -> NormalizerInput; session_id -> valid UUID
# @PRE: session is active and valid
# @POST: Returns Normalizer output with normalized fields
# @SIDE_EFFECT: Read-only database operations
# [SECTION: IMPORTS]
from datetime import datetime

View File

@@ -10,7 +10,11 @@
# @RELATION: [DEPENDS_ON] ->[ReportDetailView]
# @RELATION: [DEPENDS_ON] ->[normalize_task_report]
# @RELATION: [DEPENDS_ON] ->[CleanReleaseRepository]
# @INVARIANT: List responses are deterministic and include applied filter echo metadata.
# @INVARIANT: ReportService maintains consistent report structure
# @DATA_CONTRACT: ReportQuery -> ReportRow; session_id -> valid UUID
# @PRE: session is active and valid
# @POST: Returns Report with generated summary
# @SIDE_EFFECT: Read-only database operations; logs report generation
# [SECTION: IMPORTS]
from datetime import datetime, timezone