fix: commit semantic repair changes

This commit is contained in:
2026-03-21 11:22:25 +03:00
parent 0900208c1a
commit abee05558f
272 changed files with 4603 additions and 1668 deletions

View File

@@ -1,4 +1,4 @@
# [DEF:backend.tests.core.migration.test_archive_parser:Module]
# [DEF:TestArchiveParser:Module]
#
# @COMPLEXITY: 3
# @PURPOSE: Unit tests for MigrationArchiveParser ZIP extraction contract.
@@ -20,6 +20,8 @@ if backend_dir not in sys.path:
from src.core.migration.archive_parser import MigrationArchiveParser
# [DEF:test_extract_objects_from_zip_collects_all_types:Function]
# @RELATION: BINDS_TO -> TestArchiveParser
def test_extract_objects_from_zip_collects_all_types():
parser = MigrationArchiveParser()
with tempfile.TemporaryDirectory() as td:
@@ -59,4 +61,5 @@ def test_extract_objects_from_zip_collects_all_types():
raise AssertionError("dataset uuid mismatch")
# [/DEF:backend.tests.core.migration.test_archive_parser:Module]
# [/DEF:TestArchiveParser:Module]
# [/DEF:test_extract_objects_from_zip_collects_all_types:Function]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.tests.core.migration.test_dry_run_orchestrator:Module]
# [DEF:TestDryRunOrchestrator:Module]
#
# @COMPLEXITY: 3
# @PURPOSE: Unit tests for MigrationDryRunService diff and risk computation contracts.
@@ -23,11 +23,17 @@ from src.models.dashboard import DashboardSelection
from src.models.mapping import Base
# [DEF:_load_fixture:Function]
# @RELATION: BINDS_TO -> TestDryRunOrchestrator
def _load_fixture() -> dict:
    """Read the shared migration dry-run fixture JSON from tests/fixtures.

    Resolves the fixture path relative to this test module so the test
    works regardless of the pytest invocation directory.
    """
    fixture_file = Path(__file__).parents[2] / "fixtures" / "migration_dry_run_fixture.json"
    with fixture_file.open() as handle:
        return json.load(handle)
# [/DEF:_load_fixture:Function]
# [DEF:_make_session:Function]
# @RELATION: BINDS_TO -> TestDryRunOrchestrator
def _make_session():
engine = create_engine(
"sqlite:///:memory:",
@@ -39,6 +45,10 @@ def _make_session():
return Session()
# [/DEF:_make_session:Function]
# [DEF:test_migration_dry_run_service_builds_diff_and_risk:Function]
# @RELATION: BINDS_TO -> TestDryRunOrchestrator
def test_migration_dry_run_service_builds_diff_and_risk():
# @TEST_CONTRACT: dry_run_result_contract -> {
# required_fields: {diff: object, summary: object, risk: object},
@@ -107,4 +117,5 @@ def test_migration_dry_run_service_builds_diff_and_risk():
raise AssertionError("breaking_reference risk is not detected")
# [/DEF:backend.tests.core.migration.test_dry_run_orchestrator:Module]
# [/DEF:TestDryRunOrchestrator:Module]
# [/DEF:test_migration_dry_run_service_builds_diff_and_risk:Function]

View File

@@ -11,6 +11,8 @@ from src.services.git_service import GitService
from src.core.superset_client import SupersetClient
from src.core.config_models import Environment
# [DEF:test_git_service_get_repo_path_guard:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_git_service_get_repo_path_guard():
"""Verify that _get_repo_path raises ValueError if dashboard_id is None."""
service = GitService(base_path="test_repos")
@@ -18,6 +20,10 @@ def test_git_service_get_repo_path_guard():
service._get_repo_path(None)
# [/DEF:test_git_service_get_repo_path_guard:Function]
# [DEF:test_git_service_get_repo_path_recreates_base_dir:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_git_service_get_repo_path_recreates_base_dir():
"""Verify _get_repo_path recreates missing base directory before returning repo path."""
service = GitService(base_path="test_repos_runtime_recreate")
@@ -28,6 +34,10 @@ def test_git_service_get_repo_path_recreates_base_dir():
assert Path(service.base_path).is_dir()
assert repo_path == str(Path(service.base_path) / "42")
# [/DEF:test_git_service_get_repo_path_recreates_base_dir:Function]
# [DEF:test_superset_client_import_dashboard_guard:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_superset_client_import_dashboard_guard():
"""Verify that import_dashboard raises ValueError if file_name is None."""
mock_env = Environment(
@@ -42,6 +52,10 @@ def test_superset_client_import_dashboard_guard():
client.import_dashboard(None)
# [/DEF:test_superset_client_import_dashboard_guard:Function]
# [DEF:test_git_service_init_repo_reclones_when_path_is_not_a_git_repo:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_git_service_init_repo_reclones_when_path_is_not_a_git_repo():
"""Verify init_repo reclones when target path exists but is not a valid Git repository."""
service = GitService(base_path="test_repos_invalid_repo")
@@ -61,6 +75,10 @@ def test_git_service_init_repo_reclones_when_path_is_not_a_git_repo():
assert not target_path.exists()
# [/DEF:test_git_service_init_repo_reclones_when_path_is_not_a_git_repo:Function]
# [DEF:test_git_service_ensure_gitflow_branches_creates_and_pushes_missing_defaults:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_git_service_ensure_gitflow_branches_creates_and_pushes_missing_defaults():
"""Verify _ensure_gitflow_branches creates dev/preprod locally and pushes them to origin."""
service = GitService(base_path="test_repos_gitflow_defaults")
@@ -115,6 +133,10 @@ def test_git_service_ensure_gitflow_branches_creates_and_pushes_missing_defaults
assert "preprod:preprod" in repo.origin.pushed
# [/DEF:test_git_service_ensure_gitflow_branches_creates_and_pushes_missing_defaults:Function]
# [DEF:test_git_service_configure_identity_updates_repo_local_config:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_git_service_configure_identity_updates_repo_local_config():
"""Verify configure_identity writes repository-local user.name/user.email."""
service = GitService(base_path="test_repos_identity")
@@ -130,3 +152,4 @@ def test_git_service_configure_identity_updates_repo_local_config():
fake_repo.config_writer.assert_called_once_with(config_level="repository")
config_writer.set_value.assert_any_call("user", "name", "user_1")
config_writer.set_value.assert_any_call("user", "email", "user1@mail.ru")
# [/DEF:test_git_service_configure_identity_updates_repo_local_config:Function]

View File

@@ -1,9 +1,9 @@
# [DEF:backend.tests.core.test_git_service_gitea_pr:Module]
# [DEF:TestGitServiceGiteaPr:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @SEMANTICS: tests, git, gitea, pull_request, fallback
# @PURPOSE: Validate Gitea PR creation fallback behavior when configured server URL is stale.
# @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.git_service.create_gitea_pull_request
# @INVARIANT: A 404 from primary Gitea URL retries once against remote-url host when different.
import asyncio
@@ -19,6 +19,7 @@ from src.services.git_service import GitService
# [DEF:test_derive_server_url_from_remote_strips_credentials:Function]
# @RELATION: BINDS_TO -> TestGitServiceGiteaPr
# @PURPOSE: Ensure helper returns host base URL and removes embedded credentials.
# @PRE: remote_url is an https URL with username/token.
# @POST: Result is scheme+host only.
@@ -32,6 +33,7 @@ def test_derive_server_url_from_remote_strips_credentials():
# [DEF:test_create_gitea_pull_request_retries_with_remote_host_on_404:Function]
# @RELATION: BINDS_TO -> TestGitServiceGiteaPr
# @PURPOSE: Verify create_gitea_pull_request retries with remote URL host after primary 404.
# @PRE: primary server_url differs from remote_url host.
# @POST: Method returns success payload from fallback request.
@@ -67,6 +69,7 @@ def test_create_gitea_pull_request_retries_with_remote_host_on_404(monkeypatch):
# [DEF:test_create_gitea_pull_request_returns_branch_error_when_target_missing:Function]
# @RELATION: BINDS_TO -> TestGitServiceGiteaPr
# @PURPOSE: Ensure Gitea 404 on PR creation is mapped to actionable target-branch validation error.
# @PRE: PR create call returns 404 and target branch is absent.
# @POST: Service raises HTTPException 400 with explicit missing target branch message.
@@ -101,4 +104,4 @@ def test_create_gitea_pull_request_returns_branch_error_when_target_missing(monk
assert "target branch 'preprod'" in str(exc_info.value.detail)
# [/DEF:test_create_gitea_pull_request_returns_branch_error_when_target_missing:Function]
# [/DEF:backend.tests.core.test_git_service_gitea_pr:Module]
# [/DEF:TestGitServiceGiteaPr:Module]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.tests.core.test_migration_engine:Module]
# [DEF:TestMigrationEngine:Module]
#
# @COMPLEXITY: 3
# @PURPOSE: Unit tests for MigrationEngine's cross-filter patching algorithms.
@@ -26,8 +26,15 @@ from src.models.mapping import ResourceType
# --- Fixtures ---
# [DEF:MockMappingService:Class]
# @RELATION: BINDS_TO -> TestMigrationEngine
# @COMPLEXITY: 2
# @PURPOSE: Deterministic mapping service double for native filter ID remapping scenarios.
# @INVARIANT: Returns mappings only for requested UUID keys present in seeded map.
class MockMappingService:
"""Mock that simulates IdMappingService.get_remote_ids_batch."""
def __init__(self, mappings: dict):
self.mappings = mappings
@@ -39,26 +46,32 @@ class MockMappingService:
return result
# [/DEF:MockMappingService:Class]
# [DEF:_write_dashboard_yaml:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def _write_dashboard_yaml(dir_path: Path, metadata: dict) -> Path:
"""Helper: writes a dashboard YAML file with json_metadata."""
file_path = dir_path / "dash.yaml"
with open(file_path, 'w') as f:
with open(file_path, "w") as f:
yaml.dump({"json_metadata": json.dumps(metadata)}, f)
return file_path
# --- _patch_dashboard_metadata tests ---
# [/DEF:_write_dashboard_yaml:Function]
# [DEF:test_patch_dashboard_metadata_replaces_chart_ids:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_patch_dashboard_metadata_replaces_chart_ids():
"""Verifies that chartId values are replaced using the mapping service."""
mock_service = MockMappingService({"uuid-chart-A": 999})
engine = MigrationEngine(mock_service)
metadata = {
"native_filter_configuration": [
{"targets": [{"chartId": 42}]}
]
}
metadata = {"native_filter_configuration": [{"targets": [{"chartId": 42}]}]}
with tempfile.TemporaryDirectory() as td:
fp = _write_dashboard_yaml(Path(td), metadata)
@@ -66,22 +79,25 @@ def test_patch_dashboard_metadata_replaces_chart_ids():
engine._patch_dashboard_metadata(fp, "target-env", source_map)
with open(fp, 'r') as f:
with open(fp, "r") as f:
data = yaml.safe_load(f)
result = json.loads(data["json_metadata"])
assert result["native_filter_configuration"][0]["targets"][0]["chartId"] == 999
assert (
result["native_filter_configuration"][0]["targets"][0]["chartId"] == 999
)
# [/DEF:test_patch_dashboard_metadata_replaces_chart_ids:Function]
# [DEF:test_patch_dashboard_metadata_replaces_dataset_ids:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_patch_dashboard_metadata_replaces_dataset_ids():
"""Verifies that datasetId values are replaced using the mapping service."""
mock_service = MockMappingService({"uuid-ds-B": 500})
engine = MigrationEngine(mock_service)
metadata = {
"native_filter_configuration": [
{"targets": [{"datasetId": 10}]}
]
}
metadata = {"native_filter_configuration": [{"targets": [{"datasetId": 10}]}]}
with tempfile.TemporaryDirectory() as td:
fp = _write_dashboard_yaml(Path(td), metadata)
@@ -89,12 +105,20 @@ def test_patch_dashboard_metadata_replaces_dataset_ids():
engine._patch_dashboard_metadata(fp, "target-env", source_map)
with open(fp, 'r') as f:
with open(fp, "r") as f:
data = yaml.safe_load(f)
result = json.loads(data["json_metadata"])
assert result["native_filter_configuration"][0]["targets"][0]["datasetId"] == 500
assert (
result["native_filter_configuration"][0]["targets"][0]["datasetId"]
== 500
)
# [/DEF:test_patch_dashboard_metadata_replaces_dataset_ids:Function]
# [DEF:test_patch_dashboard_metadata_skips_when_no_metadata:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_patch_dashboard_metadata_skips_when_no_metadata():
"""Verifies early return when json_metadata key is absent."""
mock_service = MockMappingService({})
@@ -102,16 +126,21 @@ def test_patch_dashboard_metadata_skips_when_no_metadata():
with tempfile.TemporaryDirectory() as td:
fp = Path(td) / "dash.yaml"
with open(fp, 'w') as f:
with open(fp, "w") as f:
yaml.dump({"title": "No metadata here"}, f)
engine._patch_dashboard_metadata(fp, "target-env", {})
with open(fp, 'r') as f:
with open(fp, "r") as f:
data = yaml.safe_load(f)
assert "json_metadata" not in data
# [/DEF:test_patch_dashboard_metadata_skips_when_no_metadata:Function]
# [DEF:test_patch_dashboard_metadata_handles_missing_targets:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_patch_dashboard_metadata_handles_missing_targets():
"""When some source IDs have no target mapping, patches what it can and leaves the rest."""
mock_service = MockMappingService({"uuid-A": 100}) # Only uuid-A maps
@@ -129,7 +158,7 @@ def test_patch_dashboard_metadata_handles_missing_targets():
engine._patch_dashboard_metadata(fp, "target-env", source_map)
with open(fp, 'r') as f:
with open(fp, "r") as f:
data = yaml.safe_load(f)
result = json.loads(data["json_metadata"])
targets = result["native_filter_configuration"][0]["targets"]
@@ -140,6 +169,11 @@ def test_patch_dashboard_metadata_handles_missing_targets():
# --- _extract_chart_uuids_from_archive tests ---
# [/DEF:test_patch_dashboard_metadata_handles_missing_targets:Function]
# [DEF:test_extract_chart_uuids_from_archive:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_extract_chart_uuids_from_archive():
"""Verifies that chart YAML files are parsed for id->uuid mappings."""
engine = MigrationEngine()
@@ -151,9 +185,9 @@ def test_extract_chart_uuids_from_archive():
chart1 = {"id": 42, "uuid": "uuid-42", "slice_name": "Chart One"}
chart2 = {"id": 99, "uuid": "uuid-99", "slice_name": "Chart Two"}
with open(charts_dir / "chart1.yaml", 'w') as f:
with open(charts_dir / "chart1.yaml", "w") as f:
yaml.dump(chart1, f)
with open(charts_dir / "chart2.yaml", 'w') as f:
with open(charts_dir / "chart2.yaml", "w") as f:
yaml.dump(chart2, f)
result = engine._extract_chart_uuids_from_archive(Path(td))
@@ -163,23 +197,33 @@ def test_extract_chart_uuids_from_archive():
# --- _transform_yaml tests ---
# [/DEF:test_extract_chart_uuids_from_archive:Function]
# [DEF:test_transform_yaml_replaces_database_uuid:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_transform_yaml_replaces_database_uuid():
"""Verifies that database_uuid in a dataset YAML is replaced."""
engine = MigrationEngine()
with tempfile.TemporaryDirectory() as td:
fp = Path(td) / "dataset.yaml"
with open(fp, 'w') as f:
with open(fp, "w") as f:
yaml.dump({"database_uuid": "source-uuid-abc", "table_name": "my_table"}, f)
engine._transform_yaml(fp, {"source-uuid-abc": "target-uuid-xyz"})
with open(fp, 'r') as f:
with open(fp, "r") as f:
data = yaml.safe_load(f)
assert data["database_uuid"] == "target-uuid-xyz"
assert data["table_name"] == "my_table"
# [/DEF:test_transform_yaml_replaces_database_uuid:Function]
# [DEF:test_transform_yaml_ignores_unmapped_uuid:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_transform_yaml_ignores_unmapped_uuid():
"""Verifies no changes when UUID is not in the mapping."""
engine = MigrationEngine()
@@ -187,18 +231,23 @@ def test_transform_yaml_ignores_unmapped_uuid():
with tempfile.TemporaryDirectory() as td:
fp = Path(td) / "dataset.yaml"
original = {"database_uuid": "unknown-uuid", "table_name": "test"}
with open(fp, 'w') as f:
with open(fp, "w") as f:
yaml.dump(original, f)
engine._transform_yaml(fp, {"other-uuid": "replacement"})
with open(fp, 'r') as f:
with open(fp, "r") as f:
data = yaml.safe_load(f)
assert data["database_uuid"] == "unknown-uuid"
# --- [NEW] transform_zip E2E tests ---
# [/DEF:test_transform_yaml_ignores_unmapped_uuid:Function]
# [DEF:test_transform_zip_end_to_end:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_transform_zip_end_to_end():
"""Verifies full orchestration: extraction, transformation, patching, and re-packaging."""
mock_service = MockMappingService({"char-uuid": 101, "ds-uuid": 202})
@@ -212,41 +261,41 @@ def test_transform_zip_end_to_end():
# Create source ZIP structure
with tempfile.TemporaryDirectory() as src_dir:
src_path = Path(src_dir)
# 1. Dataset
ds_dir = src_path / "datasets"
ds_dir.mkdir()
with open(ds_dir / "ds.yaml", 'w') as f:
with open(ds_dir / "ds.yaml", "w") as f:
yaml.dump({"database_uuid": "source-db-uuid", "table_name": "users"}, f)
# 2. Chart
ch_dir = src_path / "charts"
ch_dir.mkdir()
with open(ch_dir / "ch.yaml", 'w') as f:
with open(ch_dir / "ch.yaml", "w") as f:
yaml.dump({"id": 10, "uuid": "char-uuid"}, f)
# 3. Dashboard
db_dir = src_path / "dashboards"
db_dir.mkdir()
metadata = {"native_filter_configuration": [{"targets": [{"chartId": 10}]}]}
with open(db_dir / "db.yaml", 'w') as f:
with open(db_dir / "db.yaml", "w") as f:
yaml.dump({"json_metadata": json.dumps(metadata)}, f)
with zipfile.ZipFile(zip_path, 'w') as zf:
with zipfile.ZipFile(zip_path, "w") as zf:
for root, _, files in os.walk(src_path):
for file in files:
p = Path(root) / file
zf.write(p, p.relative_to(src_path))
db_mapping = {"source-db-uuid": "target-db-uuid"}
# Execute transform
success = engine.transform_zip(
str(zip_path),
str(output_path),
db_mapping,
target_env_id="test-target",
fix_cross_filters=True
str(zip_path),
str(output_path),
db_mapping,
target_env_id="test-target",
fix_cross_filters=True,
)
assert success is True
@@ -254,23 +303,31 @@ def test_transform_zip_end_to_end():
# Verify contents
with tempfile.TemporaryDirectory() as out_dir:
with zipfile.ZipFile(output_path, 'r') as zf:
with zipfile.ZipFile(output_path, "r") as zf:
zf.extractall(out_dir)
out_path = Path(out_dir)
# Verify dataset transformation
with open(out_path / "datasets" / "ds.yaml", 'r') as f:
with open(out_path / "datasets" / "ds.yaml", "r") as f:
ds_data = yaml.safe_load(f)
assert ds_data["database_uuid"] == "target-db-uuid"
# Verify dashboard patching
with open(out_path / "dashboards" / "db.yaml", 'r') as f:
with open(out_path / "dashboards" / "db.yaml", "r") as f:
db_data = yaml.safe_load(f)
meta = json.loads(db_data["json_metadata"])
assert meta["native_filter_configuration"][0]["targets"][0]["chartId"] == 101
assert (
meta["native_filter_configuration"][0]["targets"][0]["chartId"]
== 101
)
# [/DEF:test_transform_zip_end_to_end:Function]
# [DEF:test_transform_zip_invalid_path:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_transform_zip_invalid_path():
"""@PRE: Verify behavior (False) on invalid ZIP path."""
engine = MigrationEngine()
@@ -278,13 +335,19 @@ def test_transform_zip_invalid_path():
assert success is False
# [/DEF:test_transform_zip_invalid_path:Function]
# [DEF:test_transform_yaml_nonexistent_file:Function]
# @RELATION: BINDS_TO -> TestMigrationEngine
def test_transform_yaml_nonexistent_file():
    """Verify _transform_yaml raises FileNotFoundError for a missing YAML file."""
    engine = MigrationEngine()
    # _transform_yaml is not guarded internally, so a missing input file is
    # expected to surface as FileNotFoundError rather than being swallowed.
    with pytest.raises(FileNotFoundError):
        engine._transform_yaml(Path("non_existent.yaml"), {})
# [/DEF:backend.tests.core.test_migration_engine:Module]
# [/DEF:TestMigrationEngine:Module]
# [/DEF:test_transform_yaml_nonexistent_file:Function]

View File

@@ -1,4 +1,5 @@
# [DEF:test_clean_release_cli:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @PURPOSE: Smoke tests for the redesigned clean release CLI.
# @LAYER: Domain
@@ -17,6 +18,8 @@ from src.services.clean_release.enums import CandidateStatus, ComplianceDecision
from src.scripts.clean_release_cli import main as cli_main
# [DEF:test_cli_candidate_register_scaffold:Function]
# @RELATION: BINDS_TO -> test_clean_release_cli
def test_cli_candidate_register_scaffold() -> None:
"""Candidate register CLI command smoke test."""
exit_code = cli_main(
@@ -35,6 +38,10 @@ def test_cli_candidate_register_scaffold() -> None:
assert exit_code == 0
# [/DEF:test_cli_candidate_register_scaffold:Function]
# [DEF:test_cli_manifest_build_scaffold:Function]
# @RELATION: BINDS_TO -> test_clean_release_cli
def test_cli_manifest_build_scaffold() -> None:
"""Manifest build CLI command smoke test."""
register_exit = cli_main(
@@ -81,6 +88,10 @@ def test_cli_manifest_build_scaffold() -> None:
assert manifest_exit == 0
# [/DEF:test_cli_manifest_build_scaffold:Function]
# [DEF:test_cli_compliance_run_scaffold:Function]
# @RELATION: BINDS_TO -> test_clean_release_cli
def test_cli_compliance_run_scaffold() -> None:
"""Compliance CLI command smoke test for run/status/report/violations."""
repository = get_clean_release_repository()
@@ -181,6 +192,10 @@ def test_cli_compliance_run_scaffold() -> None:
assert report_exit == 0
# [/DEF:test_cli_compliance_run_scaffold:Function]
# [DEF:test_cli_release_gate_commands_scaffold:Function]
# @RELATION: BINDS_TO -> test_clean_release_cli
def test_cli_release_gate_commands_scaffold() -> None:
"""Release gate CLI smoke test for approve/reject/publish/revoke commands."""
repository = get_clean_release_repository()
@@ -303,3 +318,4 @@ def test_cli_release_gate_commands_scaffold() -> None:
# [/DEF:test_clean_release_cli:Module]
# [/DEF:test_cli_release_gate_commands_scaffold:Function]

View File

@@ -1,9 +1,9 @@
# [DEF:backend.tests.scripts.test_clean_release_tui:Module]
# [DEF:TestCleanReleaseTui:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @SEMANTICS: tests, tui, clean-release, curses
# @PURPOSE: Unit tests for the interactive curses TUI of the clean release process.
# @LAYER: Scripts
# @RELATION: TESTS -> backend.src.scripts.clean_release_tui
# @INVARIANT: TUI initializes, handles hotkeys (F5, F10) and safely falls back without TTY.
import os
@@ -27,6 +27,8 @@ def mock_stdscr() -> MagicMock:
return stdscr
# [DEF:test_headless_fallback:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseTui
def test_headless_fallback(capsys):
"""
@TEST_EDGE: stdout_unavailable
@@ -44,7 +46,11 @@ def test_headless_fallback(capsys):
assert "Use CLI/API workflow instead" in captured.err
# [/DEF:test_headless_fallback:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_initial_render:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseTui
def test_tui_initial_render(mock_curses_module, mock_stdscr: MagicMock):
"""
Simulates the initial rendering cycle of the TUI application to ensure
@@ -77,7 +83,11 @@ def test_tui_initial_render(mock_curses_module, mock_stdscr: MagicMock):
assert any("F5 Run" in str(call) for call in addstr_calls)
# [/DEF:test_tui_initial_render:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_run_checks_f5:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseTui
def test_tui_run_checks_f5(mock_curses_module, mock_stdscr: MagicMock):
"""
Simulates pressing F5 to transition into the RUNNING checks flow.
@@ -112,7 +122,11 @@ def test_tui_run_checks_f5(mock_curses_module, mock_stdscr: MagicMock):
assert len(app.violations_list) > 0
# [/DEF:test_tui_run_checks_f5:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_exit_f10:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseTui
def test_tui_exit_f10(mock_curses_module, mock_stdscr: MagicMock):
"""
Simulates pressing F10 to exit the application immediately without running checks.
@@ -129,7 +143,11 @@ def test_tui_exit_f10(mock_curses_module, mock_stdscr: MagicMock):
assert app.status == "READY"
# [/DEF:test_tui_exit_f10:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_clear_history_f7:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseTui
def test_tui_clear_history_f7(mock_curses_module, mock_stdscr: MagicMock):
"""
Simulates pressing F7 to clear history.
@@ -153,11 +171,17 @@ def test_tui_clear_history_f7(mock_curses_module, mock_stdscr: MagicMock):
assert len(app.checks_progress) == 0
# [/DEF:test_tui_clear_history_f7:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_real_mode_bootstrap_imports_artifacts_catalog:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseTui
def test_tui_real_mode_bootstrap_imports_artifacts_catalog(
mock_curses_module,
mock_stdscr: MagicMock,
tmp_path,
# [/DEF:test_tui_real_mode_bootstrap_imports_artifacts_catalog:Function]
):
"""
@TEST_CONTRACT: bootstrap.json + artifacts.json -> candidate PREPARED with imported artifacts
@@ -220,4 +244,4 @@ def test_tui_real_mode_bootstrap_imports_artifacts_catalog(
assert artifacts[0].detected_category == "core"
# [/DEF:backend.tests.scripts.test_clean_release_tui:Module]
# [/DEF:TestCleanReleaseTui:Module]

View File

@@ -1,8 +1,8 @@
# [DEF:test_clean_release_tui_v2:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @PURPOSE: Smoke tests for thin-client TUI action dispatch and blocked transition behavior.
# @LAYER: Domain
# @RELATION: TESTS -> backend.src.scripts.clean_release_tui
"""Smoke tests for the redesigned clean release TUI."""
@@ -15,6 +15,8 @@ from src.models.clean_release import CheckFinalStatus
from src.scripts.clean_release_tui import CleanReleaseTUI, main
# [DEF:_build_mock_stdscr:Function]
# @RELATION: BINDS_TO -> test_clean_release_tui_v2
def _build_mock_stdscr() -> MagicMock:
stdscr = MagicMock()
stdscr.getmaxyx.return_value = (40, 120)
@@ -22,7 +24,11 @@ def _build_mock_stdscr() -> MagicMock:
return stdscr
# [/DEF:_build_mock_stdscr:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_f5_dispatches_run_action:Function]
# @RELATION: BINDS_TO -> test_clean_release_tui_v2
def test_tui_f5_dispatches_run_action(mock_curses_module: MagicMock) -> None:
"""F5 should dispatch run action from TUI loop."""
mock_curses_module.KEY_F10 = curses.KEY_F10
@@ -40,7 +46,11 @@ def test_tui_f5_dispatches_run_action(mock_curses_module: MagicMock) -> None:
run_checks_mock.assert_called_once_with()
# [/DEF:test_tui_f5_dispatches_run_action:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_f5_run_smoke_reports_blocked_state:Function]
# @RELATION: BINDS_TO -> test_clean_release_tui_v2
def test_tui_f5_run_smoke_reports_blocked_state(mock_curses_module: MagicMock) -> None:
"""F5 smoke test should expose blocked outcome state after run action."""
mock_curses_module.KEY_F10 = curses.KEY_F10
@@ -65,6 +75,10 @@ def test_tui_f5_run_smoke_reports_blocked_state(mock_curses_module: MagicMock) -
assert app.violations_list
# [/DEF:test_tui_f5_run_smoke_reports_blocked_state:Function]
# [DEF:test_tui_non_tty_refuses_startup:Function]
# @RELATION: BINDS_TO -> test_clean_release_tui_v2
def test_tui_non_tty_refuses_startup(capsys) -> None:
"""Non-TTY startup must refuse TUI mode and redirect operator to CLI/API flow."""
with patch("sys.stdout.isatty", return_value=False):
@@ -76,7 +90,11 @@ def test_tui_non_tty_refuses_startup(capsys) -> None:
assert "Use CLI/API workflow instead" in captured.err
# [/DEF:test_tui_non_tty_refuses_startup:Function]
@patch("src.scripts.clean_release_tui.curses")
# [DEF:test_tui_f8_blocked_without_facade_binding:Function]
# @RELATION: BINDS_TO -> test_clean_release_tui_v2
def test_tui_f8_blocked_without_facade_binding(mock_curses_module: MagicMock) -> None:
"""F8 should not perform hidden state mutation when facade action is not bound."""
mock_curses_module.KEY_F10 = curses.KEY_F10
@@ -95,3 +113,4 @@ def test_tui_f8_blocked_without_facade_binding(mock_curses_module: MagicMock) ->
# [/DEF:test_clean_release_tui_v2:Module]
# [/DEF:test_tui_f8_blocked_without_facade_binding:Function]

View File

@@ -1,11 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_approval_service:Module]
# [DEF:TestApprovalService:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 5
# @SEMANTICS: tests, clean-release, approval, lifecycle, gate
# @PURPOSE: Define approval gate contracts for approve/reject operations over immutable compliance evidence.
# @LAYER: Tests
# @RELATION: TESTS -> src.services.clean_release.approval_service
# @RELATION: TESTS -> src.services.clean_release.enums
# @RELATION: TESTS -> src.services.clean_release.repository
# @INVARIANT: Approval is allowed only for PASSED report bound to candidate; duplicate approve and foreign report must be rejected.
from __future__ import annotations
@@ -21,6 +19,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_seed_candidate_with_report:Function]
# @RELATION: BINDS_TO -> TestApprovalService
# @PURPOSE: Seed candidate and report fixtures for approval gate tests.
# @PRE: candidate_id and report_id are non-empty.
# @POST: Repository contains candidate and report linked by candidate_id.
@@ -61,6 +60,7 @@ def _seed_candidate_with_report(
# [DEF:test_approve_rejects_blocked_report:Function]
# @RELATION: BINDS_TO -> TestApprovalService
# @PURPOSE: Ensure approve is rejected when latest report final status is not PASSED.
# @PRE: Candidate has BLOCKED report.
# @POST: approve_candidate raises ApprovalGateError.
@@ -83,6 +83,7 @@ def test_approve_rejects_blocked_report():
# [DEF:test_approve_rejects_foreign_report:Function]
# @RELATION: BINDS_TO -> TestApprovalService
# @PURPOSE: Ensure approve is rejected when report belongs to another candidate.
# @PRE: Candidate exists, report candidate_id differs.
# @POST: approve_candidate raises ApprovalGateError.
@@ -113,6 +114,7 @@ def test_approve_rejects_foreign_report():
# [DEF:test_approve_rejects_duplicate_approve:Function]
# @RELATION: BINDS_TO -> TestApprovalService
# @PURPOSE: Ensure repeated approve decision for same candidate is blocked.
# @PRE: Candidate has already been approved once.
# @POST: Second approve_candidate call raises ApprovalGateError.
@@ -143,6 +145,7 @@ def test_approve_rejects_duplicate_approve():
# [DEF:test_reject_persists_decision_without_promoting_candidate_state:Function]
# @RELATION: BINDS_TO -> TestApprovalService
# @PURPOSE: Ensure reject decision is immutable and does not promote candidate to APPROVED.
# @PRE: Candidate has PASSED report and CHECK_PASSED lifecycle state.
# @POST: reject_candidate persists REJECTED decision; candidate status remains unchanged.
@@ -167,6 +170,7 @@ def test_reject_persists_decision_without_promoting_candidate_state():
# [DEF:test_reject_then_publish_is_blocked:Function]
# @RELATION: BINDS_TO -> TestApprovalService
# @PURPOSE: Ensure latest REJECTED decision blocks publication gate.
# @PRE: Candidate is rejected for passed report.
# @POST: publish_candidate raises PublicationGateError.
@@ -196,4 +200,4 @@ def test_reject_then_publish_is_blocked():
)
# [/DEF:test_reject_then_publish_is_blocked:Function]
# [/DEF:backend.tests.services.clean_release.test_approval_service:Module]
# [/DEF:TestApprovalService:Module]

View File

@@ -1,4 +1,5 @@
# [DEF:test_candidate_manifest_services:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @PURPOSE: Test lifecycle and manifest versioning for release candidates.
# @LAYER: Tests
@@ -23,6 +24,8 @@ def db_session():
yield session
session.close()
# [DEF:test_candidate_lifecycle_transitions:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_candidate_lifecycle_transitions(db_session):
"""
@PURPOSE: Verify legal state transitions for ReleaseCandidate.
@@ -47,6 +50,10 @@ def test_candidate_lifecycle_transitions(db_session):
with pytest.raises(IllegalTransitionError, match="Forbidden transition"):
candidate.transition_to(CandidateStatus.DRAFT)
# [/DEF:test_candidate_lifecycle_transitions:Function]
# [DEF:test_manifest_versioning_and_immutability:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_manifest_versioning_and_immutability(db_session):
"""
@PURPOSE: Verify manifest versioning and immutability invariants.
@@ -90,6 +97,10 @@ def test_manifest_versioning_and_immutability(db_session):
assert len(all_manifests) == 2
# [/DEF:test_manifest_versioning_and_immutability:Function]
# [DEF:_valid_artifacts:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def _valid_artifacts():
return [
{
@@ -101,6 +112,10 @@ def _valid_artifacts():
]
# [/DEF:_valid_artifacts:Function]
# [DEF:test_register_candidate_rejects_duplicate_candidate_id:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_register_candidate_rejects_duplicate_candidate_id():
repository = CleanReleaseRepository()
register_candidate(
@@ -123,6 +138,10 @@ def test_register_candidate_rejects_duplicate_candidate_id():
)
# [/DEF:test_register_candidate_rejects_duplicate_candidate_id:Function]
# [DEF:test_register_candidate_rejects_malformed_artifact_input:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_register_candidate_rejects_malformed_artifact_input():
repository = CleanReleaseRepository()
bad_artifacts = [{"id": "art-1", "path": "bin/app", "size": 42}] # missing sha256
@@ -138,6 +157,10 @@ def test_register_candidate_rejects_malformed_artifact_input():
)
# [/DEF:test_register_candidate_rejects_malformed_artifact_input:Function]
# [DEF:test_register_candidate_rejects_empty_artifact_set:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_register_candidate_rejects_empty_artifact_set():
repository = CleanReleaseRepository()
@@ -152,6 +175,10 @@ def test_register_candidate_rejects_empty_artifact_set():
)
# [/DEF:test_register_candidate_rejects_empty_artifact_set:Function]
# [DEF:test_manifest_service_rebuild_creates_new_version:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_manifest_service_rebuild_creates_new_version():
repository = CleanReleaseRepository()
register_candidate(
@@ -171,6 +198,10 @@ def test_manifest_service_rebuild_creates_new_version():
assert first.id != second.id
# [/DEF:test_manifest_service_rebuild_creates_new_version:Function]
# [DEF:test_manifest_service_existing_manifest_cannot_be_mutated:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_manifest_service_existing_manifest_cannot_be_mutated():
repository = CleanReleaseRepository()
register_candidate(
@@ -194,6 +225,10 @@ def test_manifest_service_existing_manifest_cannot_be_mutated():
assert rebuilt.id != created.id
# [/DEF:test_manifest_service_existing_manifest_cannot_be_mutated:Function]
# [DEF:test_manifest_service_rejects_missing_candidate:Function]
# @RELATION: BINDS_TO -> test_candidate_manifest_services
def test_manifest_service_rejects_missing_candidate():
repository = CleanReleaseRepository()
@@ -201,3 +236,4 @@ def test_manifest_service_rejects_missing_candidate():
build_manifest_snapshot(repository=repository, candidate_id="missing-candidate", created_by="operator")
# [/DEF:test_candidate_manifest_services:Module]
# [/DEF:test_manifest_service_rejects_missing_candidate:Function]

View File

@@ -1,10 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_compliance_execution_service:Module]
# [DEF:TestComplianceExecutionService:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 5
# @SEMANTICS: tests, clean-release, compliance, pipeline, run-finalization
# @PURPOSE: Validate stage pipeline and run finalization contracts for compliance execution.
# @LAYER: Tests
# @RELATION: TESTS -> backend.src.services.clean_release.compliance_orchestrator
# @RELATION: TESTS -> backend.src.services.clean_release.report_builder
# @INVARIANT: Missing manifest prevents run startup; failed execution cannot finalize as PASSED.
from __future__ import annotations
@@ -27,6 +26,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_seed_with_candidate_policy_registry:Function]
# @RELATION: BINDS_TO -> TestComplianceExecutionService
# @PURPOSE: Build deterministic repository state for run startup tests.
# @PRE: candidate_id and snapshot ids are non-empty.
# @POST: Returns repository with candidate, policy and registry; manifest is optional.
@@ -100,6 +100,7 @@ def _seed_with_candidate_policy_registry(
# [DEF:test_run_without_manifest_rejected:Function]
# @RELATION: BINDS_TO -> TestComplianceExecutionService
# @PURPOSE: Ensure compliance run cannot start when manifest is unresolved.
# @PRE: Candidate/policy exist but manifest is missing.
# @POST: start_check_run raises ValueError and no run is persisted.
@@ -120,6 +121,7 @@ def test_run_without_manifest_rejected():
# [DEF:test_task_crash_mid_run_marks_failed:Function]
# @RELATION: BINDS_TO -> TestComplianceExecutionService
# @PURPOSE: Ensure execution crash conditions force FAILED run status.
# @PRE: Run exists, then required dependency becomes unavailable before execute_stages.
# @POST: execute_stages persists run with FAILED status.
@@ -143,6 +145,7 @@ def test_task_crash_mid_run_marks_failed():
# [DEF:test_blocked_run_finalization_blocks_report_builder:Function]
# @RELATION: BINDS_TO -> TestComplianceExecutionService
# @PURPOSE: Ensure blocked runs require blocking violations before report creation.
# @PRE: Manifest contains prohibited artifacts leading to BLOCKED decision.
# @POST: finalize keeps BLOCKED and report_builder rejects zero blocking violations.
@@ -170,4 +173,4 @@ def test_blocked_run_finalization_blocks_report_builder():
builder.build_report_payload(run, [])
# [/DEF:test_blocked_run_finalization_blocks_report_builder:Function]
# [/DEF:backend.tests.services.clean_release.test_compliance_execution_service:Module]
# [/DEF:TestComplianceExecutionService:Module]

View File

@@ -1,10 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_compliance_task_integration:Module]
# [DEF:TestComplianceTaskIntegration:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 5
# @SEMANTICS: tests, clean-release, compliance, task-manager, integration
# @PURPOSE: Verify clean release compliance runs execute through TaskManager lifecycle with observable success/failure outcomes.
# @LAYER: Tests
# @RELATION: TESTS -> backend.src.core.task_manager.manager.TaskManager
# @RELATION: TESTS -> backend.src.services.clean_release.compliance_orchestrator.CleanComplianceOrchestrator
# @INVARIANT: Compliance execution triggered as task produces terminal task status and persists run evidence.
from __future__ import annotations
@@ -24,16 +23,21 @@ from src.models.clean_release import (
ReleaseCandidate,
SourceRegistrySnapshot,
)
from src.services.clean_release.compliance_orchestrator import CleanComplianceOrchestrator
from src.services.clean_release.compliance_orchestrator import (
CleanComplianceOrchestrator,
)
from src.services.clean_release.enums import CandidateStatus, RunStatus
from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_seed_repository:Function]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @PURPOSE: Prepare deterministic candidate/policy/registry/manifest fixtures for task integration tests.
# @PRE: with_manifest controls manifest availability.
# @POST: Returns initialized repository and identifiers for compliance run startup.
def _seed_repository(*, with_manifest: bool) -> tuple[CleanReleaseRepository, str, str, str]:
def _seed_repository(
*, with_manifest: bool
) -> tuple[CleanReleaseRepository, str, str, str]:
repository = CleanReleaseRepository()
candidate_id = "cand-task-int-1"
policy_id = "policy-task-int-1"
@@ -94,10 +98,13 @@ def _seed_repository(*, with_manifest: bool) -> tuple[CleanReleaseRepository, st
)
return repository, candidate_id, policy_id, manifest_id
# [/DEF:_seed_repository:Function]
# [DEF:CleanReleaseCompliancePlugin:Class]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @PURPOSE: TaskManager plugin shim that executes clean release compliance orchestration.
class CleanReleaseCompliancePlugin:
@property
@@ -125,12 +132,21 @@ class CleanReleaseCompliancePlugin:
if context is not None:
context.logger.info("Compliance run completed via TaskManager plugin")
return {"run_id": run.id, "run_status": run.status, "final_status": run.final_status}
return {
"run_id": run.id,
"run_status": run.status,
"final_status": run.final_status,
}
# [/DEF:CleanReleaseCompliancePlugin:Class]
# [DEF:_PluginLoaderStub:Class]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @COMPLEXITY: 2
# @PURPOSE: Provide minimal plugin loader contract used by TaskManager in integration tests.
# @INVARIANT: has_plugin/get_plugin only acknowledge the seeded compliance plugin id.
class _PluginLoaderStub:
def __init__(self, plugin: CleanReleaseCompliancePlugin):
self._plugin = plugin
@@ -142,18 +158,26 @@ class _PluginLoaderStub:
if plugin_id != self._plugin.id:
raise ValueError("Plugin not found")
return self._plugin
# [/DEF:_PluginLoaderStub:Class]
# [DEF:_make_task_manager:Function]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @PURPOSE: Build TaskManager with mocked persistence services for isolated integration tests.
# @POST: Returns TaskManager ready for async task execution.
def _make_task_manager() -> TaskManager:
plugin_loader = _PluginLoaderStub(CleanReleaseCompliancePlugin())
with patch("src.core.task_manager.manager.TaskPersistenceService") as mock_persistence, patch(
"src.core.task_manager.manager.TaskLogPersistenceService"
) as mock_log_persistence:
with (
patch(
"src.core.task_manager.manager.TaskPersistenceService"
) as mock_persistence,
patch(
"src.core.task_manager.manager.TaskLogPersistenceService"
) as mock_log_persistence,
):
mock_persistence.return_value.load_tasks.return_value = []
mock_persistence.return_value.persist_task = MagicMock()
mock_log_persistence.return_value.add_logs = MagicMock()
@@ -162,14 +186,19 @@ def _make_task_manager() -> TaskManager:
mock_log_persistence.return_value.get_sources = MagicMock(return_value=[])
return TaskManager(plugin_loader)
# [/DEF:_make_task_manager:Function]
# [DEF:_wait_for_terminal_task:Function]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @PURPOSE: Poll task registry until target task reaches terminal status.
# @PRE: task_id exists in manager registry.
# @POST: Returns task with SUCCESS or FAILED status, otherwise raises TimeoutError.
async def _wait_for_terminal_task(manager: TaskManager, task_id: str, timeout_seconds: float = 3.0):
async def _wait_for_terminal_task(
manager: TaskManager, task_id: str, timeout_seconds: float = 3.0
):
started = asyncio.get_running_loop().time()
while True:
task = manager.get_task(task_id)
@@ -178,16 +207,21 @@ async def _wait_for_terminal_task(manager: TaskManager, task_id: str, timeout_se
if asyncio.get_running_loop().time() - started > timeout_seconds:
raise TimeoutError(f"Task {task_id} did not reach terminal status")
await asyncio.sleep(0.05)
# [/DEF:_wait_for_terminal_task:Function]
# [DEF:test_compliance_run_executes_as_task_manager_task:Function]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @PURPOSE: Verify successful compliance execution is observable as TaskManager SUCCESS task.
# @PRE: Candidate, policy and manifest are available in repository.
# @POST: Task ends with SUCCESS; run is persisted with SUCCEEDED status and task binding.
@pytest.mark.asyncio
async def test_compliance_run_executes_as_task_manager_task():
repository, candidate_id, policy_id, manifest_id = _seed_repository(with_manifest=True)
repository, candidate_id, policy_id, manifest_id = _seed_repository(
with_manifest=True
)
manager = _make_task_manager()
try:
@@ -214,16 +248,21 @@ async def test_compliance_run_executes_as_task_manager_task():
finally:
manager._flusher_stop_event.set()
manager._flusher_thread.join(timeout=2)
# [/DEF:test_compliance_run_executes_as_task_manager_task:Function]
# [DEF:test_compliance_run_missing_manifest_marks_task_failed:Function]
# @RELATION: BINDS_TO -> TestComplianceTaskIntegration
# @PURPOSE: Verify missing manifest startup failure is surfaced as TaskManager FAILED task.
# @PRE: Candidate/policy exist but manifest is absent.
# @POST: Task ends with FAILED and run history remains empty.
@pytest.mark.asyncio
async def test_compliance_run_missing_manifest_marks_task_failed():
repository, candidate_id, policy_id, manifest_id = _seed_repository(with_manifest=False)
repository, candidate_id, policy_id, manifest_id = _seed_repository(
with_manifest=False
)
manager = _make_task_manager()
try:
@@ -241,10 +280,14 @@ async def test_compliance_run_missing_manifest_marks_task_failed():
assert finished.status == TaskStatus.FAILED
assert len(repository.check_runs) == 0
assert any("Manifest or Policy not found" in log.message for log in finished.logs)
assert any(
"Manifest or Policy not found" in log.message for log in finished.logs
)
finally:
manager._flusher_stop_event.set()
manager._flusher_thread.join(timeout=2)
# [/DEF:test_compliance_run_missing_manifest_marks_task_failed:Function]
# [/DEF:backend.tests.services.clean_release.test_compliance_task_integration:Module]
# [/DEF:TestComplianceTaskIntegration:Module]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.tests.services.clean_release.test_demo_mode_isolation:Module]
# [DEF:TestDemoModeIsolation:Module]
# @COMPLEXITY: 3
# @SEMANTICS: clean-release, demo-mode, isolation, namespace, repository
# @PURPOSE: Verify demo and real mode namespace isolation contracts before TUI integration.
@@ -18,6 +18,7 @@ from src.services.clean_release.demo_data_service import (
# [DEF:test_resolve_namespace_separates_demo_and_real:Function]
# @RELATION: BINDS_TO -> TestDemoModeIsolation
# @PURPOSE: Ensure namespace resolver returns deterministic and distinct namespaces.
# @PRE: Mode names are provided as user/runtime strings.
# @POST: Demo and real namespaces are different and stable.
@@ -32,6 +33,7 @@ def test_resolve_namespace_separates_demo_and_real() -> None:
# [DEF:test_build_namespaced_id_prevents_cross_mode_collisions:Function]
# @RELATION: BINDS_TO -> TestDemoModeIsolation
# @PURPOSE: Ensure ID generation prevents demo/real collisions for identical logical IDs.
# @PRE: Same logical candidate id is used in two different namespaces.
# @POST: Produced physical IDs differ by namespace prefix.
@@ -47,6 +49,7 @@ def test_build_namespaced_id_prevents_cross_mode_collisions() -> None:
# [DEF:test_create_isolated_repository_keeps_mode_data_separate:Function]
# @RELATION: BINDS_TO -> TestDemoModeIsolation
# @PURPOSE: Verify demo and real repositories do not leak state across mode boundaries.
# @PRE: Two repositories are created for distinct modes.
# @POST: Candidate mutations in one mode are not visible in the other mode.
@@ -84,4 +87,4 @@ def test_create_isolated_repository_keeps_mode_data_separate() -> None:
assert real_repo.get_candidate(demo_candidate_id) is None
# [/DEF:test_create_isolated_repository_keeps_mode_data_separate:Function]
# [/DEF:backend.tests.services.clean_release.test_demo_mode_isolation:Module]
# [/DEF:TestDemoModeIsolation:Module]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.tests.services.clean_release.test_policy_resolution_service:Module]
# [DEF:TestPolicyResolutionService:Module]
# @COMPLEXITY: 5
# @SEMANTICS: clean-release, policy-resolution, trusted-snapshots, contracts
# @PURPOSE: Verify trusted policy snapshot resolution contract and error guards.
@@ -21,6 +21,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_config_manager:Function]
# @RELATION: BINDS_TO -> TestPolicyResolutionService
# @PURPOSE: Build deterministic ConfigManager-like stub for tests.
# @PRE: policy_id and registry_id may be None or non-empty strings.
# @POST: Returns object exposing get_config().settings.clean_release active IDs.
@@ -33,6 +34,7 @@ def _config_manager(policy_id, registry_id):
# [DEF:test_resolve_trusted_policy_snapshots_missing_profile:Function]
# @RELATION: BINDS_TO -> TestPolicyResolutionService
# @PURPOSE: Ensure resolution fails when trusted profile is not configured.
# @PRE: active_policy_id is None.
# @POST: Raises PolicyResolutionError with missing trusted profile reason.
@@ -49,6 +51,7 @@ def test_resolve_trusted_policy_snapshots_missing_profile():
# [DEF:test_resolve_trusted_policy_snapshots_missing_registry:Function]
# @RELATION: BINDS_TO -> TestPolicyResolutionService
# @PURPOSE: Ensure resolution fails when trusted registry is not configured.
# @PRE: active_registry_id is None and active_policy_id is set.
# @POST: Raises PolicyResolutionError with missing trusted registry reason.
@@ -65,6 +68,7 @@ def test_resolve_trusted_policy_snapshots_missing_registry():
# [DEF:test_resolve_trusted_policy_snapshots_rejects_override_attempt:Function]
# @RELATION: BINDS_TO -> TestPolicyResolutionService
# @PURPOSE: Ensure runtime override attempt is rejected even if snapshots exist.
# @PRE: valid trusted snapshots exist in repository and override is provided.
# @POST: Raises PolicyResolutionError with override forbidden reason.
@@ -102,4 +106,4 @@ def test_resolve_trusted_policy_snapshots_rejects_override_attempt():
)
# [/DEF:test_resolve_trusted_policy_snapshots_rejects_override_attempt:Function]
# [/DEF:backend.tests.services.clean_release.test_policy_resolution_service:Module]
# [/DEF:TestPolicyResolutionService:Module]

View File

@@ -1,11 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_publication_service:Module]
# [DEF:TestPublicationService:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 5
# @SEMANTICS: tests, clean-release, publication, revoke, gate
# @PURPOSE: Define publication gate contracts over approved candidates and immutable publication records.
# @LAYER: Tests
# @RELATION: TESTS -> src.services.clean_release.publication_service
# @RELATION: TESTS -> src.services.clean_release.approval_service
# @RELATION: TESTS -> src.services.clean_release.repository
# @INVARIANT: Publish requires approval; revoke requires existing publication; republish after revoke is allowed as a new record.
from __future__ import annotations
@@ -21,6 +19,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_seed_candidate_with_passed_report:Function]
# @RELATION: BINDS_TO -> TestPublicationService
# @PURPOSE: Seed candidate/report fixtures for publication gate scenarios.
# @PRE: candidate_id and report_id are non-empty.
# @POST: Repository contains candidate and PASSED report.
@@ -57,6 +56,7 @@ def _seed_candidate_with_passed_report(
# [DEF:test_publish_without_approval_rejected:Function]
# @RELATION: BINDS_TO -> TestPublicationService
# @PURPOSE: Ensure publish action is blocked until candidate is approved.
# @PRE: Candidate has PASSED report but status is not APPROVED.
# @POST: publish_candidate raises PublicationGateError.
@@ -80,6 +80,7 @@ def test_publish_without_approval_rejected():
# [DEF:test_revoke_unknown_publication_rejected:Function]
# @RELATION: BINDS_TO -> TestPublicationService
# @PURPOSE: Ensure revocation is rejected for unknown publication id.
# @PRE: Repository has no matching publication record.
# @POST: revoke_publication raises PublicationGateError.
@@ -99,6 +100,7 @@ def test_revoke_unknown_publication_rejected():
# [DEF:test_republish_after_revoke_creates_new_active_record:Function]
# @RELATION: BINDS_TO -> TestPublicationService
# @PURPOSE: Ensure republish after revoke is allowed and creates a new ACTIVE record.
# @PRE: Candidate is APPROVED and first publication has been revoked.
# @POST: New publish call returns distinct publication id with ACTIVE status.
@@ -145,4 +147,4 @@ def test_republish_after_revoke_creates_new_active_record():
assert second.status == PublicationStatus.ACTIVE.value
# [/DEF:test_republish_after_revoke_creates_new_active_record:Function]
# [/DEF:backend.tests.services.clean_release.test_publication_service:Module]
# [/DEF:TestPublicationService:Module]

View File

@@ -1,11 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_report_audit_immutability:Module]
# [DEF:TestReportAuditImmutability:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 5
# @SEMANTICS: tests, clean-release, report, audit, immutability, append-only
# @PURPOSE: Validate report snapshot immutability expectations and append-only audit hook behavior for US2.
# @LAYER: Tests
# @RELATION: TESTS -> src.services.clean_release.report_builder.ComplianceReportBuilder
# @RELATION: TESTS -> src.services.clean_release.audit_service
# @RELATION: TESTS -> src.services.clean_release.repository.CleanReleaseRepository
# @INVARIANT: Built reports are immutable snapshots; audit hooks produce append-only event traces.
from __future__ import annotations
@@ -15,18 +13,30 @@ from unittest.mock import patch
import pytest
from src.models.clean_release import ComplianceReport, ComplianceRun, ComplianceViolation
from src.services.clean_release.audit_service import audit_check_run, audit_preparation, audit_report, audit_violation
from src.models.clean_release import (
ComplianceReport,
ComplianceRun,
ComplianceViolation,
)
from src.services.clean_release.audit_service import (
audit_check_run,
audit_preparation,
audit_report,
audit_violation,
)
from src.services.clean_release.enums import ComplianceDecision, RunStatus
from src.services.clean_release.report_builder import ComplianceReportBuilder
from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_terminal_run:Function]
# @RELATION: BINDS_TO -> TestReportAuditImmutability
# @PURPOSE: Build deterministic terminal run fixture for report snapshot tests.
# @PRE: final_status is a valid ComplianceDecision value.
# @POST: Returns a terminal ComplianceRun suitable for report generation.
def _terminal_run(final_status: ComplianceDecision = ComplianceDecision.PASSED) -> ComplianceRun:
def _terminal_run(
final_status: ComplianceDecision = ComplianceDecision.PASSED,
) -> ComplianceRun:
return ComplianceRun(
id="run-immut-1",
candidate_id="cand-immut-1",
@@ -41,10 +51,13 @@ def _terminal_run(final_status: ComplianceDecision = ComplianceDecision.PASSED)
status=RunStatus.SUCCEEDED,
final_status=final_status,
)
# [/DEF:_terminal_run:Function]
# [DEF:test_report_builder_sets_immutable_snapshot_flag:Function]
# @RELATION: BINDS_TO -> TestReportAuditImmutability
# @PURPOSE: Ensure generated report payload is marked immutable and persisted as snapshot.
# @PRE: Terminal run exists.
# @POST: Built report has immutable=True and repository stores same immutable object.
@@ -59,10 +72,13 @@ def test_report_builder_sets_immutable_snapshot_flag():
assert report.immutable is True
assert persisted.immutable is True
assert repository.get_report(report.id) is persisted
# [/DEF:test_report_builder_sets_immutable_snapshot_flag:Function]
# [DEF:test_repository_rejects_report_overwrite_for_same_report_id:Function]
# @RELATION: BINDS_TO -> TestReportAuditImmutability
# @PURPOSE: Define immutability contract that report snapshots cannot be overwritten by same identifier.
# @PRE: Existing report with id is already persisted.
# @POST: Second save for same report id is rejected with explicit immutability error.
@@ -73,7 +89,11 @@ def test_repository_rejects_report_overwrite_for_same_report_id():
run_id="run-immut-1",
candidate_id="cand-immut-1",
final_status=ComplianceDecision.PASSED,
summary_json={"operator_summary": "original", "violations_count": 0, "blocking_violations_count": 0},
summary_json={
"operator_summary": "original",
"violations_count": 0,
"blocking_violations_count": 0,
},
generated_at=datetime.now(timezone.utc),
immutable=True,
)
@@ -82,7 +102,11 @@ def test_repository_rejects_report_overwrite_for_same_report_id():
run_id="run-immut-2",
candidate_id="cand-immut-2",
final_status=ComplianceDecision.ERROR,
summary_json={"operator_summary": "mutated", "violations_count": 1, "blocking_violations_count": 1},
summary_json={
"operator_summary": "mutated",
"violations_count": 1,
"blocking_violations_count": 1,
},
generated_at=datetime.now(timezone.utc),
immutable=True,
)
@@ -91,10 +115,13 @@ def test_repository_rejects_report_overwrite_for_same_report_id():
with pytest.raises(ValueError, match="immutable"):
repository.save_report(mutated)
# [/DEF:test_repository_rejects_report_overwrite_for_same_report_id:Function]
# [DEF:test_audit_hooks_emit_append_only_event_stream:Function]
# @RELATION: BINDS_TO -> TestReportAuditImmutability
# @PURPOSE: Verify audit hooks emit one event per action call and preserve call order.
# @PRE: Logger backend is patched.
# @POST: Three calls produce three ordered info entries with molecular prefixes.
@@ -109,6 +136,8 @@ def test_audit_hooks_emit_append_only_event_stream(mock_logger):
assert logged_messages[0].startswith("[REASON]")
assert logged_messages[1].startswith("[REFLECT]")
assert logged_messages[2].startswith("[EXPLORE]")
# [/DEF:test_audit_hooks_emit_append_only_event_stream:Function]
# [/DEF:backend.tests.services.clean_release.test_report_audit_immutability:Module]
# [/DEF:TestReportAuditImmutability:Module]

View File

@@ -1,3 +1,12 @@
# [DEF:TestAuth:Module]
# @COMPLEXITY: 3
# @PURPOSE: Covers authentication service/repository behavior and auth bootstrap helpers.
# @LAYER: Test
# @RELATION: TESTS -> AuthService
# @RELATION: TESTS -> AuthRepository
# @RELATION: TESTS -> create_admin
# @RELATION: TESTS -> ensure_encryption_key
import sys
from pathlib import Path
@@ -19,113 +28,145 @@ from src.scripts.init_auth_db import ensure_encryption_key
# Create in-memory SQLite database for testing
SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Create all tables
Base.metadata.create_all(bind=engine)
@pytest.fixture
def db_session():
"""Create a new database session with a transaction, rollback after test"""
connection = engine.connect()
transaction = connection.begin()
session = TestingSessionLocal(bind=connection)
yield session
session.close()
transaction.rollback()
connection.close()
@pytest.fixture
def auth_service(db_session):
return AuthService(db_session)
@pytest.fixture
def auth_repo(db_session):
return AuthRepository(db_session)
# [DEF:test_create_user:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_create_user(auth_repo):
"""Test user creation"""
user = User(
username="testuser",
email="test@example.com",
password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL"
auth_source="LOCAL",
)
auth_repo.db.add(user)
auth_repo.db.commit()
retrieved_user = auth_repo.get_user_by_username("testuser")
assert retrieved_user is not None
assert retrieved_user.username == "testuser"
assert retrieved_user.email == "test@example.com"
assert verify_password("testpassword123", retrieved_user.password_hash)
# [/DEF:test_create_user:Function]
# [DEF:test_authenticate_user:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_authenticate_user(auth_service, auth_repo):
"""Test user authentication with valid and invalid credentials"""
user = User(
username="testuser",
email="test@example.com",
password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL"
auth_source="LOCAL",
)
auth_repo.db.add(user)
auth_repo.db.commit()
# Test valid credentials
authenticated_user = auth_service.authenticate_user("testuser", "testpassword123")
assert authenticated_user is not None
assert authenticated_user.username == "testuser"
# Test invalid password
invalid_user = auth_service.authenticate_user("testuser", "wrongpassword")
assert invalid_user is None
# Test invalid username
invalid_user = auth_service.authenticate_user("nonexistent", "testpassword123")
assert invalid_user is None
# [/DEF:test_authenticate_user:Function]
# [DEF:test_create_session:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_create_session(auth_service, auth_repo):
"""Test session token creation"""
user = User(
username="testuser",
email="test@example.com",
password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL"
auth_source="LOCAL",
)
auth_repo.db.add(user)
auth_repo.db.commit()
session = auth_service.create_session(user)
assert "access_token" in session
assert "token_type" in session
assert session["token_type"] == "bearer"
assert len(session["access_token"]) > 0
# [/DEF:test_create_session:Function]
# [DEF:test_role_permission_association:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_role_permission_association(auth_repo):
"""Test role and permission association"""
role = Role(name="Admin", description="System administrator")
perm1 = Permission(resource="admin:users", action="READ")
perm2 = Permission(resource="admin:users", action="WRITE")
role.permissions.extend([perm1, perm2])
auth_repo.db.add(role)
auth_repo.db.commit()
retrieved_role = auth_repo.get_role_by_name("Admin")
assert retrieved_role is not None
assert len(retrieved_role.permissions) == 2
permissions = [f"{p.resource}:{p.action}" for p in retrieved_role.permissions]
assert "admin:users:READ" in permissions
assert "admin:users:WRITE" in permissions
# [/DEF:test_role_permission_association:Function]
# [DEF:test_user_role_association:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_user_role_association(auth_repo):
"""Test user and role association"""
role = Role(name="Admin", description="System administrator")
@@ -133,58 +174,84 @@ def test_user_role_association(auth_repo):
username="adminuser",
email="admin@example.com",
password_hash=get_password_hash("adminpass123"),
auth_source="LOCAL"
auth_source="LOCAL",
)
user.roles.append(role)
auth_repo.db.add(role)
auth_repo.db.add(user)
auth_repo.db.commit()
retrieved_user = auth_repo.get_user_by_username("adminuser")
assert retrieved_user is not None
assert len(retrieved_user.roles) == 1
assert retrieved_user.roles[0].name == "Admin"
# [/DEF:test_user_role_association:Function]
# [DEF:test_ad_group_mapping:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_ad_group_mapping(auth_repo):
"""Test AD group mapping"""
role = Role(name="ADFS_Admin", description="ADFS administrators")
auth_repo.db.add(role)
auth_repo.db.commit()
mapping = ADGroupMapping(ad_group="DOMAIN\\ADFS_Admins", role_id=role.id)
auth_repo.db.add(mapping)
auth_repo.db.commit()
retrieved_mapping = auth_repo.db.query(ADGroupMapping).filter_by(ad_group="DOMAIN\\ADFS_Admins").first()
retrieved_mapping = (
auth_repo.db.query(ADGroupMapping)
.filter_by(ad_group="DOMAIN\\ADFS_Admins")
.first()
)
assert retrieved_mapping is not None
assert retrieved_mapping.role_id == role.id
# [/DEF:test_ad_group_mapping:Function]
# [DEF:test_create_admin_creates_user_with_optional_email:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_create_admin_creates_user_with_optional_email(monkeypatch, db_session):
"""Test bootstrap admin creation stores optional email and Admin role"""
monkeypatch.setattr("src.scripts.create_admin.AuthSessionLocal", lambda: db_session)
result = create_admin("bootstrap-admin", "bootstrap-pass", "admin@example.com")
created_user = db_session.query(User).filter(User.username == "bootstrap-admin").first()
created_user = (
db_session.query(User).filter(User.username == "bootstrap-admin").first()
)
assert result == "created"
assert created_user is not None
assert created_user.email == "admin@example.com"
assert created_user.roles[0].name == "Admin"
# [/DEF:test_create_admin_creates_user_with_optional_email:Function]
# [DEF:test_create_admin_is_idempotent_for_existing_user:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_create_admin_is_idempotent_for_existing_user(monkeypatch, db_session):
"""Test bootstrap admin creation preserves existing user on repeated runs"""
monkeypatch.setattr("src.scripts.create_admin.AuthSessionLocal", lambda: db_session)
first_result = create_admin("bootstrap-admin-2", "bootstrap-pass")
second_result = create_admin("bootstrap-admin-2", "new-password", "changed@example.com")
second_result = create_admin(
"bootstrap-admin-2", "new-password", "changed@example.com"
)
created_user = db_session.query(User).filter(User.username == "bootstrap-admin-2").first()
created_user = (
db_session.query(User).filter(User.username == "bootstrap-admin-2").first()
)
assert first_result == "created"
assert second_result == "exists"
assert created_user is not None
@@ -193,6 +260,11 @@ def test_create_admin_is_idempotent_for_existing_user(monkeypatch, db_session):
assert not verify_password("new-password", created_user.password_hash)
# [/DEF:test_create_admin_is_idempotent_for_existing_user:Function]
# [DEF:test_ensure_encryption_key_generates_backend_env_file:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_ensure_encryption_key_generates_backend_env_file(monkeypatch, tmp_path):
"""Test first-time initialization generates and persists a Fernet key."""
env_file = tmp_path / ".env"
@@ -202,23 +274,41 @@ def test_ensure_encryption_key_generates_backend_env_file(monkeypatch, tmp_path)
assert generated_key
assert env_file.exists()
assert env_file.read_text(encoding="utf-8").strip() == f"ENCRYPTION_KEY={generated_key}"
assert (
env_file.read_text(encoding="utf-8").strip()
== f"ENCRYPTION_KEY={generated_key}"
)
assert verify_fernet_key(generated_key)
# [/DEF:test_ensure_encryption_key_generates_backend_env_file:Function]
# [DEF:test_ensure_encryption_key_reuses_existing_env_file_value:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_ensure_encryption_key_reuses_existing_env_file_value(monkeypatch, tmp_path):
"""Test persisted key is reused without rewriting file contents."""
env_file = tmp_path / ".env"
existing_key = Fernet.generate_key().decode()
env_file.write_text(f"ENCRYPTION_KEY={existing_key}\nOTHER=value\n", encoding="utf-8")
env_file.write_text(
f"ENCRYPTION_KEY={existing_key}\nOTHER=value\n", encoding="utf-8"
)
monkeypatch.delenv("ENCRYPTION_KEY", raising=False)
reused_key = ensure_encryption_key(env_file)
assert reused_key == existing_key
assert env_file.read_text(encoding="utf-8") == f"ENCRYPTION_KEY={existing_key}\nOTHER=value\n"
assert (
env_file.read_text(encoding="utf-8")
== f"ENCRYPTION_KEY={existing_key}\nOTHER=value\n"
)
# [/DEF:test_ensure_encryption_key_reuses_existing_env_file_value:Function]
# [DEF:test_ensure_encryption_key_prefers_process_environment:Function]
# @RELATION: BINDS_TO -> TestAuth
def test_ensure_encryption_key_prefers_process_environment(monkeypatch, tmp_path):
"""Test explicit process environment has priority over file generation."""
env_file = tmp_path / ".env"
@@ -231,6 +321,12 @@ def test_ensure_encryption_key_prefers_process_environment(monkeypatch, tmp_path
assert not env_file.exists()
# [/DEF:test_ensure_encryption_key_prefers_process_environment:Function]
def verify_fernet_key(value: str) -> bool:
Fernet(value.encode())
return True
# [/DEF:TestAuth:Module]

View File

@@ -1,8 +1,8 @@
# [DEF:test_log_persistence:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @SEMANTICS: test, log, persistence, unit_test
# @PURPOSE: Unit tests for TaskLogPersistenceService.
# @LAYER: Test
# @RELATION: TESTS -> TaskLogPersistenceService
# @COMPLEXITY: 5
# [SECTION: IMPORTS]
@@ -18,6 +18,7 @@ from src.core.task_manager.models import LogEntry, LogFilter
# [/SECTION]
# [DEF:TestLogPersistence:Class]
# @RELATION: BINDS_TO -> test_log_persistence
# @PURPOSE: Test suite for TaskLogPersistenceService.
# @COMPLEXITY: 5
# @TEST_DATA: log_entry -> {"task_id": "test-task-1", "level": "INFO", "source": "test_source", "message": "Test message"}

View File

@@ -1,4 +1,4 @@
# [DEF:tests.test_logger:Module]
# [DEF:TestLogger:Module]
# @COMPLEXITY: 3
# @SEMANTICS: logging, tests, belief_state
# @PURPOSE: Unit tests for the custom logger formatters and configuration context manager.
@@ -18,6 +18,7 @@ from src.core.config_models import LoggingConfig
# [DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Entry, Action, and Exit tags at DEBUG level.
@@ -50,6 +51,7 @@ def test_belief_scope_logs_entry_action_exit_at_debug(caplog):
# [DEF:test_belief_scope_error_handling:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that belief_scope logs Coherence:Failed on exception.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Coherence:Failed tag.
@@ -82,6 +84,7 @@ def test_belief_scope_error_handling(caplog):
# [DEF:test_belief_scope_success_coherence:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that belief_scope logs Coherence:OK on success.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Coherence:OK tag.
@@ -111,6 +114,7 @@ def test_belief_scope_success_coherence(caplog):
# [DEF:test_belief_scope_not_visible_at_info:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level.
# @PRE: belief_scope is available. caplog fixture is used.
# @POST: Entry/Exit/Coherence logs are not captured at INFO level.
@@ -133,6 +137,7 @@ def test_belief_scope_not_visible_at_info(caplog):
# [DEF:test_task_log_level_default:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that default task log level is INFO.
# @PRE: None.
# @POST: Default level is INFO.
@@ -144,6 +149,7 @@ def test_task_log_level_default():
# [DEF:test_should_log_task_level:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that should_log_task_level correctly filters log levels.
# @PRE: None.
# @POST: Filtering works correctly for all level combinations.
@@ -158,6 +164,7 @@ def test_should_log_task_level():
# [DEF:test_configure_logger_task_log_level:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that configure_logger updates task_log_level.
# @PRE: LoggingConfig is available.
# @POST: task_log_level is updated correctly.
@@ -185,6 +192,7 @@ def test_configure_logger_task_log_level():
# [DEF:test_enable_belief_state_flag:Function]
# @RELATION: BINDS_TO -> TestLogger
# @PURPOSE: Test that enable_belief_state flag controls belief_scope logging.
# @PRE: LoggingConfig is available. caplog fixture is used.
# @POST: belief_scope logs are controlled by the flag.
@@ -219,4 +227,4 @@ def test_enable_belief_state_flag(caplog):
)
configure_logger(config)
# [/DEF:test_enable_belief_state_flag:Function]
# [/DEF:tests.test_logger:Module]
# [/DEF:TestLogger:Module]

View File

@@ -2,6 +2,7 @@ from src.core.config_models import Environment
from src.core.logger import belief_scope
# [DEF:test_environment_model:Function]
# @RELATION: TESTS -> Environment
# @PURPOSE: Tests that Environment model correctly stores values.
# @PRE: Environment class is available.
# @POST: Values are verified.

View File

@@ -7,6 +7,7 @@ from src.dependencies import get_config_manager, get_task_manager, get_resource_
client = TestClient(app)
# [DEF:test_dashboards_api:Test]
# @RELATION: BINDS_TO -> SrcRoot
# @PURPOSE: Verify GET /api/dashboards contract compliance
# @TEST: Valid env_id returns 200 and dashboard list
# @TEST: Invalid env_id returns 404
@@ -59,6 +60,8 @@ def mock_deps():
app.dependency_overrides.clear()
# [DEF:test_get_dashboards_success:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_dashboards_success(mock_deps):
response = client.get("/api/dashboards?env_id=env1")
assert response.status_code == 200
@@ -68,10 +71,18 @@ def test_get_dashboards_success(mock_deps):
assert data["dashboards"][0]["title"] == "Sales"
assert data["dashboards"][0]["git_status"]["sync_status"] == "OK"
# [/DEF:test_get_dashboards_success:Function]
# [DEF:test_get_dashboards_not_found:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_dashboards_not_found(mock_deps):
response = client.get("/api/dashboards?env_id=invalid")
assert response.status_code == 404
# [/DEF:test_get_dashboards_not_found:Function]
# [DEF:test_get_dashboards_search:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_dashboards_search(mock_deps):
response = client.get("/api/dashboards?env_id=env1&search=Sales")
assert response.status_code == 200
@@ -82,12 +93,17 @@ def test_get_dashboards_search(mock_deps):
# [/DEF:test_dashboards_api:Test]
# [DEF:test_datasets_api:Test]
# @RELATION: BINDS_TO -> SrcRoot
# @PURPOSE: Verify GET /api/datasets contract compliance
# @TEST: Valid env_id returns 200 and dataset list
# @TEST: Invalid env_id returns 404
# @TEST: Search filter works
# @TEST: Negative - Service failure returns 503
# [/DEF:test_get_dashboards_search:Function]
# [DEF:test_get_datasets_success:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_datasets_success(mock_deps):
mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
{"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None}
@@ -101,10 +117,18 @@ def test_get_datasets_success(mock_deps):
assert data["datasets"][0]["table_name"] == "orders"
assert data["datasets"][0]["mapped_fields"]["mapped"] == 5
# [/DEF:test_get_datasets_success:Function]
# [DEF:test_get_datasets_not_found:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_datasets_not_found(mock_deps):
response = client.get("/api/datasets?env_id=invalid")
assert response.status_code == 404
# [/DEF:test_get_datasets_not_found:Function]
# [DEF:test_get_datasets_search:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_datasets_search(mock_deps):
mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
{"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None},
@@ -117,6 +141,10 @@ def test_get_datasets_search(mock_deps):
assert len(data["datasets"]) == 1
assert data["datasets"][0]["table_name"] == "orders"
# [/DEF:test_get_datasets_search:Function]
# [DEF:test_get_datasets_service_failure:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_datasets_service_failure(mock_deps):
mock_deps["resource"].get_datasets_with_status = AsyncMock(side_effect=Exception("Superset down"))
@@ -128,29 +156,47 @@ def test_get_datasets_service_failure(mock_deps):
# [DEF:test_pagination_boundaries:Test]
# @RELATION: BINDS_TO -> SrcRoot
# @PURPOSE: Verify pagination validation for GET endpoints
# @TEST: page<1 and page_size>100 return 400
# [/DEF:test_get_datasets_service_failure:Function]
# [DEF:test_get_dashboards_pagination_zero_page:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_dashboards_pagination_zero_page(mock_deps):
"""@TEST_EDGE: pagination_zero_page -> {page:0, status:400}"""
response = client.get("/api/dashboards?env_id=env1&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
# [/DEF:test_get_dashboards_pagination_zero_page:Function]
# [DEF:test_get_dashboards_pagination_oversize:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_dashboards_pagination_oversize(mock_deps):
"""@TEST_EDGE: pagination_oversize -> {page_size:101, status:400}"""
response = client.get("/api/dashboards?env_id=env1&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# [/DEF:test_get_dashboards_pagination_oversize:Function]
# [DEF:test_get_datasets_pagination_zero_page:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_datasets_pagination_zero_page(mock_deps):
"""@TEST_EDGE: pagination_zero_page on datasets"""
response = client.get("/api/datasets?env_id=env1&page=0")
assert response.status_code == 400
# [/DEF:test_get_datasets_pagination_zero_page:Function]
# [DEF:test_get_datasets_pagination_oversize:Function]
# @RELATION: BINDS_TO -> UnknownModule
def test_get_datasets_pagination_oversize(mock_deps):
"""@TEST_EDGE: pagination_oversize on datasets"""
response = client.get("/api/datasets?env_id=env1&page_size=101")
assert response.status_code == 400
# [/DEF:test_pagination_boundaries:Test]
# [/DEF:test_get_datasets_pagination_oversize:Function]

View File

@@ -1,9 +1,9 @@
# [DEF:test_task_manager:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 5
# @SEMANTICS: task-manager, lifecycle, CRUD, log-buffer, filtering, tests
# @PURPOSE: Unit tests for TaskManager lifecycle, CRUD, log buffering, and filtering.
# @LAYER: Core
# @RELATION: TESTS -> backend.src.core.task_manager.manager.TaskManager
# @INVARIANT: TaskManager state changes are deterministic and testable with mocked dependencies.
import sys
@@ -17,6 +17,8 @@ from datetime import datetime
# Helper to create a TaskManager with mocked dependencies
# [DEF:_make_manager:Function]
# @RELATION: BINDS_TO -> test_task_manager
def _make_manager():
"""Create TaskManager with mocked plugin_loader and persistence services."""
mock_plugin_loader = MagicMock()
@@ -50,12 +52,18 @@ def _make_manager():
return manager, mock_plugin_loader, MockPersistence.return_value, MockLogPersistence.return_value
# [/DEF:_make_manager:Function]
# [DEF:_cleanup_manager:Function]
# @RELATION: BINDS_TO -> test_task_manager
def _cleanup_manager(manager):
"""Stop the flusher thread."""
manager._flusher_stop_event.set()
manager._flusher_thread.join(timeout=2)
# [/DEF:_cleanup_manager:Function]
class TestTaskManagerInit:
"""Tests for TaskManager initialization."""

View File

@@ -1,8 +1,8 @@
# [DEF:test_task_persistence:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @SEMANTICS: test, task, persistence, unit_test
# @PURPOSE: Unit tests for TaskPersistenceService.
# @LAYER: Test
# @RELATION: TESTS -> TaskPersistenceService
# @COMPLEXITY: 5
# @TEST_DATA: valid_task -> {"id": "test-uuid-1", "plugin_id": "backup", "status": "PENDING"}
@@ -21,6 +21,7 @@ from src.core.task_manager.models import Task, TaskStatus, LogEntry
# [DEF:TestTaskPersistenceHelpers:Class]
# @RELATION: BINDS_TO -> test_task_persistence
# @PURPOSE: Test suite for TaskPersistenceService static helper methods.
# @COMPLEXITY: 5
class TestTaskPersistenceHelpers:
@@ -110,6 +111,7 @@ class TestTaskPersistenceHelpers:
# [DEF:TestTaskPersistenceService:Class]
# @RELATION: BINDS_TO -> test_task_persistence
# @PURPOSE: Test suite for TaskPersistenceService CRUD operations.
# @COMPLEXITY: 5
# @TEST_DATA: valid_task -> {"id": "test-uuid-1", "plugin_id": "backup", "status": "PENDING"}