# [DEF:backend.tests.services.clean_release.test_compliance_execution_service:Module] # @TIER: CRITICAL # @SEMANTICS: tests, clean-release, compliance, pipeline, run-finalization # @PURPOSE: Validate stage pipeline and run finalization contracts for compliance execution. # @LAYER: Tests # @RELATION: TESTS -> backend.src.services.clean_release.compliance_orchestrator # @RELATION: TESTS -> backend.src.services.clean_release.report_builder # @INVARIANT: Missing manifest prevents run startup; failed execution cannot finalize as PASSED. from __future__ import annotations from datetime import datetime, timezone import pytest from src.models.clean_release import ( CleanPolicySnapshot, ComplianceDecision, DistributionManifest, ReleaseCandidate, SourceRegistrySnapshot, ) from src.services.clean_release.compliance_orchestrator import CleanComplianceOrchestrator from src.services.clean_release.enums import CandidateStatus, RunStatus from src.services.clean_release.report_builder import ComplianceReportBuilder from src.services.clean_release.repository import CleanReleaseRepository # [DEF:_seed_with_candidate_policy_registry:Function] # @PURPOSE: Build deterministic repository state for run startup tests. # @PRE: candidate_id and snapshot ids are non-empty. # @POST: Returns repository with candidate, policy and registry; manifest is optional. 
def _seed_with_candidate_policy_registry(
    *,
    with_manifest: bool,
    prohibited_detected_count: int = 0,
) -> tuple[CleanReleaseRepository, str, str, str]:
    """Build deterministic repository state for run-startup tests.

    Seeds a fresh :class:`CleanReleaseRepository` with a release candidate,
    a source-registry snapshot and a policy snapshot. A distribution
    manifest is saved only when ``with_manifest`` is True;
    ``prohibited_detected_count`` feeds the manifest summary so callers can
    provoke a BLOCKED compliance decision.

    Returns ``(repository, candidate_id, policy_id, manifest_id)``.
    ``manifest_id`` is returned even when no manifest was saved, so callers
    can exercise the "manifest missing" startup path with a plausible id.
    """
    repository = CleanReleaseRepository()
    candidate_id = "cand-us2-1"
    policy_id = "policy-us2-1"
    registry_id = "registry-us2-1"
    manifest_id = "manifest-us2-1"
    repository.save_candidate(
        ReleaseCandidate(
            id=candidate_id,
            version="1.0.0",
            source_snapshot_ref="git:sha-us2",
            created_by="tester",
            created_at=datetime.now(timezone.utc),
            status=CandidateStatus.MANIFEST_BUILT.value,
        )
    )
    repository.save_registry(
        SourceRegistrySnapshot(
            id=registry_id,
            registry_id="trusted-registry",
            registry_version="1.0.0",
            allowed_hosts=["repo.internal.local"],
            allowed_schemes=["https"],
            allowed_source_types=["repo"],
            immutable=True,
        )
    )
    repository.save_policy(
        CleanPolicySnapshot(
            id=policy_id,
            policy_id="trusted-policy",
            policy_version="1.0.0",
            content_json={"rules": []},
            registry_snapshot_id=registry_id,
            immutable=True,
        )
    )
    if with_manifest:
        repository.save_manifest(
            DistributionManifest(
                id=manifest_id,
                candidate_id=candidate_id,
                manifest_version=1,
                manifest_digest="digest-us2-1",
                artifacts_digest="digest-us2-1",
                source_snapshot_ref="git:sha-us2",
                content_json={
                    "summary": {
                        "included_count": 1,
                        # Original wrote `0 if count == 0 else count`, which
                        # is identically `count` for any int — simplified.
                        "excluded_count": prohibited_detected_count,
                        "prohibited_detected_count": prohibited_detected_count,
                    }
                },
                created_by="tester",
                created_at=datetime.now(timezone.utc),
                immutable=True,
            )
        )
    return repository, candidate_id, policy_id, manifest_id
# [/DEF:_seed_with_candidate_policy_registry:Function]

# [DEF:test_run_without_manifest_rejected:Function]
# @PURPOSE: Ensure compliance run cannot start when manifest is unresolved.
# @PRE: Candidate/policy exist but manifest is missing.
# @POST: start_check_run raises ValueError and no run is persisted.
def test_run_without_manifest_rejected():
    """A compliance run must refuse to start while the manifest is unresolved."""
    repo, cand_id, pol_id, man_id = _seed_with_candidate_policy_registry(
        with_manifest=False
    )
    orchestrator = CleanComplianceOrchestrator(repo)
    with pytest.raises(ValueError, match="Manifest or Policy not found"):
        orchestrator.start_check_run(
            candidate_id=cand_id,
            policy_id=pol_id,
            requested_by="tester",
            manifest_id=man_id,
        )
    # A rejected startup must leave no run record behind.
    assert len(repo.check_runs) == 0
# [/DEF:test_run_without_manifest_rejected:Function]

# [DEF:test_task_crash_mid_run_marks_failed:Function]
# @PURPOSE: Ensure execution crash conditions force FAILED run status.
# @PRE: Run exists, then required dependency becomes unavailable before execute_stages.
# @POST: execute_stages persists run with FAILED status.
def test_task_crash_mid_run_marks_failed():
    """Losing a required dependency mid-run must drive the run to FAILED."""
    repo, cand_id, pol_id, man_id = _seed_with_candidate_policy_registry(
        with_manifest=True
    )
    orchestrator = CleanComplianceOrchestrator(repo)
    run = orchestrator.start_check_run(
        candidate_id=cand_id,
        policy_id=pol_id,
        requested_by="tester",
        manifest_id=man_id,
    )
    # Simulate mid-run crash dependency loss: registry snapshot disappears.
    repo.registries.clear()
    crashed_run = orchestrator.execute_stages(run)
    assert crashed_run.status == RunStatus.FAILED
# [/DEF:test_task_crash_mid_run_marks_failed:Function]

# [DEF:test_blocked_run_finalization_blocks_report_builder:Function]
# @PURPOSE: Ensure blocked runs require blocking violations before report creation.
# @PRE: Manifest contains prohibited artifacts leading to BLOCKED decision.
# @POST: finalize keeps BLOCKED and report_builder rejects zero blocking violations.
def test_blocked_run_finalization_blocks_report_builder():
    """A BLOCKED run must carry blocking violations before a report can build."""
    repo, cand_id, pol_id, man_id = _seed_with_candidate_policy_registry(
        with_manifest=True,
        prohibited_detected_count=1,
    )
    orchestrator = CleanComplianceOrchestrator(repo)
    builder = ComplianceReportBuilder(repo)
    run = orchestrator.start_check_run(
        candidate_id=cand_id,
        policy_id=pol_id,
        requested_by="tester",
        manifest_id=man_id,
    )
    run = orchestrator.execute_stages(run)
    run = orchestrator.finalize_run(run)
    # Prohibited artifacts in the manifest summary yield a BLOCKED decision,
    # while the run itself still completes successfully.
    assert run.final_status == ComplianceDecision.BLOCKED
    assert run.status == RunStatus.SUCCEEDED
    # An empty violation list is inconsistent with BLOCKED and must be rejected.
    with pytest.raises(ValueError, match="Blocked run requires at least one blocking violation"):
        builder.build_report_payload(run, [])
# [/DEF:test_blocked_run_finalization_blocks_report_builder:Function]

# [/DEF:backend.tests.services.clean_release.test_compliance_execution_service:Module]