semantic clean up

This commit is contained in:
2026-03-10 19:38:10 +03:00
parent 31717870e3
commit 542835e0ff
31 changed files with 5392 additions and 6647 deletions

View File

@@ -22,3 +22,6 @@ def test_audit_check_run(mock_logger):
def test_audit_report(mock_logger):
audit_report("rep-1", "cand-1")
mock_logger.info.assert_called_with("[EXPLORE] clean-release report_id=rep-1 candidate=cand-1")
# [/DEF:backend.tests.services.clean_release.test_audit_service:Module]

View File

@@ -3,7 +3,7 @@
# @SEMANTICS: tests, clean-release, preparation, flow
# @PURPOSE: Validate release candidate preparation flow, including policy evaluation and manifest persistence.
# @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.clean_release.preparation_service
# @RELATION: [DEPENDS_ON] ->[backend.src.services.clean_release.preparation_service:Module]
# @INVARIANT: Candidate preparation always persists manifest and candidate status deterministically.
import pytest
@@ -21,6 +21,8 @@ from src.models.clean_release import (
)
from src.services.clean_release.preparation_service import prepare_candidate
# [DEF:backend.tests.services.clean_release.test_preparation_service._mock_policy:Function]
# @PURPOSE: Build a valid clean profile policy fixture for preparation tests.
def _mock_policy() -> CleanProfilePolicy:
return CleanProfilePolicy(
policy_id="pol-1",
@@ -33,7 +35,10 @@ def _mock_policy() -> CleanProfilePolicy:
effective_from=datetime.now(timezone.utc),
profile=ProfileType.ENTERPRISE_CLEAN,
)
# [/DEF:backend.tests.services.clean_release.test_preparation_service._mock_policy:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service._mock_registry:Function]
# @PURPOSE: Build an internal-only source registry fixture for preparation tests.
def _mock_registry() -> ResourceSourceRegistry:
return ResourceSourceRegistry(
registry_id="reg-1",
@@ -42,7 +47,10 @@ def _mock_registry() -> ResourceSourceRegistry:
updated_at=datetime.now(timezone.utc),
updated_by="tester"
)
# [/DEF:backend.tests.services.clean_release.test_preparation_service._mock_registry:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service._mock_candidate:Function]
# @PURPOSE: Build a draft release candidate fixture with provided identifier.
def _mock_candidate(candidate_id: str) -> ReleaseCandidate:
return ReleaseCandidate(
candidate_id=candidate_id,
@@ -53,7 +61,15 @@ def _mock_candidate(candidate_id: str) -> ReleaseCandidate:
created_by="tester",
source_snapshot_ref="v1.0.0-snapshot"
)
# [/DEF:backend.tests.services.clean_release.test_preparation_service._mock_candidate:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_success:Function]
# @PURPOSE: Verify candidate transitions to PREPARED when evaluation returns no violations.
# @TEST_CONTRACT: [valid_candidate + active_policy + internal_sources + no_violations] -> [status=PREPARED, manifest_persisted, candidate_saved]
# @TEST_SCENARIO: [prepare_success] -> [prepared status and persistence side effects are produced]
# @TEST_FIXTURE: [INLINE_MOCKS] -> INLINE_JSON
# @TEST_EDGE: [external_fail] -> [none; dependency interactions mocked and successful]
# @TEST_INVARIANT: [prepared_flow_persists_state] -> VERIFIED_BY: [prepare_success]
def test_prepare_candidate_success():
# Setup
repository = MagicMock()
@@ -82,7 +98,15 @@ def test_prepare_candidate_success():
assert candidate.status == ReleaseCandidateStatus.PREPARED
repository.save_manifest.assert_called_once()
repository.save_candidate.assert_called_with(candidate)
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_success:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_with_violations:Function]
# @PURPOSE: Verify candidate transitions to BLOCKED when evaluation returns blocking violations.
# @TEST_CONTRACT: [valid_candidate + active_policy + evaluation_with_violations] -> [status=BLOCKED, violations_exposed]
# @TEST_SCENARIO: [prepare_blocked_due_to_policy] -> [blocked status and violation list are produced]
# @TEST_FIXTURE: [INLINE_MOCKS] -> INLINE_JSON
# @TEST_EDGE: [external_fail] -> [none; dependency interactions mocked and successful]
# @TEST_INVARIANT: [blocked_flow_reports_violations] -> VERIFIED_BY: [prepare_blocked_due_to_policy]
def test_prepare_candidate_with_violations():
# Setup
repository = MagicMock()
@@ -110,14 +134,30 @@ def test_prepare_candidate_with_violations():
assert result["status"] == ReleaseCandidateStatus.BLOCKED.value
assert candidate.status == ReleaseCandidateStatus.BLOCKED
assert len(result["violations"]) == 1
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_with_violations:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_not_found:Function]
# @PURPOSE: Verify preparation raises ValueError when candidate does not exist.
# @TEST_CONTRACT: [missing_candidate] -> [ValueError('Candidate not found')]
# @TEST_SCENARIO: [prepare_missing_candidate] -> [raises candidate not found error]
# @TEST_FIXTURE: [INLINE_MOCKS] -> INLINE_JSON
# @TEST_EDGE: [missing_field] -> [candidate lookup returns None]
# @TEST_INVARIANT: [missing_candidate_is_rejected] -> VERIFIED_BY: [prepare_missing_candidate]
def test_prepare_candidate_not_found():
repository = MagicMock()
repository.get_candidate.return_value = None
with pytest.raises(ValueError, match="Candidate not found"):
prepare_candidate(repository, "non-existent", [], [], "op")
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_not_found:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_no_active_policy:Function]
# @PURPOSE: Verify preparation raises ValueError when no active policy is available.
# @TEST_CONTRACT: [candidate_present + missing_active_policy] -> [ValueError('Active clean policy not found')]
# @TEST_SCENARIO: [prepare_missing_policy] -> [raises active policy missing error]
# @TEST_FIXTURE: [INLINE_MOCKS] -> INLINE_JSON
# @TEST_EDGE: [invalid_type] -> [policy dependency resolves to None]
# @TEST_INVARIANT: [active_policy_required] -> VERIFIED_BY: [prepare_missing_policy]
def test_prepare_candidate_no_active_policy():
repository = MagicMock()
repository.get_candidate.return_value = _mock_candidate("cand-1")
@@ -125,3 +165,7 @@ def test_prepare_candidate_no_active_policy():
with pytest.raises(ValueError, match="Active clean policy not found"):
prepare_candidate(repository, "cand-1", [], [], "op")
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_no_active_policy:Function]
# [/DEF:backend.tests.services.clean_release.test_preparation_service:Module]

View File

@@ -55,4 +55,6 @@ def test_validate_internal_sources_external_blocked():
assert result["ok"] is False
assert len(result["violations"]) == 1
assert result["violations"][0]["category"] == "external-source"
assert result["violations"][0]["blocked_release"] is True
assert result["violations"][0]["blocked_release"] is True
# [/DEF:backend.tests.services.clean_release.test_source_isolation:Module]

View File

@@ -25,3 +25,6 @@ def test_derive_final_status_failed_skipped():
results = [CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok") for s in MANDATORY_STAGE_ORDER]
results[2].status = CheckStageStatus.SKIPPED
assert derive_final_status(results) == CheckFinalStatus.FAILED
# [/DEF:backend.tests.services.clean_release.test_stages:Module]

View File

@@ -35,89 +35,117 @@ from ...models.clean_release import (
from .policy_engine import CleanPolicyEngine
from .repository import CleanReleaseRepository
from .stages import derive_final_status
from ...core.logger import belief_scope
# [DEF:CleanComplianceOrchestrator:Class]
# @PURPOSE: Coordinate clean-release compliance verification stages.
class CleanComplianceOrchestrator:
# [DEF:CleanComplianceOrchestrator.__init__:Function]
# @PURPOSE: Bind repository dependency used for orchestrator persistence and lookups.
# @PRE: repository is a valid CleanReleaseRepository instance with required methods.
# @POST: self.repository is assigned and used by all orchestration steps.
# @SIDE_EFFECT: Stores repository reference on orchestrator instance.
# @DATA_CONTRACT: Input -> CleanReleaseRepository, Output -> None
def __init__(self, repository: CleanReleaseRepository):
self.repository = repository
with belief_scope("CleanComplianceOrchestrator.__init__"):
self.repository = repository
# [/DEF:CleanComplianceOrchestrator.__init__:Function]
# [DEF:start_check_run:Function]
# @PURPOSE: Initiate a new compliance run session.
# @PRE: candidate_id and policy_id must exist in repository.
# @POST: Returns initialized ComplianceRun in RUNNING state.
# @PRE: candidate_id/policy_id/manifest_id identify existing records in repository.
# @POST: Returns initialized ComplianceRun in RUNNING state persisted in repository.
# @SIDE_EFFECT: Reads manifest/policy and writes new ComplianceRun via repository.save_check_run.
# @DATA_CONTRACT: Input -> (candidate_id:str, policy_id:str, requested_by:str, manifest_id:str), Output -> ComplianceRun
def start_check_run(self, candidate_id: str, policy_id: str, requested_by: str, manifest_id: str) -> ComplianceRun:
manifest = self.repository.get_manifest(manifest_id)
policy = self.repository.get_policy(policy_id)
if not manifest or not policy:
raise ValueError("Manifest or Policy not found")
with belief_scope("start_check_run"):
manifest = self.repository.get_manifest(manifest_id)
policy = self.repository.get_policy(policy_id)
if not manifest or not policy:
raise ValueError("Manifest or Policy not found")
check_run = ComplianceRun(
id=f"check-{uuid4()}",
candidate_id=candidate_id,
manifest_id=manifest_id,
manifest_digest=manifest.manifest_digest,
policy_snapshot_id=policy_id,
registry_snapshot_id=policy.registry_snapshot_id,
requested_by=requested_by,
requested_at=datetime.now(timezone.utc),
status=RunStatus.RUNNING,
)
return self.repository.save_check_run(check_run)
check_run = ComplianceRun(
id=f"check-{uuid4()}",
candidate_id=candidate_id,
manifest_id=manifest_id,
manifest_digest=manifest.manifest_digest,
policy_snapshot_id=policy_id,
registry_snapshot_id=policy.registry_snapshot_id,
requested_by=requested_by,
requested_at=datetime.now(timezone.utc),
status=RunStatus.RUNNING,
)
return self.repository.save_check_run(check_run)
# [/DEF:start_check_run:Function]
# [DEF:execute_stages:Function]
# @PURPOSE: Execute or accept compliance stage outcomes and set intermediate/final check-run status fields.
# @PRE: check_run exists and references candidate/policy/registry/manifest identifiers resolvable by repository.
# @POST: Returns persisted ComplianceRun with status FAILED on missing dependencies, otherwise SUCCEEDED with final_status set.
# @SIDE_EFFECT: Reads candidate/policy/registry/manifest and persists updated check_run.
# @DATA_CONTRACT: Input -> (check_run:ComplianceRun, forced_results:Optional[List[ComplianceStageRun]]), Output -> ComplianceRun
def execute_stages(self, check_run: ComplianceRun, forced_results: Optional[List[ComplianceStageRun]] = None) -> ComplianceRun:
if forced_results is not None:
# In a real scenario, we'd persist these stages.
with belief_scope("execute_stages"):
if forced_results is not None:
# In a real scenario, we'd persist these stages.
return self.repository.save_check_run(check_run)
# Real Logic Integration
candidate = self.repository.get_candidate(check_run.candidate_id)
policy = self.repository.get_policy(check_run.policy_snapshot_id)
if not candidate or not policy:
check_run.status = RunStatus.FAILED
return self.repository.save_check_run(check_run)
registry = self.repository.get_registry(check_run.registry_snapshot_id)
manifest = self.repository.get_manifest(check_run.manifest_id)
if not registry or not manifest:
check_run.status = RunStatus.FAILED
return self.repository.save_check_run(check_run)
# Simulate stage execution and violation detection
# 1. DATA_PURITY
summary = manifest.content_json.get("summary", {})
purity_ok = summary.get("prohibited_detected_count", 0) == 0
if not purity_ok:
check_run.final_status = ComplianceDecision.BLOCKED
else:
check_run.final_status = ComplianceDecision.PASSED
check_run.status = RunStatus.SUCCEEDED
check_run.finished_at = datetime.now(timezone.utc)
return self.repository.save_check_run(check_run)
# Real Logic Integration
candidate = self.repository.get_candidate(check_run.candidate_id)
policy = self.repository.get_policy(check_run.policy_snapshot_id)
if not candidate or not policy:
check_run.status = RunStatus.FAILED
return self.repository.save_check_run(check_run)
registry = self.repository.get_registry(check_run.registry_snapshot_id)
manifest = self.repository.get_manifest(check_run.manifest_id)
if not registry or not manifest:
check_run.status = RunStatus.FAILED
return self.repository.save_check_run(check_run)
# Simulate stage execution and violation detection
# 1. DATA_PURITY
summary = manifest.content_json.get("summary", {})
purity_ok = summary.get("prohibited_detected_count", 0) == 0
if not purity_ok:
check_run.final_status = ComplianceDecision.BLOCKED
else:
check_run.final_status = ComplianceDecision.PASSED
check_run.status = RunStatus.SUCCEEDED
check_run.finished_at = datetime.now(timezone.utc)
return self.repository.save_check_run(check_run)
# [/DEF:execute_stages:Function]
# [DEF:finalize_run:Function]
# @PURPOSE: Finalize run status based on cumulative stage results.
# @POST: Status derivation follows strict MANDATORY_STAGE_ORDER.
# @PRE: check_run was started and may already contain a derived final_status from stage execution.
# @POST: Returns persisted ComplianceRun in SUCCEEDED status with final_status guaranteed non-empty.
# @SIDE_EFFECT: Mutates check_run terminal fields and persists via repository.save_check_run.
# @DATA_CONTRACT: Input -> ComplianceRun, Output -> ComplianceRun
def finalize_run(self, check_run: ComplianceRun) -> ComplianceRun:
# If not already set by execute_stages
if not check_run.final_status:
check_run.final_status = ComplianceDecision.PASSED
check_run.status = RunStatus.SUCCEEDED
check_run.finished_at = datetime.now(timezone.utc)
return self.repository.save_check_run(check_run)
with belief_scope("finalize_run"):
# If not already set by execute_stages
if not check_run.final_status:
check_run.final_status = ComplianceDecision.PASSED
check_run.status = RunStatus.SUCCEEDED
check_run.finished_at = datetime.now(timezone.utc)
return self.repository.save_check_run(check_run)
# [/DEF:finalize_run:Function]
# [/DEF:CleanComplianceOrchestrator:Class]
# [DEF:run_check_legacy:Function]
# @PURPOSE: Legacy wrapper for compatibility with previous orchestrator call style.
# @PRE: Candidate/policy/manifest identifiers are valid for repository.
# @POST: Returns finalized ComplianceRun produced by orchestrator.
# @PRE: repository and identifiers are valid and resolvable by orchestrator dependencies.
# @POST: Returns finalized ComplianceRun produced by orchestrator start->execute->finalize sequence.
# @SIDE_EFFECT: Reads/writes compliance entities through repository during orchestrator calls.
# @DATA_CONTRACT: Input -> (repository:CleanReleaseRepository, candidate_id:str, policy_id:str, requested_by:str, manifest_id:str), Output -> ComplianceRun
def run_check_legacy(
repository: CleanReleaseRepository,
candidate_id: str,
@@ -125,14 +153,15 @@ def run_check_legacy(
requested_by: str,
manifest_id: str,
) -> ComplianceRun:
orchestrator = CleanComplianceOrchestrator(repository)
run = orchestrator.start_check_run(
candidate_id=candidate_id,
policy_id=policy_id,
requested_by=requested_by,
manifest_id=manifest_id,
)
run = orchestrator.execute_stages(run)
return orchestrator.finalize_run(run)
with belief_scope("run_check_legacy"):
orchestrator = CleanComplianceOrchestrator(repository)
run = orchestrator.start_check_run(
candidate_id=candidate_id,
policy_id=policy_id,
requested_by=requested_by,
manifest_id=manifest_id,
)
run = orchestrator.execute_stages(run)
return orchestrator.finalize_run(run)
# [/DEF:run_check_legacy:Function]
# [/DEF:backend.src.services.clean_release.compliance_orchestrator:Module]