fix: commit semantic repair changes

2026-03-21 11:22:25 +03:00
parent 0900208c1a
commit abee05558f
272 changed files with 4603 additions and 1668 deletions
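All of the hunks below apply the same semantic-annotation repair: path-based module tags are renamed to CamelCase names (for example # [DEF:TestAssistantAuthz:Module]), per-function # [DEF] blocks with @RELATION: BINDS_TO links to the enclosing module are added, class-prefixed @RELATION: CALLS targets are simplified, and long lines are rewrapped. A minimal sketch of the resulting annotation shape, assembled from the clean-release hunks below (the stub body is illustrative only):

# [DEF:TestCleanReleaseApi:Module]
# @COMPLEXITY: 3
# @PURPOSE: Contract tests for clean release checks and reports endpoints.
# [DEF:test_get_report_not_found_returns_404:Function]
# @RELATION: BINDS_TO -> TestCleanReleaseApi
def test_get_report_not_found_returns_404():
    ...
# [/DEF:test_get_report_not_found_returns_404:Function]
# [/DEF:TestCleanReleaseApi:Module]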

View File

@@ -24,11 +24,13 @@ import starlette.requests
# [/SECTION]
# [DEF:router:Variable]
+# @RELATION: DEPENDS_ON -> fastapi.APIRouter
# @COMPLEXITY: 1
# @PURPOSE: APIRouter instance for authentication routes.
router = APIRouter(prefix="/api/auth", tags=["auth"])
# [/DEF:router:Variable]
# [DEF:login_for_access_token:Function]
# @COMPLEXITY: 3
# @PURPOSE: Authenticates a user and returns a JWT access token.
@@ -38,18 +40,19 @@ router = APIRouter(prefix="/api/auth", tags=["auth"])
# @PARAM: form_data (OAuth2PasswordRequestForm) - Login credentials.
# @PARAM: db (Session) - Auth database session.
# @RETURN: Token - The generated JWT token.
-# @RELATION: CALLS -> [AuthService.authenticate_user]
+# @RELATION: CALLS -> [authenticate_user]
-# @RELATION: CALLS -> [AuthService.create_session]
+# @RELATION: CALLS -> [create_session]
@router.post("/login", response_model=Token)
async def login_for_access_token(
-form_data: OAuth2PasswordRequestForm = Depends(),
-db: Session = Depends(get_auth_db)
+form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_auth_db)
):
with belief_scope("api.auth.login"):
auth_service = AuthService(db)
user = auth_service.authenticate_user(form_data.username, form_data.password)
if not user:
-log_security_event("LOGIN_FAILED", form_data.username, {"reason": "Invalid credentials"})
+log_security_event(
+"LOGIN_FAILED", form_data.username, {"reason": "Invalid credentials"}
+)
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
@@ -57,8 +60,11 @@ async def login_for_access_token(
)
log_security_event("LOGIN_SUCCESS", user.username, {"source": "LOCAL"})
return auth_service.create_session(user)
# [/DEF:login_for_access_token:Function]
# [DEF:read_users_me:Function]
# @COMPLEXITY: 3
# @PURPOSE: Retrieves the profile of the currently authenticated user.
@@ -71,8 +77,11 @@ async def login_for_access_token(
async def read_users_me(current_user: UserSchema = Depends(get_current_user)):
with belief_scope("api.auth.me"):
return current_user
# [/DEF:read_users_me:Function]
# [DEF:logout:Function]
# @COMPLEXITY: 3
# @PURPOSE: Logs out the current user (placeholder for session revocation).
@@ -87,8 +96,11 @@ async def logout(current_user: UserSchema = Depends(get_current_user)):
# In a stateless JWT setup, client-side token deletion is primary.
# Server-side revocation (blacklisting) can be added here if needed.
return {"message": "Successfully logged out"}
# [/DEF:logout:Function]
# [DEF:login_adfs:Function]
# @COMPLEXITY: 3
# @PURPOSE: Initiates the ADFS OIDC login flow.
@@ -100,34 +112,43 @@ async def login_adfs(request: starlette.requests.Request):
if not is_adfs_configured():
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
-detail="ADFS is not configured. Please set ADFS_CLIENT_ID, ADFS_CLIENT_SECRET, and ADFS_METADATA_URL environment variables."
+detail="ADFS is not configured. Please set ADFS_CLIENT_ID, ADFS_CLIENT_SECRET, and ADFS_METADATA_URL environment variables.",
)
-redirect_uri = request.url_for('auth_callback_adfs')
+redirect_uri = request.url_for("auth_callback_adfs")
return await oauth.adfs.authorize_redirect(request, str(redirect_uri))
# [/DEF:login_adfs:Function]
# [DEF:auth_callback_adfs:Function]
# @COMPLEXITY: 3
# @PURPOSE: Handles the callback from ADFS after successful authentication.
# @POST: Provisions user JIT and returns session token.
-# @RELATION: CALLS -> [AuthService.provision_adfs_user]
+# @RELATION: CALLS -> [provision_adfs_user]
-# @RELATION: CALLS -> [AuthService.create_session]
+# @RELATION: CALLS -> [create_session]
@router.get("/callback/adfs", name="auth_callback_adfs")
-async def auth_callback_adfs(request: starlette.requests.Request, db: Session = Depends(get_auth_db)):
+async def auth_callback_adfs(
+request: starlette.requests.Request, db: Session = Depends(get_auth_db)
+):
with belief_scope("api.auth.callback_adfs"):
if not is_adfs_configured():
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
-detail="ADFS is not configured. Please set ADFS_CLIENT_ID, ADFS_CLIENT_SECRET, and ADFS_METADATA_URL environment variables."
+detail="ADFS is not configured. Please set ADFS_CLIENT_ID, ADFS_CLIENT_SECRET, and ADFS_METADATA_URL environment variables.",
)
token = await oauth.adfs.authorize_access_token(request)
-user_info = token.get('userinfo')
+user_info = token.get("userinfo")
if not user_info:
-raise HTTPException(status_code=400, detail="Failed to retrieve user info from ADFS")
+raise HTTPException(
+status_code=400, detail="Failed to retrieve user info from ADFS"
+)
auth_service = AuthService(db)
user = auth_service.provision_adfs_user(user_info)
return auth_service.create_session(user)
# [/DEF:auth_callback_adfs:Function]
# [/DEF:AuthApi:Module]

View File

@@ -1,5 +1,5 @@
# [DEF:AssistantApiTests:Module]
-# @C: 3
+# @COMPLEXITY: 3
# @SEMANTICS: tests, assistant, api
# @PURPOSE: Validate assistant API endpoint logic via direct async handler invocation.
# @RELATION: DEPENDS_ON -> backend.src.api.routes.assistant
@@ -21,15 +21,26 @@ from src.models.assistant import AssistantMessageRecord
# [DEF:_run_async:Function]
+# @RELATION: BINDS_TO -> AssistantApiTests
def _run_async(coro):
return asyncio.run(coro)
# [/DEF:_run_async:Function]
# [DEF:_FakeTask:Class]
# @RELATION: BINDS_TO -> [AssistantApiTests]
class _FakeTask:
-def __init__(self, id, status="SUCCESS", plugin_id="unknown", params=None, result=None, user_id=None):
+def __init__(
+self,
+id,
+status="SUCCESS",
+plugin_id="unknown",
+params=None,
+result=None,
+user_id=None,
+):
self.id = id
self.status = status
self.plugin_id = plugin_id
@@ -38,18 +49,29 @@ class _FakeTask:
self.user_id = user_id
self.started_at = datetime.utcnow()
self.finished_at = datetime.utcnow()
# [/DEF:_FakeTask:Class]
# [DEF:_FakeTaskManager:Class]
# @RELATION: BINDS_TO -> [AssistantApiTests]
+# @COMPLEXITY: 2
+# @PURPOSE: In-memory task manager stub that records created tasks for route-level assertions.
+# @INVARIANT: create_task stores tasks retrievable by get_task/get_tasks without external side effects.
class _FakeTaskManager:
def __init__(self):
self.tasks = {}
async def create_task(self, plugin_id, params, user_id=None):
task_id = f"task-{uuid.uuid4().hex[:8]}"
-task = _FakeTask(task_id, status="STARTED", plugin_id=plugin_id, params=params, user_id=user_id)
+task = _FakeTask(
+task_id,
+status="STARTED",
+plugin_id=plugin_id,
+params=params,
+user_id=user_id,
+)
self.tasks[task_id] = task
return task
@@ -57,10 +79,14 @@ class _FakeTaskManager:
return self.tasks.get(task_id)
def get_tasks(self, limit=20, offset=0):
-return sorted(self.tasks.values(), key=lambda t: t.id, reverse=True)[offset : offset + limit]
+return sorted(self.tasks.values(), key=lambda t: t.id, reverse=True)[
+offset : offset + limit
+]
def get_all_tasks(self):
return list(self.tasks.values())
# [/DEF:_FakeTaskManager:Class]
@@ -79,14 +105,19 @@ class _FakeConfigManager:
class _Settings:
default_environment_id = "dev"
llm = {}
class _Config:
settings = _Settings()
environments = []
return _Config()
# [/DEF:_FakeConfigManager:Class]
# [DEF:_admin_user:Function]
+# @RELATION: BINDS_TO -> AssistantApiTests
def _admin_user():
user = MagicMock(spec=User)
user.id = "u-admin"
@@ -95,16 +126,21 @@ def _admin_user():
role.name = "Admin"
user.roles = [role]
return user
# [/DEF:_admin_user:Function]
# [DEF:_limited_user:Function]
+# @RELATION: BINDS_TO -> AssistantApiTests
def _limited_user():
user = MagicMock(spec=User)
user.id = "u-limited"
user.username = "limited"
user.roles = []
return user
# [/DEF:_limited_user:Function]
@@ -136,11 +172,16 @@ class _FakeQuery:
def count(self):
return len(self.items)
# [/DEF:_FakeQuery:Class]
# [DEF:_FakeDb:Class]
# @RELATION: BINDS_TO -> [AssistantApiTests]
+# @COMPLEXITY: 2
+# @PURPOSE: Explicit in-memory DB session double limited to assistant message persistence paths.
+# @INVARIANT: query/add/merge stay deterministic and never emulate unrelated SQLAlchemy behavior.
class _FakeDb:
def __init__(self):
self.added = []
@@ -164,19 +205,25 @@ class _FakeDb:
def refresh(self, obj):
pass
# [/DEF:_FakeDb:Class]
# [DEF:_clear_assistant_state:Function]
+# @RELATION: BINDS_TO -> AssistantApiTests
def _clear_assistant_state():
assistant_routes.CONVERSATIONS.clear()
assistant_routes.USER_ACTIVE_CONVERSATION.clear()
assistant_routes.CONFIRMATIONS.clear()
assistant_routes.ASSISTANT_AUDIT.clear()
# [/DEF:_clear_assistant_state:Function]
# [DEF:test_unknown_command_returns_needs_clarification:Function]
+# @RELATION: BINDS_TO -> AssistantApiTests
# @PURPOSE: Unknown command should return clarification state and unknown intent.
def test_unknown_command_returns_needs_clarification(monkeypatch):
_clear_assistant_state()
@@ -185,35 +232,44 @@ def test_unknown_command_returns_needs_clarification(monkeypatch):
# We mock LLM planner to return low confidence
monkeypatch.setattr(assistant_routes, "_plan_intent_with_llm", lambda *a, **k: None)
-resp = _run_async(assistant_routes.send_message(
+resp = _run_async(
+assistant_routes.send_message(
req,
current_user=_admin_user(),
task_manager=_FakeTaskManager(),
config_manager=_FakeConfigManager(),
-db=_FakeDb()
+db=_FakeDb(),
-))
+)
+)
assert resp.state == "needs_clarification"
assert "уточните" in resp.text.lower() or "неоднозначна" in resp.text.lower()
# [/DEF:test_unknown_command_returns_needs_clarification:Function]
# [DEF:test_capabilities_question_returns_successful_help:Function]
+# @RELATION: BINDS_TO -> AssistantApiTests
# @PURPOSE: Capability query should return deterministic help response.
def test_capabilities_question_returns_successful_help(monkeypatch):
_clear_assistant_state()
req = assistant_routes.AssistantMessageRequest(message="что ты умеешь?")
-resp = _run_async(assistant_routes.send_message(
+resp = _run_async(
+assistant_routes.send_message(
req,
current_user=_admin_user(),
task_manager=_FakeTaskManager(),
config_manager=_FakeConfigManager(),
-db=_FakeDb()
+db=_FakeDb(),
-))
+)
+)
assert resp.state == "success"
assert "я могу сделать" in resp.text.lower()
# [/DEF:test_capabilities_question_returns_successful_help:Function]
# ... (rest of the file's diff trimmed for length; remaining [DEF] blocks unchanged apart from added @RELATION annotations)

View File

@@ -1,4 +1,4 @@
-# [DEF:backend.src.api.routes.__tests__.test_assistant_authz:Module]
+# [DEF:TestAssistantAuthz:Module]
# @COMPLEXITY: 3
# @SEMANTICS: tests, assistant, authz, confirmation, rbac
# @PURPOSE: Verify assistant confirmation ownership, expiration, and deny behavior for restricted users.
@@ -16,8 +16,12 @@ from fastapi import HTTPException
# Force isolated sqlite databases for test module before dependencies import.
os.environ.setdefault("DATABASE_URL", "sqlite:////tmp/ss_tools_assistant_authz.db")
-os.environ.setdefault("TASKS_DATABASE_URL", "sqlite:////tmp/ss_tools_assistant_authz_tasks.db")
-os.environ.setdefault("AUTH_DATABASE_URL", "sqlite:////tmp/ss_tools_assistant_authz_auth.db")
+os.environ.setdefault(
+"TASKS_DATABASE_URL", "sqlite:////tmp/ss_tools_assistant_authz_tasks.db"
+)
+os.environ.setdefault(
+"AUTH_DATABASE_URL", "sqlite:////tmp/ss_tools_assistant_authz_auth.db"
+)
from src.api.routes import assistant as assistant_module
from src.models.assistant import (
@@ -28,6 +32,7 @@ from src.models.assistant import (
# [DEF:_run_async:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Execute async endpoint handler in synchronous test context.
# @PRE: coroutine is awaitable endpoint invocation.
@@ -37,7 +42,10 @@ def _run_async(coroutine):
# [/DEF:_run_async:Function]
# [DEF:_FakeTask:Class]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Lightweight task model used for assistant authz tests.
class _FakeTask:
@@ -49,8 +57,10 @@ class _FakeTask:
# [/DEF:_FakeTask:Class]
# [DEF:_FakeTaskManager:Class]
-# @COMPLEXITY: 1
-# @PURPOSE: Minimal task manager for deterministic operation creation and lookup.
+# @RELATION: BINDS_TO -> TestAssistantAuthz
+# @COMPLEXITY: 2
+# @PURPOSE: In-memory task manager double that records assistant-created tasks deterministically.
+# @INVARIANT: Only create_task/get_task/get_tasks behavior used by assistant authz routes is emulated.
class _FakeTaskManager:
def __init__(self):
self._created = []
@@ -73,6 +83,7 @@ class _FakeTaskManager:
# [/DEF:_FakeTaskManager:Class]
# [DEF:_FakeConfigManager:Class]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Provide deterministic environment aliases required by intent parsing.
class _FakeConfigManager:
@@ -85,6 +96,7 @@ class _FakeConfigManager:
# [/DEF:_FakeConfigManager:Class]
# [DEF:_admin_user:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Build admin principal fixture.
# @PRE: Test requires privileged principal for risky operations.
@@ -96,6 +108,7 @@ def _admin_user():
# [/DEF:_admin_user:Function]
# [DEF:_other_admin_user:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Build second admin principal fixture for ownership tests.
# @PRE: Ownership mismatch scenario needs distinct authenticated actor.
@@ -107,6 +120,7 @@ def _other_admin_user():
# [/DEF:_other_admin_user:Function]
# [DEF:_limited_user:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Build limited principal without required assistant execution privileges.
# @PRE: Permission denial scenario needs non-admin actor.
@@ -117,7 +131,10 @@ def _limited_user():
# [/DEF:_limited_user:Function]
# [DEF:_FakeQuery:Class]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Minimal chainable query object for fake DB interactions.
class _FakeQuery:
@@ -150,8 +167,10 @@ class _FakeQuery:
# [/DEF:_FakeQuery:Class]
# [DEF:_FakeDb:Class]
-# @COMPLEXITY: 1
-# @PURPOSE: In-memory session substitute for assistant route persistence calls.
+# @RELATION: BINDS_TO -> TestAssistantAuthz
+# @COMPLEXITY: 2
+# @PURPOSE: In-memory DB session double constrained to assistant message/confirmation/audit persistence paths.
+# @INVARIANT: query/add/merge are intentionally narrow and must not claim full SQLAlchemy Session semantics.
class _FakeDb:
def __init__(self):
self._messages = []
@@ -197,6 +216,7 @@ class _FakeDb:
# [/DEF:_FakeDb:Class]
# [DEF:_clear_assistant_state:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @COMPLEXITY: 1
# @PURPOSE: Reset assistant process-local state between test cases.
# @PRE: Assistant globals may contain state from prior tests.
@@ -209,7 +229,10 @@ def _clear_assistant_state():
# [/DEF:_clear_assistant_state:Function]
# [DEF:test_confirmation_owner_mismatch_returns_403:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @PURPOSE: Confirm endpoint should reject requests from user that does not own the confirmation token.
# @PRE: Confirmation token is created by first admin actor.
# @POST: Second actor receives 403 on confirm operation.
@@ -245,7 +268,10 @@ def test_confirmation_owner_mismatch_returns_403():
# [/DEF:test_confirmation_owner_mismatch_returns_403:Function]
# [DEF:test_expired_confirmation_cannot_be_confirmed:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @PURPOSE: Expired confirmation token should be rejected and not create task.
# @PRE: Confirmation token exists and is manually expired before confirm request.
# @POST: Confirm endpoint raises 400 and no task is created.
@@ -265,7 +291,9 @@ def test_expired_confirmation_cannot_be_confirmed():
db=db,
)
)
-assistant_module.CONFIRMATIONS[create.confirmation_id].expires_at = datetime.utcnow() - timedelta(minutes=1)
+assistant_module.CONFIRMATIONS[create.confirmation_id].expires_at = (
+datetime.utcnow() - timedelta(minutes=1)
+)
with pytest.raises(HTTPException) as exc:
_run_async(
@@ -282,7 +310,10 @@ def test_expired_confirmation_cannot_be_confirmed():
# [/DEF:test_expired_confirmation_cannot_be_confirmed:Function]
# [DEF:test_limited_user_cannot_launch_restricted_operation:Function]
+# @RELATION: BINDS_TO -> TestAssistantAuthz
# @PURPOSE: Limited user should receive denied state for privileged operation.
# @PRE: Restricted user attempts dangerous deploy command.
# @POST: Assistant returns denied state and does not execute operation.
@@ -303,4 +334,4 @@ def test_limited_user_cannot_launch_restricted_operation():
# [/DEF:test_limited_user_cannot_launch_restricted_operation:Function]
-# [/DEF:backend.src.api.routes.__tests__.test_assistant_authz:Module]
+# [/DEF:TestAssistantAuthz:Module]

View File

@@ -1,9 +1,9 @@
-# [DEF:backend.tests.api.routes.test_clean_release_api:Module]
+# [DEF:TestCleanReleaseApi:Module]
+# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @SEMANTICS: tests, api, clean-release, checks, reports
# @PURPOSE: Contract tests for clean release checks and reports endpoints.
# @LAYER: Domain
-# @RELATION: TESTS -> backend.src.api.routes.clean_release
# @INVARIANT: API returns deterministic payload shapes for checks and reports.
from datetime import datetime, timezone
@@ -25,6 +25,8 @@ from src.models.clean_release import (
from src.services.clean_release.repository import CleanReleaseRepository
+# [DEF:_repo_with_seed_data:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseApi
def _repo_with_seed_data() -> CleanReleaseRepository:
repo = CleanReleaseRepository()
repo.save_candidate(
@@ -72,6 +74,11 @@ def _repo_with_seed_data() -> CleanReleaseRepository:
return repo
+# [/DEF:_repo_with_seed_data:Function]
+# [DEF:test_start_check_and_get_status_contract:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseApi
def test_start_check_and_get_status_contract():
repo = _repo_with_seed_data()
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -89,7 +96,9 @@ def test_start_check_and_get_status_contract():
)
assert start.status_code == 202
payload = start.json()
-assert set(["check_run_id", "candidate_id", "status", "started_at"]).issubset(payload.keys())
+assert set(["check_run_id", "candidate_id", "status", "started_at"]).issubset(
+payload.keys()
+)
check_run_id = payload["check_run_id"]
status_resp = client.get(f"/api/clean-release/checks/{check_run_id}")
@@ -102,6 +111,11 @@ def test_start_check_and_get_status_contract():
app.dependency_overrides.clear()
+# [/DEF:test_start_check_and_get_status_contract:Function]
+# [DEF:test_get_report_not_found_returns_404:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseApi
def test_get_report_not_found_returns_404():
repo = _repo_with_seed_data()
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -112,6 +126,12 @@ def test_get_report_not_found_returns_404():
finally:
app.dependency_overrides.clear()
+# [/DEF:test_get_report_not_found_returns_404:Function]
+# [DEF:test_get_report_success:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseApi
def test_get_report_success():
repo = _repo_with_seed_data()
report = ComplianceReport(
@@ -123,7 +143,7 @@ def test_get_report_success():
operator_summary="all systems go",
structured_payload_ref="manifest-1",
violations_count=0,
-blocking_violations_count=0
+blocking_violations_count=0,
)
repo.save_report(report)
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -135,8 +155,12 @@ def test_get_report_success():
finally:
app.dependency_overrides.clear()
-# [/DEF:backend.tests.api.routes.test_clean_release_api:Module]
+# [/DEF:test_get_report_success:Function]
+# [DEF:test_prepare_candidate_api_success:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseApi
def test_prepare_candidate_api_success():
repo = _repo_with_seed_data()
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -146,7 +170,9 @@ def test_prepare_candidate_api_success():
"/api/clean-release/candidates/prepare", "/api/clean-release/candidates/prepare",
json={ json={
"candidate_id": "2026.03.03-rc1", "candidate_id": "2026.03.03-rc1",
"artifacts": [{"path": "file1.txt", "category": "system-init", "reason": "core"}], "artifacts": [
{"path": "file1.txt", "category": "system-init", "reason": "core"}
],
"sources": ["repo.intra.company.local"], "sources": ["repo.intra.company.local"],
"operator_id": "operator-1", "operator_id": "operator-1",
}, },
@@ -157,3 +183,7 @@ def test_prepare_candidate_api_success():
assert "manifest_id" in data assert "manifest_id" in data
finally: finally:
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:test_prepare_candidate_api_success:Function]
# [/DEF:TestCleanReleaseApi:Module]

View File

@@ -1,8 +1,8 @@
-# [DEF:backend.src.api.routes.__tests__.test_clean_release_legacy_compat:Module]
+# [DEF:TestCleanReleaseLegacyCompat:Module]
+# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @PURPOSE: Compatibility tests for legacy clean-release API paths retained during v2 migration.
# @LAYER: Tests
-# @RELATION: TESTS -> backend.src.api.routes.clean_release
from __future__ import annotations
@@ -29,6 +29,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_seed_legacy_repo:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseLegacyCompat
# @PURPOSE: Seed in-memory repository with minimum trusted data for legacy endpoint contracts.
# @PRE: Repository is empty.
# @POST: Candidate, policy, registry and manifest are available for legacy checks flow.
@@ -111,6 +112,8 @@ def _seed_legacy_repo() -> CleanReleaseRepository:
# [/DEF:_seed_legacy_repo:Function]
+# [DEF:test_legacy_prepare_endpoint_still_available:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseLegacyCompat
def test_legacy_prepare_endpoint_still_available() -> None:
repo = _seed_legacy_repo()
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -133,6 +136,10 @@ def test_legacy_prepare_endpoint_still_available() -> None:
app.dependency_overrides.clear()
+# [/DEF:test_legacy_prepare_endpoint_still_available:Function]
+# [DEF:test_legacy_checks_endpoints_still_available:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseLegacyCompat
def test_legacy_checks_endpoints_still_available() -> None:
repo = _seed_legacy_repo()
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -162,4 +169,4 @@ def test_legacy_checks_endpoints_still_available() -> None:
app.dependency_overrides.clear()
-# [/DEF:backend.src.api.routes.__tests__.test_clean_release_legacy_compat:Module]
+# [/DEF:TestCleanReleaseLegacyCompat:Module]
+# [/DEF:test_legacy_checks_endpoints_still_available:Function]

View File

@@ -1,9 +1,9 @@
-# [DEF:backend.tests.api.routes.test_clean_release_source_policy:Module]
+# [DEF:TestCleanReleaseSourcePolicy:Module]
+# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3
# @SEMANTICS: tests, api, clean-release, source-policy
# @PURPOSE: Validate API behavior for source isolation violations in clean release preparation.
# @LAYER: Domain
-# @RELATION: TESTS -> backend.src.api.routes.clean_release
# @INVARIANT: External endpoints must produce blocking violation entries.
from datetime import datetime, timezone
@@ -22,6 +22,8 @@ from src.models.clean_release import (
from src.services.clean_release.repository import CleanReleaseRepository
+# [DEF:_repo_with_seed_data:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseSourcePolicy
def _repo_with_seed_data() -> CleanReleaseRepository:
repo = CleanReleaseRepository()
@@ -72,6 +74,10 @@ def _repo_with_seed_data() -> CleanReleaseRepository:
return repo
+# [/DEF:_repo_with_seed_data:Function]
+# [DEF:test_prepare_candidate_blocks_external_source:Function]
+# @RELATION: BINDS_TO -> TestCleanReleaseSourcePolicy
def test_prepare_candidate_blocks_external_source():
repo = _repo_with_seed_data()
app.dependency_overrides[get_clean_release_repository] = lambda: repo
@@ -97,4 +103,4 @@ def test_prepare_candidate_blocks_external_source():
app.dependency_overrides.clear()
-# [/DEF:backend.tests.api.routes.test_clean_release_source_policy:Module]
+# [/DEF:TestCleanReleaseSourcePolicy:Module]
+# [/DEF:test_prepare_candidate_blocks_external_source:Function]

View File

@@ -23,7 +23,10 @@ from src.services.clean_release.enums import CandidateStatus
client = TestClient(app)
# [REASON] Implementing API contract tests for candidate/artifact/manifest endpoints (T012).
+# [DEF:test_candidate_registration_contract:Function]
+# @RELATION: BINDS_TO -> CleanReleaseV2ApiTests
def test_candidate_registration_contract():
"""
@TEST_SCENARIO: candidate_registration -> Should return 201 and candidate DTO.
@@ -33,7 +36,7 @@ def test_candidate_registration_contract():
"id": "rc-test-001", "id": "rc-test-001",
"version": "1.0.0", "version": "1.0.0",
"source_snapshot_ref": "git:sha123", "source_snapshot_ref": "git:sha123",
"created_by": "test-user" "created_by": "test-user",
} }
response = client.post("/api/v2/clean-release/candidates", json=payload) response = client.post("/api/v2/clean-release/candidates", json=payload)
assert response.status_code == 201 assert response.status_code == 201
@@ -41,6 +44,12 @@ def test_candidate_registration_contract():
assert data["id"] == "rc-test-001" assert data["id"] == "rc-test-001"
assert data["status"] == CandidateStatus.DRAFT.value assert data["status"] == CandidateStatus.DRAFT.value
# [/DEF:test_candidate_registration_contract:Function]
# [DEF:test_artifact_import_contract:Function]
# @RELATION: BINDS_TO -> CleanReleaseV2ApiTests
def test_artifact_import_contract(): def test_artifact_import_contract():
""" """
@TEST_SCENARIO: artifact_import -> Should return 200 and success status. @TEST_SCENARIO: artifact_import -> Should return 200 and success status.
@@ -51,25 +60,30 @@ def test_artifact_import_contract():
"id": candidate_id, "id": candidate_id,
"version": "1.0.0", "version": "1.0.0",
"source_snapshot_ref": "git:sha123", "source_snapshot_ref": "git:sha123",
"created_by": "test-user" "created_by": "test-user",
} }
create_response = client.post("/api/v2/clean-release/candidates", json=bootstrap_candidate) create_response = client.post(
"/api/v2/clean-release/candidates", json=bootstrap_candidate
)
assert create_response.status_code == 201 assert create_response.status_code == 201
payload = { payload = {
"artifacts": [ "artifacts": [
{ {"id": "art-1", "path": "bin/app.exe", "sha256": "hash123", "size": 1024}
"id": "art-1",
"path": "bin/app.exe",
"sha256": "hash123",
"size": 1024
}
] ]
} }
response = client.post(f"/api/v2/clean-release/candidates/{candidate_id}/artifacts", json=payload) response = client.post(
f"/api/v2/clean-release/candidates/{candidate_id}/artifacts", json=payload
)
assert response.status_code == 200 assert response.status_code == 200
assert response.json()["status"] == "success" assert response.json()["status"] == "success"
# [/DEF:test_artifact_import_contract:Function]
# [DEF:test_manifest_build_contract:Function]
# @RELATION: BINDS_TO -> CleanReleaseV2ApiTests
def test_manifest_build_contract(): def test_manifest_build_contract():
""" """
@TEST_SCENARIO: manifest_build -> Should return 201 and manifest DTO. @TEST_SCENARIO: manifest_build -> Should return 201 and manifest DTO.
@@ -80,9 +94,11 @@ def test_manifest_build_contract():
"id": candidate_id, "id": candidate_id,
"version": "1.0.0", "version": "1.0.0",
"source_snapshot_ref": "git:sha123", "source_snapshot_ref": "git:sha123",
"created_by": "test-user" "created_by": "test-user",
} }
create_response = client.post("/api/v2/clean-release/candidates", json=bootstrap_candidate) create_response = client.post(
"/api/v2/clean-release/candidates", json=bootstrap_candidate
)
assert create_response.status_code == 201 assert create_response.status_code == 201
response = client.post(f"/api/v2/clean-release/candidates/{candidate_id}/manifests") response = client.post(f"/api/v2/clean-release/candidates/{candidate_id}/manifests")
@@ -91,4 +107,6 @@ def test_manifest_build_contract():
assert "manifest_digest" in data assert "manifest_digest" in data
assert data["candidate_id"] == candidate_id assert data["candidate_id"] == candidate_id
# [/DEF:test_manifest_build_contract:Function]
# [/DEF:CleanReleaseV2ApiTests:Module] # [/DEF:CleanReleaseV2ApiTests:Module]

View File

@@ -23,6 +23,8 @@ test_app.include_router(clean_release_v2_router)
client = TestClient(test_app)
+# [DEF:_seed_candidate_and_passed_report:Function]
+# @RELATION: BINDS_TO -> CleanReleaseV2ReleaseApiTests
def _seed_candidate_and_passed_report() -> tuple[str, str]:
repository = get_clean_release_repository()
candidate_id = f"api-release-candidate-{uuid4()}"
@@ -52,6 +54,10 @@ def _seed_candidate_and_passed_report() -> tuple[str, str]:
return candidate_id, report_id
+# [/DEF:_seed_candidate_and_passed_report:Function]
+# [DEF:test_release_approve_and_publish_revoke_contract:Function]
+# @RELATION: BINDS_TO -> CleanReleaseV2ReleaseApiTests
def test_release_approve_and_publish_revoke_contract() -> None:
"""Contract for approve -> publish -> revoke lifecycle endpoints."""
candidate_id, report_id = _seed_candidate_and_passed_report()
@@ -90,6 +96,10 @@ def test_release_approve_and_publish_revoke_contract() -> None:
assert revoke_payload["publication"]["status"] == "REVOKED"
+# [/DEF:test_release_approve_and_publish_revoke_contract:Function]
+# [DEF:test_release_reject_contract:Function]
+# @RELATION: BINDS_TO -> CleanReleaseV2ReleaseApiTests
def test_release_reject_contract() -> None:
"""Contract for reject endpoint."""
candidate_id, report_id = _seed_candidate_and_passed_report()
@@ -104,4 +114,4 @@ def test_release_reject_contract() -> None:
assert payload["decision"] == "REJECTED" assert payload["decision"] == "REJECTED"
# [/DEF:CleanReleaseV2ReleaseApiTests:Module] # [/DEF:CleanReleaseV2ReleaseApiTests:Module]# [/DEF:test_release_reject_contract:Function]

View File

@@ -39,6 +39,8 @@ def db_session():
session.close()
+# [DEF:test_list_connections_bootstraps_missing_table:Function]
+# @RELATION: BINDS_TO -> ConnectionsRoutesTests
def test_list_connections_bootstraps_missing_table(db_session):
from src.api.routes.connections import list_connections
@@ -49,6 +51,10 @@ def test_list_connections_bootstraps_missing_table(db_session):
assert "connection_configs" in inspector.get_table_names() assert "connection_configs" in inspector.get_table_names()
# [/DEF:test_list_connections_bootstraps_missing_table:Function]
# [DEF:test_create_connection_bootstraps_missing_table:Function]
# @RELATION: BINDS_TO -> ConnectionsRoutesTests
def test_create_connection_bootstraps_missing_table(db_session): def test_create_connection_bootstraps_missing_table(db_session):
from src.api.routes.connections import ConnectionCreate, create_connection from src.api.routes.connections import ConnectionCreate, create_connection
@@ -70,3 +76,4 @@ def test_create_connection_bootstraps_missing_table(db_session):
assert "connection_configs" in inspector.get_table_names() assert "connection_configs" in inspector.get_table_names()
# [/DEF:ConnectionsRoutesTests:Module] # [/DEF:ConnectionsRoutesTests:Module]
# [/DEF:test_create_connection_bootstraps_missing_table:Function]

View File

@@ -10,7 +10,14 @@ from datetime import datetime, timezone
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
-from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
+from src.dependencies import (
+get_current_user,
+has_permission,
+get_config_manager,
+get_task_manager,
+get_resource_service,
+get_mapping_service,
+)
from src.core.database import get_db
from src.services.profile_service import ProfileService as DomainProfileService
@@ -23,6 +30,7 @@ admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
@pytest.fixture(autouse=True)
def mock_deps():
config_manager = MagicMock()
@@ -39,9 +47,15 @@ def mock_deps():
app.dependency_overrides[get_current_user] = lambda: mock_user
app.dependency_overrides[get_db] = lambda: db
-app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
-app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
-app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
+app.dependency_overrides[has_permission("plugin:migration", "READ")] = (
+lambda: mock_user
+)
+app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = (
+lambda: mock_user
+)
+app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = (
+lambda: mock_user
+)
app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
yield {
@@ -53,10 +67,12 @@ def mock_deps():
}
app.dependency_overrides.clear()
client = TestClient(app)
# [DEF:test_get_dashboards_success:Function]
+# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboards listing returns a populated response that satisfies the schema contract.
# @TEST: GET /api/dashboards returns 200 and valid schema
# @PRE: env_id exists
@@ -69,15 +85,17 @@ def test_get_dashboards_success(mock_deps):
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
# @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"} # @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"}
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
return_value=[
{ {
"id": 1, "id": 1,
"title": "Main Revenue", "title": "Main Revenue",
"slug": "main-revenue", "slug": "main-revenue",
"git_status": {"branch": "main", "sync_status": "OK"}, "git_status": {"branch": "main", "sync_status": "OK"},
"last_task": {"task_id": "task-1", "status": "SUCCESS"} "last_task": {"task_id": "task-1", "status": "SUCCESS"},
} }
]) ]
)
response = client.get("/api/dashboards?env_id=prod") response = client.get("/api/dashboards?env_id=prod")
@@ -96,6 +114,7 @@ def test_get_dashboards_success(mock_deps):
# [DEF:test_get_dashboards_with_search:Function]
+# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboards listing applies the search filter and returns only matching rows.
# @TEST: GET /api/dashboards filters by search term
# @PRE: search parameter provided
@@ -108,9 +127,22 @@ def test_get_dashboards_with_search(mock_deps):
async def mock_get_dashboards(env, tasks, include_git_status=False):
return [
-{"id": 1, "title": "Sales Report", "slug": "sales", "git_status": {"branch": "main", "sync_status": "OK"}, "last_task": None},
-{"id": 2, "title": "Marketing Dashboard", "slug": "marketing", "git_status": {"branch": "main", "sync_status": "OK"}, "last_task": None}
+{
+"id": 1,
+"title": "Sales Report",
+"slug": "sales",
+"git_status": {"branch": "main", "sync_status": "OK"},
+"last_task": None,
+},
+{
+"id": 2,
+"title": "Marketing Dashboard",
+"slug": "marketing",
+"git_status": {"branch": "main", "sync_status": "OK"},
+"last_task": None,
+},
]
mock_deps["resource"].get_dashboards_with_status = AsyncMock(
side_effect=mock_get_dashboards
)
@@ -128,6 +160,7 @@ def test_get_dashboards_with_search(mock_deps):
# [DEF:test_get_dashboards_empty:Function]
+# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboards listing returns an empty payload for an environment without dashboards.
# @TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}
def test_get_dashboards_empty(mock_deps):
@@ -145,10 +178,13 @@ def test_get_dashboards_empty(mock_deps):
assert len(data["dashboards"]) == 0 assert len(data["dashboards"]) == 0
assert data["total_pages"] == 1 assert data["total_pages"] == 1
DashboardsResponse(**data) DashboardsResponse(**data)
# [/DEF:test_get_dashboards_empty:Function] # [/DEF:test_get_dashboards_empty:Function]
# [DEF:test_get_dashboards_superset_failure:Function] # [DEF:test_get_dashboards_superset_failure:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboards listing surfaces a 503 contract when Superset access fails. # @PURPOSE: Validate dashboards listing surfaces a 503 contract when Superset access fails.
# @TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503} # @TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}
def test_get_dashboards_superset_failure(mock_deps): def test_get_dashboards_superset_failure(mock_deps):
@@ -164,10 +200,13 @@ def test_get_dashboards_superset_failure(mock_deps):
response = client.get("/api/dashboards?env_id=bad_conn") response = client.get("/api/dashboards?env_id=bad_conn")
assert response.status_code == 503 assert response.status_code == 503
assert "Failed to fetch dashboards" in response.json()["detail"] assert "Failed to fetch dashboards" in response.json()["detail"]
# [/DEF:test_get_dashboards_superset_failure:Function] # [/DEF:test_get_dashboards_superset_failure:Function]
# [DEF:test_get_dashboards_env_not_found:Function] # [DEF:test_get_dashboards_env_not_found:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboards listing returns 404 when the requested environment does not exist. # @PURPOSE: Validate dashboards listing returns 404 when the requested environment does not exist.
# @TEST: GET /api/dashboards returns 404 if env_id missing # @TEST: GET /api/dashboards returns 404 if env_id missing
# @PRE: env_id does not exist # @PRE: env_id does not exist
@@ -184,6 +223,7 @@ def test_get_dashboards_env_not_found(mock_deps):
# [DEF:test_get_dashboards_invalid_pagination:Function]
+# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboards listing rejects invalid pagination parameters with 400 responses.
# @TEST: GET /api/dashboards returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
@@ -201,10 +241,13 @@ def test_get_dashboards_invalid_pagination(mock_deps):
response = client.get("/api/dashboards?env_id=prod&page_size=101") response = client.get("/api/dashboards?env_id=prod&page_size=101")
assert response.status_code == 400 assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"] assert "Page size must be between 1 and 100" in response.json()["detail"]
# [/DEF:test_get_dashboards_invalid_pagination:Function] # [/DEF:test_get_dashboards_invalid_pagination:Function]
# [DEF:test_get_dashboard_detail_success:Function] # [DEF:test_get_dashboard_detail_success:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboard detail returns charts and datasets for an existing dashboard. # @PURPOSE: Validate dashboard detail returns charts and datasets for an existing dashboard.
# @TEST: GET /api/dashboards/{id} returns dashboard detail with charts and datasets # @TEST: GET /api/dashboards/{id} returns dashboard detail with charts and datasets
def test_get_dashboard_detail_success(mock_deps): def test_get_dashboard_detail_success(mock_deps):
@@ -229,7 +272,7 @@ def test_get_dashboard_detail_success(mock_deps):
"viz_type": "line", "viz_type": "line",
"dataset_id": 7, "dataset_id": 7,
"last_modified": "2026-02-19T10:00:00+00:00", "last_modified": "2026-02-19T10:00:00+00:00",
"overview": "line" "overview": "line",
} }
], ],
"datasets": [ "datasets": [
@@ -239,11 +282,11 @@ def test_get_dashboard_detail_success(mock_deps):
"schema": "mart", "schema": "mart",
"database": "Analytics", "database": "Analytics",
"last_modified": "2026-02-18T10:00:00+00:00", "last_modified": "2026-02-18T10:00:00+00:00",
"overview": "mart.fact_revenue" "overview": "mart.fact_revenue",
} }
], ],
"chart_count": 1, "chart_count": 1,
"dataset_count": 1 "dataset_count": 1,
} }
mock_client_cls.return_value = mock_client mock_client_cls.return_value = mock_client
@@ -254,10 +297,13 @@ def test_get_dashboard_detail_success(mock_deps):
assert payload["id"] == 42 assert payload["id"] == 42
assert payload["chart_count"] == 1 assert payload["chart_count"] == 1
assert payload["dataset_count"] == 1 assert payload["dataset_count"] == 1
# [/DEF:test_get_dashboard_detail_success:Function] # [/DEF:test_get_dashboard_detail_success:Function]
# [DEF:test_get_dashboard_detail_env_not_found:Function] # [DEF:test_get_dashboard_detail_env_not_found:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboard detail returns 404 when the requested environment is missing. # @PURPOSE: Validate dashboard detail returns 404 when the requested environment is missing.
# @TEST: GET /api/dashboards/{id} returns 404 for missing environment # @TEST: GET /api/dashboards/{id} returns 404 for missing environment
def test_get_dashboard_detail_env_not_found(mock_deps): def test_get_dashboard_detail_env_not_found(mock_deps):
@@ -267,10 +313,13 @@ def test_get_dashboard_detail_env_not_found(mock_deps):
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_dashboard_detail_env_not_found:Function]
# [DEF:test_migrate_dashboards_success:Function]
+# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: POST /api/dashboards/migrate creates migration task
# @PRE: Valid source_env_id, target_env_id, dashboard_ids
# @PURPOSE: Validate dashboard migration request creates an async task and returns its identifier.
@@ -292,8 +341,8 @@ def test_migrate_dashboards_success(mock_deps):
"source_env_id": "source", "source_env_id": "source",
"target_env_id": "target", "target_env_id": "target",
"dashboard_ids": [1, 2, 3], "dashboard_ids": [1, 2, 3],
"db_mappings": {"old_db": "new_db"} "db_mappings": {"old_db": "new_db"},
} },
) )
assert response.status_code == 200 assert response.status_code == 200
@@ -307,6 +356,7 @@ def test_migrate_dashboards_success(mock_deps):
# [DEF:test_migrate_dashboards_no_ids:Function] # [DEF:test_migrate_dashboards_no_ids:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids # @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids
# @PRE: dashboard_ids is empty # @PRE: dashboard_ids is empty
# @PURPOSE: Validate dashboard migration rejects empty dashboard identifier lists. # @PURPOSE: Validate dashboard migration rejects empty dashboard identifier lists.
@@ -317,8 +367,8 @@ def test_migrate_dashboards_no_ids(mock_deps):
json={ json={
"source_env_id": "source", "source_env_id": "source",
"target_env_id": "target", "target_env_id": "target",
"dashboard_ids": [] "dashboard_ids": [],
} },
) )
assert response.status_code == 400 assert response.status_code == 400
@@ -329,6 +379,7 @@ def test_migrate_dashboards_no_ids(mock_deps):
# [DEF:test_migrate_dashboards_env_not_found:Function] # [DEF:test_migrate_dashboards_env_not_found:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate migration creation returns 404 when the source environment cannot be resolved. # @PURPOSE: Validate migration creation returns 404 when the source environment cannot be resolved.
# @PRE: source_env_id and target_env_id are valid environment IDs # @PRE: source_env_id and target_env_id are valid environment IDs
def test_migrate_dashboards_env_not_found(mock_deps): def test_migrate_dashboards_env_not_found(mock_deps):
@@ -336,18 +387,17 @@ def test_migrate_dashboards_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = [] mock_deps["config"].get_environments.return_value = []
response = client.post( response = client.post(
"/api/dashboards/migrate", "/api/dashboards/migrate",
json={ json={"source_env_id": "ghost", "target_env_id": "t", "dashboard_ids": [1]},
"source_env_id": "ghost",
"target_env_id": "t",
"dashboard_ids": [1]
}
) )
assert response.status_code == 404 assert response.status_code == 404
assert "Source environment not found" in response.json()["detail"] assert "Source environment not found" in response.json()["detail"]
# [/DEF:test_migrate_dashboards_env_not_found:Function] # [/DEF:test_migrate_dashboards_env_not_found:Function]
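For reference, the migration request shape exercised by these tests maps onto a plain HTTP call. A hedged client-side sketch (base URL, auth header, and the response key are assumptions; the JSON fields come from the tests):

import httpx

def request_dashboard_migration(base_url: str, token: str) -> dict:
    # Mirrors the payload used in test_migrate_dashboards_success; the response
    # is expected to carry the created task identifier.
    response = httpx.post(
        f"{base_url}/api/dashboards/migrate",
        headers={"Authorization": f"Bearer {token}"},
        json={
            "source_env_id": "source",
            "target_env_id": "target",
            "dashboard_ids": [1, 2, 3],
            "db_mappings": {"old_db": "new_db"},
        },
        timeout=30.0,
    )
    response.raise_for_status()
    return response.json()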
# [DEF:test_backup_dashboards_success:Function] # [DEF:test_backup_dashboards_success:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: POST /api/dashboards/backup creates backup task # @TEST: POST /api/dashboards/backup creates backup task
# @PRE: Valid env_id, dashboard_ids # @PRE: Valid env_id, dashboard_ids
# @PURPOSE: Validate dashboard backup request creates an async backup task and returns its identifier. # @PURPOSE: Validate dashboard backup request creates an async backup task and returns its identifier.
@@ -363,11 +413,7 @@ def test_backup_dashboards_success(mock_deps):
response = client.post( response = client.post(
"/api/dashboards/backup", "/api/dashboards/backup",
json={ json={"env_id": "prod", "dashboard_ids": [1, 2, 3], "schedule": "0 0 * * *"},
"env_id": "prod",
"dashboard_ids": [1, 2, 3],
"schedule": "0 0 * * *"
}
) )
assert response.status_code == 200 assert response.status_code == 200
@@ -381,24 +427,24 @@ def test_backup_dashboards_success(mock_deps):
# [DEF:test_backup_dashboards_env_not_found:Function] # [DEF:test_backup_dashboards_env_not_found:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate backup task creation returns 404 when the target environment is missing. # @PURPOSE: Validate backup task creation returns 404 when the target environment is missing.
# @PRE: env_id is a valid environment ID # @PRE: env_id is a valid environment ID
def test_backup_dashboards_env_not_found(mock_deps): def test_backup_dashboards_env_not_found(mock_deps):
"""@PRE: env_id is a valid environment ID.""" """@PRE: env_id is a valid environment ID."""
mock_deps["config"].get_environments.return_value = [] mock_deps["config"].get_environments.return_value = []
response = client.post( response = client.post(
"/api/dashboards/backup", "/api/dashboards/backup", json={"env_id": "ghost", "dashboard_ids": [1]}
json={
"env_id": "ghost",
"dashboard_ids": [1]
}
) )
assert response.status_code == 404 assert response.status_code == 404
assert "Environment not found" in response.json()["detail"] assert "Environment not found" in response.json()["detail"]
# [/DEF:test_backup_dashboards_env_not_found:Function] # [/DEF:test_backup_dashboards_env_not_found:Function]
# [DEF:test_get_database_mappings_success:Function] # [DEF:test_get_database_mappings_success:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards/db-mappings returns mapping suggestions # @TEST: GET /api/dashboards/db-mappings returns mapping suggestions
# @PRE: Valid source_env_id, target_env_id # @PRE: Valid source_env_id, target_env_id
# @PURPOSE: Validate database mapping suggestions are returned for valid source and target environments. # @PURPOSE: Validate database mapping suggestions are returned for valid source and target environments.
@@ -410,17 +456,21 @@ def test_get_database_mappings_success(mock_deps):
mock_target.id = "staging" mock_target.id = "staging"
mock_deps["config"].get_environments.return_value = [mock_source, mock_target] mock_deps["config"].get_environments.return_value = [mock_source, mock_target]
mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[ mock_deps["mapping"].get_suggestions = AsyncMock(
return_value=[
{ {
"source_db": "old_sales", "source_db": "old_sales",
"target_db": "new_sales", "target_db": "new_sales",
"source_db_uuid": "uuid-1", "source_db_uuid": "uuid-1",
"target_db_uuid": "uuid-2", "target_db_uuid": "uuid-2",
"confidence": 0.95 "confidence": 0.95,
} }
]) ]
)
response = client.get("/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging") response = client.get(
"/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging"
)
assert response.status_code == 200 assert response.status_code == 200
data = response.json() data = response.json()
@@ -433,17 +483,23 @@ def test_get_database_mappings_success(mock_deps):
# [DEF:test_get_database_mappings_env_not_found:Function] # [DEF:test_get_database_mappings_env_not_found:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate database mapping suggestions return 404 when either environment is missing. # @PURPOSE: Validate database mapping suggestions return 404 when either environment is missing.
# @PRE: source_env_id and target_env_id are valid environment IDs # @PRE: source_env_id and target_env_id are valid environment IDs
def test_get_database_mappings_env_not_found(mock_deps): def test_get_database_mappings_env_not_found(mock_deps):
"""@PRE: source_env_id must be a valid environment.""" """@PRE: source_env_id must be a valid environment."""
mock_deps["config"].get_environments.return_value = [] mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t") response = client.get(
"/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t"
)
assert response.status_code == 404 assert response.status_code == 404
# [/DEF:test_get_database_mappings_env_not_found:Function] # [/DEF:test_get_database_mappings_env_not_found:Function]
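The suggestion payload above pairs each source/target database with a confidence score. A small, hypothetical helper that reshapes such suggestions into the db_mappings dict accepted by the migrate request might look like this (the 0.8 threshold is an assumption):

def suggestions_to_db_mappings(suggestions, min_confidence=0.8):
    # Keep only confident suggestions and reshape them into the
    # {"source_db": "target_db"} form used by /api/dashboards/migrate.
    return {
        s["source_db"]: s["target_db"]
        for s in suggestions
        if s.get("confidence", 0.0) >= min_confidence
    }

# With the payload from test_get_database_mappings_success:
# suggestions_to_db_mappings([{"source_db": "old_sales", "target_db": "new_sales",
#                              "confidence": 0.95}]) -> {"old_sales": "new_sales"}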
# [DEF:test_get_dashboard_tasks_history_filters_success:Function] # [DEF:test_get_dashboard_tasks_history_filters_success:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboard task history returns only related backup and LLM tasks. # @PURPOSE: Validate dashboard task history returns only related backup and LLM tasks.
# @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard # @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
def test_get_dashboard_tasks_history_filters_success(mock_deps): def test_get_dashboard_tasks_history_filters_success(mock_deps):
@@ -484,11 +540,17 @@ def test_get_dashboard_tasks_history_filters_success(mock_deps):
data = response.json() data = response.json()
assert data["dashboard_id"] == 42 assert data["dashboard_id"] == 42
assert len(data["items"]) == 2 assert len(data["items"]) == 2
assert {item["plugin_id"] for item in data["items"]} == {"llm_dashboard_validation", "superset-backup"} assert {item["plugin_id"] for item in data["items"]} == {
"llm_dashboard_validation",
"superset-backup",
}
# [/DEF:test_get_dashboard_tasks_history_filters_success:Function] # [/DEF:test_get_dashboard_tasks_history_filters_success:Function]
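The history test only checks which plugin_ids survive filtering. One plausible sketch of that filter, with the params shape assumed beyond what the assertions show:

DASHBOARD_TASK_PLUGINS = {"superset-backup", "llm_dashboard_validation"}

def filter_dashboard_tasks(tasks, dashboard_id):
    # Keep backup and LLM-validation tasks that reference the dashboard;
    # the params key holding dashboard ids is an assumption.
    return [
        task
        for task in tasks
        if task.get("plugin_id") in DASHBOARD_TASK_PLUGINS
        and dashboard_id in task.get("params", {}).get("dashboard_ids", [])
    ]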
# [DEF:test_get_dashboard_thumbnail_success:Function] # [DEF:test_get_dashboard_thumbnail_success:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Validate dashboard thumbnail endpoint proxies image bytes and content type from Superset. # @PURPOSE: Validate dashboard thumbnail endpoint proxies image bytes and content type from Superset.
# @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset # @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
def test_get_dashboard_thumbnail_success(mock_deps): def test_get_dashboard_thumbnail_success(mock_deps):
@@ -516,26 +578,34 @@ def test_get_dashboard_thumbnail_success(mock_deps):
assert response.status_code == 200 assert response.status_code == 200
assert response.content == b"fake-image-bytes" assert response.content == b"fake-image-bytes"
assert response.headers["content-type"].startswith("image/png") assert response.headers["content-type"].startswith("image/png")
# [/DEF:test_get_dashboard_thumbnail_success:Function] # [/DEF:test_get_dashboard_thumbnail_success:Function]
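The thumbnail test asserts that raw bytes and the upstream content type pass through unchanged. A minimal proxy sketch under that contract (how the bytes are fetched from Superset is out of scope here):

from fastapi import Response

def proxy_thumbnail(image_bytes: bytes, content_type: str = "image/png") -> Response:
    # Return the upstream bytes as-is so the route preserves both the body and
    # the content-type header asserted above.
    return Response(content=image_bytes, media_type=content_type)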
# [DEF:_build_profile_preference_stub:Function] # [DEF:_build_profile_preference_stub:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Creates profile preference payload stub for dashboards filter contract tests. # @PURPOSE: Creates profile preference payload stub for dashboards filter contract tests.
# @PRE: username can be empty; enabled indicates profile-default toggle state. # @PRE: username can be empty; enabled indicates profile-default toggle state.
# @POST: Returns object compatible with ProfileService.get_my_preference contract. # @POST: Returns object compatible with ProfileService.get_my_preference contract.
def _build_profile_preference_stub(username: str, enabled: bool): def _build_profile_preference_stub(username: str, enabled: bool):
preference = MagicMock() preference = MagicMock()
preference.superset_username = username preference.superset_username = username
preference.superset_username_normalized = str(username or "").strip().lower() or None preference.superset_username_normalized = (
str(username or "").strip().lower() or None
)
preference.show_only_my_dashboards = bool(enabled) preference.show_only_my_dashboards = bool(enabled)
payload = MagicMock() payload = MagicMock()
payload.preference = preference payload.preference = preference
return payload return payload
# [/DEF:_build_profile_preference_stub:Function] # [/DEF:_build_profile_preference_stub:Function]
# [DEF:_matches_actor_case_insensitive:Function] # [DEF:_matches_actor_case_insensitive:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @PURPOSE: Applies trim + case-insensitive owners OR modified_by matching used by route contract tests. # @PURPOSE: Applies trim + case-insensitive owners OR modified_by matching used by route contract tests.
# @PRE: owners can be None or list-like values. # @PRE: owners can be None or list-like values.
# @POST: Returns True when bound username matches any owner or modified_by. # @POST: Returns True when bound username matches any owner or modified_by.
@@ -551,11 +621,16 @@ def _matches_actor_case_insensitive(bound_username, owners, modified_by):
owner_tokens.append(token) owner_tokens.append(token)
modified_token = str(modified_by or "").strip().lower() modified_token = str(modified_by or "").strip().lower()
return normalized_bound in owner_tokens or bool(modified_token and modified_token == normalized_bound) return normalized_bound in owner_tokens or bool(
modified_token and modified_token == normalized_bound
)
# [/DEF:_matches_actor_case_insensitive:Function] # [/DEF:_matches_actor_case_insensitive:Function]
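Only the tail of the matcher is visible in this hunk. A complete sketch of the trim + case-insensitive owners OR modified_by check it implements, with the unseen normalization treated as an assumption:

def matches_actor_case_insensitive(bound_username, owners, modified_by):
    # Normalize the bound username, each owner token, and modified_by the same
    # way, then match on owners OR modified_by.
    normalized_bound = str(bound_username or "").strip().lower()
    if not normalized_bound:
        return False
    owner_tokens = []
    for owner in owners or []:
        token = str(owner or "").strip().lower()
        if token:
            owner_tokens.append(token)
    modified_token = str(modified_by or "").strip().lower()
    return normalized_bound in owner_tokens or bool(
        modified_token and modified_token == normalized_bound
    )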
# [DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function] # [DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards applies profile-default filter with owners OR modified_by trim+case-insensitive semantics. # @TEST: GET /api/dashboards applies profile-default filter with owners OR modified_by trim+case-insensitive semantics.
# @PURPOSE: Validate profile-default filtering matches owner and modifier aliases using normalized Superset actor values. # @PURPOSE: Validate profile-default filtering matches owner and modifier aliases using normalized Superset actor values.
# @PRE: Current user has enabled profile-default preference and bound username. # @PRE: Current user has enabled profile-default preference and bound username.
@@ -565,7 +640,8 @@ def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps)
mock_env.id = "prod" mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
return_value=[
{ {
"id": 1, "id": 1,
"title": "Owner Match", "title": "Owner Match",
@@ -587,7 +663,8 @@ def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps)
"owners": ["another-user"], "owners": ["another-user"],
"modified_by": "nobody", "modified_by": "nobody",
}, },
]) ]
)
with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls: with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
profile_service = MagicMock() profile_service = MagicMock()
@@ -595,7 +672,9 @@ def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps)
username=" JOHN_DOE ", username=" JOHN_DOE ",
enabled=True, enabled=True,
) )
profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive profile_service.matches_dashboard_actor.side_effect = (
_matches_actor_case_insensitive
)
profile_service_cls.return_value = profile_service profile_service_cls.return_value = profile_service
response = client.get( response = client.get(
@@ -612,10 +691,13 @@ def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps)
assert payload["effective_profile_filter"]["override_show_all"] is False assert payload["effective_profile_filter"]["override_show_all"] is False
assert payload["effective_profile_filter"]["username"] == "john_doe" assert payload["effective_profile_filter"]["username"] == "john_doe"
assert payload["effective_profile_filter"]["match_logic"] == "owners_or_modified_by" assert payload["effective_profile_filter"]["match_logic"] == "owners_or_modified_by"
# [/DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function] # [/DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function]
# [DEF:test_get_dashboards_override_show_all_contract:Function] # [DEF:test_get_dashboards_override_show_all_contract:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards honors override_show_all and disables profile-default filter for current page. # @TEST: GET /api/dashboards honors override_show_all and disables profile-default filter for current page.
# @PURPOSE: Validate override_show_all bypasses profile-default filtering without changing dashboard list semantics. # @PURPOSE: Validate override_show_all bypasses profile-default filtering without changing dashboard list semantics.
# @PRE: Profile-default preference exists but override_show_all=true query is provided. # @PRE: Profile-default preference exists but override_show_all=true query is provided.
@@ -625,10 +707,24 @@ def test_get_dashboards_override_show_all_contract(mock_deps):
mock_env.id = "prod" mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
{"id": 1, "title": "Dash A", "slug": "dash-a", "owners": ["john_doe"], "modified_by": "john_doe"}, return_value=[
{"id": 2, "title": "Dash B", "slug": "dash-b", "owners": ["other"], "modified_by": "other"}, {
]) "id": 1,
"title": "Dash A",
"slug": "dash-a",
"owners": ["john_doe"],
"modified_by": "john_doe",
},
{
"id": 2,
"title": "Dash B",
"slug": "dash-b",
"owners": ["other"],
"modified_by": "other",
},
]
)
with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls: with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
profile_service = MagicMock() profile_service = MagicMock()
@@ -636,7 +732,9 @@ def test_get_dashboards_override_show_all_contract(mock_deps):
username="john_doe", username="john_doe",
enabled=True, enabled=True,
) )
profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive profile_service.matches_dashboard_actor.side_effect = (
_matches_actor_case_insensitive
)
profile_service_cls.return_value = profile_service profile_service_cls.return_value = profile_service
response = client.get( response = client.get(
@@ -654,10 +752,13 @@ def test_get_dashboards_override_show_all_contract(mock_deps):
assert payload["effective_profile_filter"]["username"] is None assert payload["effective_profile_filter"]["username"] is None
assert payload["effective_profile_filter"]["match_logic"] is None assert payload["effective_profile_filter"]["match_logic"] is None
profile_service.matches_dashboard_actor.assert_not_called() profile_service.matches_dashboard_actor.assert_not_called()
# [/DEF:test_get_dashboards_override_show_all_contract:Function] # [/DEF:test_get_dashboards_override_show_all_contract:Function]
# [DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function] # [DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards returns empty result set when profile-default filter is active and no dashboard actors match. # @TEST: GET /api/dashboards returns empty result set when profile-default filter is active and no dashboard actors match.
# @PURPOSE: Validate profile-default filtering returns an empty dashboard page when no actor aliases match the bound user. # @PURPOSE: Validate profile-default filtering returns an empty dashboard page when no actor aliases match the bound user.
# @PRE: Profile-default preference is enabled with bound username and all dashboards are non-matching. # @PRE: Profile-default preference is enabled with bound username and all dashboards are non-matching.
@@ -667,7 +768,8 @@ def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
mock_env.id = "prod" mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
return_value=[
{ {
"id": 101, "id": 101,
"title": "Team Dashboard", "title": "Team Dashboard",
@@ -682,7 +784,8 @@ def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
"owners": ["ops-user"], "owners": ["ops-user"],
"modified_by": "ops-user", "modified_by": "ops-user",
}, },
]) ]
)
with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls: with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
profile_service = MagicMock() profile_service = MagicMock()
@@ -690,7 +793,9 @@ def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
username="john_doe", username="john_doe",
enabled=True, enabled=True,
) )
profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive profile_service.matches_dashboard_actor.side_effect = (
_matches_actor_case_insensitive
)
profile_service_cls.return_value = profile_service profile_service_cls.return_value = profile_service
response = client.get( response = client.get(
@@ -710,10 +815,13 @@ def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
assert payload["effective_profile_filter"]["override_show_all"] is False assert payload["effective_profile_filter"]["override_show_all"] is False
assert payload["effective_profile_filter"]["username"] == "john_doe" assert payload["effective_profile_filter"]["username"] == "john_doe"
assert payload["effective_profile_filter"]["match_logic"] == "owners_or_modified_by" assert payload["effective_profile_filter"]["match_logic"] == "owners_or_modified_by"
# [/DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function] # [/DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function]
# [DEF:test_get_dashboards_page_context_other_disables_profile_default:Function] # [DEF:test_get_dashboards_page_context_other_disables_profile_default:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards does not auto-apply profile-default filter outside dashboards_main page context. # @TEST: GET /api/dashboards does not auto-apply profile-default filter outside dashboards_main page context.
# @PURPOSE: Validate non-dashboard page contexts suppress profile-default filtering and preserve unfiltered results. # @PURPOSE: Validate non-dashboard page contexts suppress profile-default filtering and preserve unfiltered results.
# @PRE: Profile-default preference exists but page_context=other query is provided. # @PRE: Profile-default preference exists but page_context=other query is provided.
@@ -723,10 +831,24 @@ def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
mock_env.id = "prod" mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
{"id": 1, "title": "Dash A", "slug": "dash-a", "owners": ["john_doe"], "modified_by": "john_doe"}, return_value=[
{"id": 2, "title": "Dash B", "slug": "dash-b", "owners": ["other"], "modified_by": "other"}, {
]) "id": 1,
"title": "Dash A",
"slug": "dash-a",
"owners": ["john_doe"],
"modified_by": "john_doe",
},
{
"id": 2,
"title": "Dash B",
"slug": "dash-b",
"owners": ["other"],
"modified_by": "other",
},
]
)
with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls: with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
profile_service = MagicMock() profile_service = MagicMock()
@@ -734,7 +856,9 @@ def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
username="john_doe", username="john_doe",
enabled=True, enabled=True,
) )
profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive profile_service.matches_dashboard_actor.side_effect = (
_matches_actor_case_insensitive
)
profile_service_cls.return_value = profile_service profile_service_cls.return_value = profile_service
response = client.get( response = client.get(
@@ -752,20 +876,26 @@ def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
assert payload["effective_profile_filter"]["username"] is None assert payload["effective_profile_filter"]["username"] is None
assert payload["effective_profile_filter"]["match_logic"] is None assert payload["effective_profile_filter"]["match_logic"] is None
profile_service.matches_dashboard_actor.assert_not_called() profile_service.matches_dashboard_actor.assert_not_called()
# [/DEF:test_get_dashboards_page_context_other_disables_profile_default:Function] # [/DEF:test_get_dashboards_page_context_other_disables_profile_default:Function]
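Taken together, the contract tests above pin down when the profile-default filter may be applied. A hedged sketch of that decision (parameter names are assumptions; the behavior follows the assertions):

def should_apply_profile_default(preference, override_show_all: bool, page_context: str) -> bool:
    # Apply the filter only when the user opted in, did not ask to see all
    # dashboards for this request, and is on the main dashboards page.
    return bool(
        preference is not None
        and preference.show_only_my_dashboards
        and not override_show_all
        and page_context == "dashboards_main"
    )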
# [DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function] # [DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards resolves Superset display-name alias once and filters without per-dashboard detail calls. # @TEST: GET /api/dashboards resolves Superset display-name alias once and filters without per-dashboard detail calls.
# @PURPOSE: Validate profile-default filtering reuses resolved Superset display aliases without triggering per-dashboard detail fanout. # @PURPOSE: Validate profile-default filtering reuses resolved Superset display aliases without triggering per-dashboard detail fanout.
# @PRE: Profile-default filter is active, bound username is `admin`, dashboard actors contain display labels. # @PRE: Profile-default filter is active, bound username is `admin`, dashboard actors contain display labels.
# @POST: Route matches by alias (`Superset Admin`) and does not call `SupersetClient.get_dashboard` in list filter path. # @POST: Route matches by alias (`Superset Admin`) and does not call `SupersetClient.get_dashboard` in list filter path.
def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout(mock_deps): def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout(
mock_deps,
):
mock_env = MagicMock() mock_env = MagicMock()
mock_env.id = "prod" mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
return_value=[
{ {
"id": 5, "id": 5,
"title": "Alias Match", "title": "Alias Match",
@@ -782,19 +912,24 @@ def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fano
"created_by": None, "created_by": None,
"modified_by": "Other User", "modified_by": "Other User",
}, },
]) ]
)
with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls, patch( with (
"src.api.routes.dashboards.SupersetClient" patch("src.api.routes.dashboards.ProfileService") as profile_service_cls,
) as superset_client_cls, patch( patch("src.api.routes.dashboards.SupersetClient") as superset_client_cls,
patch(
"src.api.routes.dashboards.SupersetAccountLookupAdapter" "src.api.routes.dashboards.SupersetAccountLookupAdapter"
) as lookup_adapter_cls: ) as lookup_adapter_cls,
):
profile_service = MagicMock() profile_service = MagicMock()
profile_service.get_my_preference.return_value = _build_profile_preference_stub( profile_service.get_my_preference.return_value = _build_profile_preference_stub(
username="admin", username="admin",
enabled=True, enabled=True,
) )
profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive profile_service.matches_dashboard_actor.side_effect = (
_matches_actor_case_insensitive
)
profile_service_cls.return_value = profile_service profile_service_cls.return_value = profile_service
superset_client = MagicMock() superset_client = MagicMock()
@@ -826,10 +961,13 @@ def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fano
assert payload["effective_profile_filter"]["applied"] is True assert payload["effective_profile_filter"]["applied"] is True
lookup_adapter.get_users_page.assert_called_once() lookup_adapter.get_users_page.assert_called_once()
superset_client.get_dashboard.assert_not_called() superset_client.get_dashboard.assert_not_called()
# [/DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function] # [/DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function]
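The fan-out test asserts a single users-page lookup and no per-dashboard detail calls. One way to read that contract is an alias set resolved once up front; the field names in the lookup payload and the helper's exact signature are assumptions, not the route's `_resolve_profile_actor_aliases` itself:

def resolve_actor_aliases(lookup_adapter, bound_username):
    # Single users-page lookup; no SupersetClient.get_dashboard calls are made
    # on the list-filter path.
    normalized = str(bound_username or "").strip().lower()
    aliases = {normalized} if normalized else set()
    for user in lookup_adapter.get_users_page() or []:
        if str(user.get("username", "")).strip().lower() != normalized:
            continue
        display = f"{user.get('first_name', '')} {user.get('last_name', '')}".strip().lower()
        if display:
            aliases.add(display)
    return aliases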
# [DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function] # [DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function]
# @RELATION: BINDS_TO -> DashboardsApiTests
# @TEST: GET /api/dashboards profile-default filter matches Superset owner object payloads. # @TEST: GET /api/dashboards profile-default filter matches Superset owner object payloads.
# @PURPOSE: Validate profile-default filtering accepts owner object payloads once aliases resolve to the bound Superset username. # @PURPOSE: Validate profile-default filtering accepts owner object payloads once aliases resolve to the bound Superset username.
# @PRE: Profile-default preference is enabled and owners list contains dict payloads. # @PRE: Profile-default preference is enabled and owners list contains dict payloads.
@@ -839,7 +977,8 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
mock_env.id = "prod" mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = [] mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ mock_deps["resource"].get_dashboards_with_status = AsyncMock(
return_value=[
{ {
"id": 701, "id": 701,
"title": "Featured Charts", "title": "Featured Charts",
@@ -870,11 +1009,15 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
], ],
"modified_by": "other_user", "modified_by": "other_user",
}, },
]) ]
)
with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls, patch( with (
patch("src.api.routes.dashboards.ProfileService") as profile_service_cls,
patch(
"src.api.routes.dashboards._resolve_profile_actor_aliases", "src.api.routes.dashboards._resolve_profile_actor_aliases",
return_value=["user_1"], return_value=["user_1"],
),
): ):
profile_service = MagicMock(spec=DomainProfileService) profile_service = MagicMock(spec=DomainProfileService)
profile_service.get_my_preference.return_value = _build_profile_preference_stub( profile_service.get_my_preference.return_value = _build_profile_preference_stub(
@@ -883,7 +1026,8 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
) )
profile_service.matches_dashboard_actor.side_effect = ( profile_service.matches_dashboard_actor.side_effect = (
lambda bound_username, owners, modified_by: any( lambda bound_username, owners, modified_by: any(
str(owner.get("email", "")).split("@", 1)[0].strip().lower() == str(bound_username).strip().lower() str(owner.get("email", "")).split("@", 1)[0].strip().lower()
== str(bound_username).strip().lower()
for owner in (owners or []) for owner in (owners or [])
if isinstance(owner, dict) if isinstance(owner, dict)
) )
@@ -899,6 +1043,8 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
assert payload["total"] == 1 assert payload["total"] == 1
assert {item["id"] for item in payload["dashboards"]} == {701} assert {item["id"] for item in payload["dashboards"]} == {701}
assert payload["dashboards"][0]["title"] == "Featured Charts" assert payload["dashboards"][0]["title"] == "Featured Charts"
# [/DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function] # [/DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function]
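The owner-object contract test matches on the email local part. A small sketch of token extraction from such payloads; the username and first/last-name fields are assumptions beyond what the test exercises:

def owner_tokens_from_payload(owner):
    # Accept both plain strings and Superset owner dicts; trimmed, lower-cased
    # tokens are what gets compared against the bound username.
    if isinstance(owner, dict):
        candidates = [
            owner.get("username"),
            str(owner.get("email", "")).split("@", 1)[0],
            f"{owner.get('first_name', '')} {owner.get('last_name', '')}",
        ]
    else:
        candidates = [owner]
    return [str(c or "").strip().lower() for c in candidates if str(c or "").strip()]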
View File
@@ -71,6 +71,7 @@ client = TestClient(app)
# [DEF:_make_user:Function] # [DEF:_make_user:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
def _make_user(): def _make_user():
permissions = [ permissions = [
SimpleNamespace(resource="dataset:session", action="READ"), SimpleNamespace(resource="dataset:session", action="READ"),
@@ -83,6 +84,7 @@ def _make_user():
# [DEF:_make_config_manager:Function] # [DEF:_make_config_manager:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
def _make_config_manager(): def _make_config_manager():
env = Environment( env = Environment(
id="env-1", id="env-1",
@@ -100,6 +102,7 @@ def _make_config_manager():
# [DEF:_make_session:Function] # [DEF:_make_session:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
def _make_session(): def _make_session():
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
return DatasetReviewSession( return DatasetReviewSession(
@@ -123,6 +126,7 @@ def _make_session():
# [DEF:_make_us2_session:Function] # [DEF:_make_us2_session:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
def _make_us2_session(): def _make_us2_session():
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
session = _make_session() session = _make_session()
@@ -238,6 +242,7 @@ def _make_us2_session():
# [DEF:_make_us3_session:Function] # [DEF:_make_us3_session:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
def _make_us3_session(): def _make_us3_session():
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
session = _make_session() session = _make_session()
@@ -300,6 +305,7 @@ def _make_us3_session():
# [DEF:_make_preview_ready_session:Function] # [DEF:_make_preview_ready_session:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
def _make_preview_ready_session(): def _make_preview_ready_session():
session = _make_us3_session() session = _make_us3_session()
session.readiness_state = ReadinessState.COMPILED_PREVIEW_READY session.readiness_state = ReadinessState.COMPILED_PREVIEW_READY
@@ -310,6 +316,7 @@ def _make_preview_ready_session():
# [DEF:dataset_review_api_dependencies:Function] # [DEF:dataset_review_api_dependencies:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def dataset_review_api_dependencies(): def dataset_review_api_dependencies():
mock_user = _make_user() mock_user = _make_user()
@@ -330,6 +337,7 @@ def dataset_review_api_dependencies():
# [DEF:test_parse_superset_link_dashboard_partial_recovery:Function] # [DEF:test_parse_superset_link_dashboard_partial_recovery:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify dashboard links recover dataset context and preserve explicit partial-recovery markers. # @PURPOSE: Verify dashboard links recover dataset context and preserve explicit partial-recovery markers.
def test_parse_superset_link_dashboard_partial_recovery(): def test_parse_superset_link_dashboard_partial_recovery():
env = Environment( env = Environment(
@@ -364,6 +372,7 @@ def test_parse_superset_link_dashboard_partial_recovery():
# [DEF:test_parse_superset_link_dashboard_slug_recovery:Function] # [DEF:test_parse_superset_link_dashboard_slug_recovery:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify dashboard slug links resolve through dashboard detail endpoints and recover dataset context. # @PURPOSE: Verify dashboard slug links resolve through dashboard detail endpoints and recover dataset context.
def test_parse_superset_link_dashboard_slug_recovery(): def test_parse_superset_link_dashboard_slug_recovery():
env = Environment( env = Environment(
@@ -398,6 +407,7 @@ def test_parse_superset_link_dashboard_slug_recovery():
# [DEF:test_parse_superset_link_dashboard_permalink_partial_recovery:Function] # [DEF:test_parse_superset_link_dashboard_permalink_partial_recovery:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify dashboard permalink links no longer fail parsing and preserve permalink filter state for partial recovery. # @PURPOSE: Verify dashboard permalink links no longer fail parsing and preserve permalink filter state for partial recovery.
def test_parse_superset_link_dashboard_permalink_partial_recovery(): def test_parse_superset_link_dashboard_permalink_partial_recovery():
env = Environment( env = Environment(
@@ -442,6 +452,7 @@ def test_parse_superset_link_dashboard_permalink_partial_recovery():
# [DEF:test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_dashboard_state:Function] # [DEF:test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_dashboard_state:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify permalink state with nested dashboard id recovers dataset binding and keeps imported filters. # @PURPOSE: Verify permalink state with nested dashboard id recovers dataset binding and keeps imported filters.
def test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_dashboard_state(): def test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_dashboard_state():
env = Environment( env = Environment(
@@ -481,6 +492,7 @@ def test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_da
# [DEF:test_resolve_from_dictionary_prefers_exact_match:Function] # [DEF:test_resolve_from_dictionary_prefers_exact_match:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify trusted dictionary exact matches outrank fuzzy candidates and unresolved fields stay explicit. # @PURPOSE: Verify trusted dictionary exact matches outrank fuzzy candidates and unresolved fields stay explicit.
def test_resolve_from_dictionary_prefers_exact_match(): def test_resolve_from_dictionary_prefers_exact_match():
resolver = SemanticSourceResolver() resolver = SemanticSourceResolver()
@@ -519,6 +531,7 @@ def test_resolve_from_dictionary_prefers_exact_match():
# [DEF:test_orchestrator_start_session_preserves_partial_recovery:Function] # [DEF:test_orchestrator_start_session_preserves_partial_recovery:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify session start persists usable recovery-required state when Superset intake is partial. # @PURPOSE: Verify session start persists usable recovery-required state when Superset intake is partial.
def test_orchestrator_start_session_preserves_partial_recovery(dataset_review_api_dependencies): def test_orchestrator_start_session_preserves_partial_recovery(dataset_review_api_dependencies):
repository = MagicMock() repository = MagicMock()
@@ -580,6 +593,7 @@ def test_orchestrator_start_session_preserves_partial_recovery(dataset_review_ap
# [DEF:test_orchestrator_start_session_bootstraps_recovery_state:Function] # [DEF:test_orchestrator_start_session_bootstraps_recovery_state:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify session start persists recovered filters, template variables, and initial execution mappings for review workspace bootstrap. # @PURPOSE: Verify session start persists recovered filters, template variables, and initial execution mappings for review workspace bootstrap.
def test_orchestrator_start_session_bootstraps_recovery_state(dataset_review_api_dependencies): def test_orchestrator_start_session_bootstraps_recovery_state(dataset_review_api_dependencies):
repository = MagicMock() repository = MagicMock()
@@ -677,6 +691,7 @@ def test_orchestrator_start_session_bootstraps_recovery_state(dataset_review_api
# [DEF:test_start_session_endpoint_returns_created_summary:Function] # [DEF:test_start_session_endpoint_returns_created_summary:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify POST session lifecycle endpoint returns a persisted ownership-scoped summary. # @PURPOSE: Verify POST session lifecycle endpoint returns a persisted ownership-scoped summary.
def test_start_session_endpoint_returns_created_summary(dataset_review_api_dependencies): def test_start_session_endpoint_returns_created_summary(dataset_review_api_dependencies):
session = _make_session() session = _make_session()
@@ -703,6 +718,7 @@ def test_start_session_endpoint_returns_created_summary(dataset_review_api_depen
# [DEF:test_get_session_detail_export_and_lifecycle_endpoints:Function] # [DEF:test_get_session_detail_export_and_lifecycle_endpoints:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify lifecycle get/patch/delete plus documentation and validation exports remain ownership-scoped and usable. # @PURPOSE: Verify lifecycle get/patch/delete plus documentation and validation exports remain ownership-scoped and usable.
def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_dependencies): def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_dependencies):
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
@@ -802,6 +818,7 @@ def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_de
# [DEF:test_us2_clarification_endpoints_persist_answer_and_feedback:Function] # [DEF:test_us2_clarification_endpoints_persist_answer_and_feedback:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Clarification endpoints should expose one current question, persist the answer before advancement, and store feedback on the answer audit record. # @PURPOSE: Clarification endpoints should expose one current question, persist the answer before advancement, and store feedback on the answer audit record.
def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_api_dependencies): def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_api_dependencies):
session = _make_us2_session() session = _make_us2_session()
@@ -853,6 +870,7 @@ def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_
# [DEF:test_us2_field_semantic_override_lock_unlock_and_feedback:Function] # [DEF:test_us2_field_semantic_override_lock_unlock_and_feedback:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Semantic field endpoints should apply manual overrides with lock/provenance invariants and persist feedback independently. # @PURPOSE: Semantic field endpoints should apply manual overrides with lock/provenance invariants and persist feedback independently.
def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api_dependencies): def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api_dependencies):
session = _make_us2_session() session = _make_us2_session()
@@ -913,6 +931,7 @@ def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api
# [DEF:test_us3_mapping_patch_approval_preview_and_launch_endpoints:Function] # [DEF:test_us3_mapping_patch_approval_preview_and_launch_endpoints:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: US3 execution endpoints should persist manual overrides, preserve explicit approval semantics, return contract-shaped preview truth, and expose audited launch handoff. # @PURPOSE: US3 execution endpoints should persist manual overrides, preserve explicit approval semantics, return contract-shaped preview truth, and expose audited launch handoff.
def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_api_dependencies): def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_api_dependencies):
session = _make_us3_session() session = _make_us3_session()
@@ -1067,6 +1086,7 @@ def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_
# [DEF:test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not_found_contract_drift:Function] # [DEF:test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not_found_contract_drift:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Preview endpoint should preserve API contract and surface generic upstream preview failures without fabricating dashboard-not-found semantics for non-dashboard 404s. # @PURPOSE: Preview endpoint should preserve API contract and surface generic upstream preview failures without fabricating dashboard-not-found semantics for non-dashboard 404s.
def test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not_found_contract_drift( def test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not_found_contract_drift(
dataset_review_api_dependencies, dataset_review_api_dependencies,
@@ -1115,6 +1135,7 @@ def test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not
# [DEF:test_execution_snapshot_includes_recovered_imported_filters_without_template_mapping:Function] # [DEF:test_execution_snapshot_includes_recovered_imported_filters_without_template_mapping:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Recovered imported filters with values should flow into preview filter context even when no template variable mapping exists. # @PURPOSE: Recovered imported filters with values should flow into preview filter context even when no template variable mapping exists.
def test_execution_snapshot_includes_recovered_imported_filters_without_template_mapping( def test_execution_snapshot_includes_recovered_imported_filters_without_template_mapping(
dataset_review_api_dependencies, dataset_review_api_dependencies,
@@ -1175,6 +1196,7 @@ def test_execution_snapshot_includes_recovered_imported_filters_without_template
# [DEF:test_execution_snapshot_preserves_mapped_template_variables_and_filter_context:Function] # [DEF:test_execution_snapshot_preserves_mapped_template_variables_and_filter_context:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Mapped template variables should still populate template params while contributing their effective filter context. # @PURPOSE: Mapped template variables should still populate template params while contributing their effective filter context.
def test_execution_snapshot_preserves_mapped_template_variables_and_filter_context( def test_execution_snapshot_preserves_mapped_template_variables_and_filter_context(
dataset_review_api_dependencies, dataset_review_api_dependencies,
@@ -1209,6 +1231,7 @@ def test_execution_snapshot_preserves_mapped_template_variables_and_filter_conte
# [DEF:test_execution_snapshot_skips_partial_imported_filters_without_values:Function] # [DEF:test_execution_snapshot_skips_partial_imported_filters_without_values:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Partial imported filters without raw or normalized values must not emit bogus active preview filters. # @PURPOSE: Partial imported filters without raw or normalized values must not emit bogus active preview filters.
def test_execution_snapshot_skips_partial_imported_filters_without_values( def test_execution_snapshot_skips_partial_imported_filters_without_values(
dataset_review_api_dependencies, dataset_review_api_dependencies,
@@ -1246,6 +1269,7 @@ def test_execution_snapshot_skips_partial_imported_filters_without_values(
# [DEF:test_us3_launch_endpoint_requires_launch_permission:Function] # [DEF:test_us3_launch_endpoint_requires_launch_permission:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Launch endpoint should enforce the contract RBAC permission instead of the generic session-manage permission. # @PURPOSE: Launch endpoint should enforce the contract RBAC permission instead of the generic session-manage permission.
def test_us3_launch_endpoint_requires_launch_permission(dataset_review_api_dependencies): def test_us3_launch_endpoint_requires_launch_permission(dataset_review_api_dependencies):
session = _make_us3_session() session = _make_us3_session()
@@ -1293,6 +1317,7 @@ def test_us3_launch_endpoint_requires_launch_permission(dataset_review_api_depen
# [/DEF:test_us3_launch_endpoint_requires_launch_permission:Function] # [/DEF:test_us3_launch_endpoint_requires_launch_permission:Function]
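The three execution-snapshot tests earlier in this file describe which recovered inputs reach the preview: imported filters contribute only when they carry a raw or normalized value, while mapped template variables feed template params. A hedged sketch of that selection (the record field names are assumptions):

def build_preview_filter_context(imported_filters, template_mappings):
    # Skip partial imported filters without values; mapped template variables
    # populate template params instead of the active filter list.
    active_filters = [
        f
        for f in imported_filters
        if f.get("raw_value") not in (None, "")
        or f.get("normalized_value") not in (None, "")
    ]
    template_params = {
        m.get("variable"): m.get("value")
        for m in template_mappings
        if m.get("variable")
    }
    return {"active_filters": active_filters, "template_params": template_params}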
# [DEF:test_semantic_source_version_propagation_preserves_locked_fields:Function] # [DEF:test_semantic_source_version_propagation_preserves_locked_fields:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Updated semantic source versions should mark unlocked fields reviewable while preserving locked manual values. # @PURPOSE: Updated semantic source versions should mark unlocked fields reviewable while preserving locked manual values.
def test_semantic_source_version_propagation_preserves_locked_fields(): def test_semantic_source_version_propagation_preserves_locked_fields():
resolver = SemanticSourceResolver() resolver = SemanticSourceResolver()
View File
@@ -51,10 +51,13 @@ client = TestClient(app)
# [DEF:test_get_datasets_success:Function] # [DEF:test_get_datasets_success:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate successful datasets listing contract for an existing environment. # @PURPOSE: Validate successful datasets listing contract for an existing environment.
# @TEST: GET /api/datasets returns 200 and valid schema # @TEST: GET /api/datasets returns 200 and valid schema
# @PRE: env_id exists # @PRE: env_id exists
# @POST: Response matches DatasetsResponse schema # @POST: Response matches DatasetsResponse schema
def test_get_datasets_success(mock_deps): def test_get_datasets_success(mock_deps):
# Mock environment # Mock environment
mock_env = MagicMock() mock_env = MagicMock()
@@ -89,10 +92,15 @@ def test_get_datasets_success(mock_deps):
# [/DEF:test_get_datasets_success:Function]
# [DEF:test_get_datasets_env_not_found:Function] # [DEF:test_get_datasets_env_not_found:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate datasets listing returns 404 when the requested environment does not exist. # @PURPOSE: Validate datasets listing returns 404 when the requested environment does not exist.
# @TEST: GET /api/datasets returns 404 if env_id missing # @TEST: GET /api/datasets returns 404 if env_id missing
# @PRE: env_id does not exist # @PRE: env_id does not exist
# @POST: Returns 404 error # @POST: Returns 404 error
def test_get_datasets_env_not_found(mock_deps): def test_get_datasets_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = [] mock_deps["config"].get_environments.return_value = []
@@ -106,10 +114,15 @@ def test_get_datasets_env_not_found(mock_deps):
# [/DEF:test_get_datasets_env_not_found:Function]
# [DEF:test_get_datasets_invalid_pagination:Function] # [DEF:test_get_datasets_invalid_pagination:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate datasets listing rejects invalid pagination parameters with 400 responses. # @PURPOSE: Validate datasets listing rejects invalid pagination parameters with 400 responses.
# @TEST: GET /api/datasets returns 400 for invalid page/page_size # @TEST: GET /api/datasets returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100 # @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error # @POST: Returns 400 error
def test_get_datasets_invalid_pagination(mock_deps): def test_get_datasets_invalid_pagination(mock_deps):
mock_env = MagicMock() mock_env = MagicMock()
mock_env.id = "prod" mock_env.id = "prod"
@@ -135,10 +148,15 @@ def test_get_datasets_invalid_pagination(mock_deps):
# [/DEF:test_get_datasets_invalid_pagination:Function]
# [DEF:test_map_columns_success:Function] # [DEF:test_map_columns_success:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate map-columns request creates an async mapping task and returns its identifier. # @PURPOSE: Validate map-columns request creates an async mapping task and returns its identifier.
# @TEST: POST /api/datasets/map-columns creates mapping task # @TEST: POST /api/datasets/map-columns creates mapping task
# @PRE: Valid env_id, dataset_ids, source_type # @PRE: Valid env_id, dataset_ids, source_type
# @POST: Returns task_id # @POST: Returns task_id
def test_map_columns_success(mock_deps): def test_map_columns_success(mock_deps):
# Mock environment # Mock environment
mock_env = MagicMock() mock_env = MagicMock()
@@ -170,10 +188,15 @@ def test_map_columns_success(mock_deps):
# [/DEF:test_map_columns_success:Function]
# [DEF:test_map_columns_invalid_source_type:Function] # [DEF:test_map_columns_invalid_source_type:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate map-columns rejects unsupported source types with a 400 contract response. # @PURPOSE: Validate map-columns rejects unsupported source types with a 400 contract response.
# @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type # @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type
# @PRE: source_type is not 'postgresql' or 'xlsx' # @PRE: source_type is not 'postgresql' or 'xlsx'
# @POST: Returns 400 error # @POST: Returns 400 error
def test_map_columns_invalid_source_type(mock_deps): def test_map_columns_invalid_source_type(mock_deps):
response = client.post( response = client.post(
"/api/datasets/map-columns", "/api/datasets/map-columns",
@@ -192,10 +215,15 @@ def test_map_columns_invalid_source_type(mock_deps):
# [/DEF:test_map_columns_invalid_source_type:Function]
# [DEF:test_generate_docs_success:Function] # [DEF:test_generate_docs_success:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @TEST: POST /api/datasets/generate-docs creates doc generation task # @TEST: POST /api/datasets/generate-docs creates doc generation task
# @PRE: Valid env_id, dataset_ids, llm_provider # @PRE: Valid env_id, dataset_ids, llm_provider
# @PURPOSE: Validate generate-docs request creates an async documentation task and returns its identifier. # @PURPOSE: Validate generate-docs request creates an async documentation task and returns its identifier.
# @POST: Returns task_id # @POST: Returns task_id
def test_generate_docs_success(mock_deps): def test_generate_docs_success(mock_deps):
# Mock environment # Mock environment
mock_env = MagicMock() mock_env = MagicMock()
@@ -227,10 +255,15 @@ def test_generate_docs_success(mock_deps):
# [/DEF:test_generate_docs_success:Function]
# [DEF:test_map_columns_empty_ids:Function] # [DEF:test_map_columns_empty_ids:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate map-columns rejects empty dataset identifier lists. # @PURPOSE: Validate map-columns rejects empty dataset identifier lists.
# @TEST: POST /api/datasets/map-columns returns 400 for empty dataset_ids # @TEST: POST /api/datasets/map-columns returns 400 for empty dataset_ids
# @PRE: dataset_ids is empty # @PRE: dataset_ids is empty
# @POST: Returns 400 error # @POST: Returns 400 error
def test_map_columns_empty_ids(mock_deps): def test_map_columns_empty_ids(mock_deps):
"""@PRE: dataset_ids must be non-empty.""" """@PRE: dataset_ids must be non-empty."""
response = client.post( response = client.post(
@@ -247,10 +280,15 @@ def test_map_columns_empty_ids(mock_deps):
# [/DEF:test_map_columns_empty_ids:Function]
# [DEF:test_generate_docs_empty_ids:Function] # [DEF:test_generate_docs_empty_ids:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate generate-docs rejects empty dataset identifier lists. # @PURPOSE: Validate generate-docs rejects empty dataset identifier lists.
# @TEST: POST /api/datasets/generate-docs returns 400 for empty dataset_ids # @TEST: POST /api/datasets/generate-docs returns 400 for empty dataset_ids
# @PRE: dataset_ids is empty # @PRE: dataset_ids is empty
# @POST: Returns 400 error # @POST: Returns 400 error
def test_generate_docs_empty_ids(mock_deps): def test_generate_docs_empty_ids(mock_deps):
"""@PRE: dataset_ids must be non-empty.""" """@PRE: dataset_ids must be non-empty."""
response = client.post( response = client.post(
@@ -267,10 +305,15 @@ def test_generate_docs_empty_ids(mock_deps):
# [/DEF:test_generate_docs_empty_ids:Function]
# [DEF:test_generate_docs_env_not_found:Function] # [DEF:test_generate_docs_env_not_found:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @TEST: POST /api/datasets/generate-docs returns 404 for missing env # @TEST: POST /api/datasets/generate-docs returns 404 for missing env
# @PRE: env_id does not exist # @PRE: env_id does not exist
# @PURPOSE: Validate generate-docs returns 404 when the requested environment cannot be resolved. # @PURPOSE: Validate generate-docs returns 404 when the requested environment cannot be resolved.
# @POST: Returns 404 error # @POST: Returns 404 error
# [/DEF:test_generate_docs_empty_ids:Function]
# [DEF:test_generate_docs_env_not_found:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
def test_generate_docs_env_not_found(mock_deps): def test_generate_docs_env_not_found(mock_deps):
"""@PRE: env_id must be a valid environment.""" """@PRE: env_id must be a valid environment."""
mock_deps["config"].get_environments.return_value = [] mock_deps["config"].get_environments.return_value = []
@@ -288,8 +331,11 @@ def test_generate_docs_env_not_found(mock_deps):
# [DEF:test_get_datasets_superset_failure:Function] # [DEF:test_get_datasets_superset_failure:Function]
# @RELATION: BINDS_TO -> DatasetsApiTests
# @PURPOSE: Validate datasets listing surfaces a 503 contract when Superset access fails. # @PURPOSE: Validate datasets listing surfaces a 503 contract when Superset access fails.
# @TEST_EDGE: external_superset_failure -> {status: 503} # @TEST_EDGE: external_superset_failure -> {status: 503}
# [/DEF:test_generate_docs_env_not_found:Function]
def test_get_datasets_superset_failure(mock_deps): def test_get_datasets_superset_failure(mock_deps):
"""@TEST_EDGE: external_superset_failure -> {status: 503}""" """@TEST_EDGE: external_superset_failure -> {status: 503}"""
mock_env = MagicMock() mock_env = MagicMock()
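
A minimal sketch, under assumptions, of the 503 contract described by the @TEST_EDGE annotation above. It presumes the same `client` TestClient and `mock_deps` fixture used throughout this test module; the query shape and the `mock_deps["superset"].get_datasets` injection point are illustrative, not taken from the committed code.

from unittest.mock import MagicMock

def test_get_datasets_superset_failure_sketch(mock_deps):
    """@TEST_EDGE: external_superset_failure -> {status: 503}"""
    mock_env = MagicMock()
    mock_env.id = "env-1"
    mock_deps["config"].get_environments.return_value = [mock_env]
    # Assumed failure injection point: the Superset client mock raising on listing.
    mock_deps["superset"].get_datasets.side_effect = RuntimeError("connection refused")

    response = client.get("/api/datasets", params={"env_id": "env-1"})

    # External Superset failures are expected to surface as 503, not a generic 500.
    assert response.status_code == 503
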

View File

@@ -11,6 +11,11 @@ from src.api.routes import git as git_routes
from src.models.git import GitServerConfig, GitProvider, GitStatus, GitRepository from src.models.git import GitServerConfig, GitProvider, GitStatus, GitRepository
# [DEF:DbMock:Class]
# @RELATION: BINDS_TO ->[TestGitApi]
# @COMPLEXITY: 2
# @PURPOSE: In-memory session double for git route tests with minimal query/filter persistence semantics.
# @INVARIANT: Supports only the SQLAlchemy-like operations exercised by this test module.
class DbMock: class DbMock:
def __init__(self, data=None): def __init__(self, data=None):
self._data = data or [] self._data = data or []
@@ -79,6 +84,9 @@ class DbMock:
item.last_validated = "2026-03-08T00:00:00Z" item.last_validated = "2026-03-08T00:00:00Z"
# [/DEF:DbMock:Class]
# [DEF:test_get_git_configs_masks_pat:Function] # [DEF:test_get_git_configs_masks_pat:Function]
# @RELATION: BINDS_TO ->[TestGitApi] # @RELATION: BINDS_TO ->[TestGitApi]
def test_get_git_configs_masks_pat(): def test_get_git_configs_masks_pat():
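
For readers following the DbMock annotation above, a self-contained sketch of what a session double with "minimal query/filter persistence semantics" can look like. The class name and the ignore-the-filter behavior are illustrative assumptions, not the committed DbMock.

class DbMockSketch:
    """In-memory stand-in for a SQLAlchemy session used by route tests."""

    def __init__(self, data=None):
        self._data = list(data or [])

    def query(self, _model):
        return self  # chainable, like session.query(Model)

    def filter(self, *_criteria):
        return self  # criteria are accepted but ignored in this sketch

    def all(self):
        return list(self._data)

    def first(self):
        return self._data[0] if self._data else None

    def add(self, item):
        self._data.append(item)

    def commit(self):
        pass  # nothing to flush; the data lives in the list above

    def refresh(self, _item):
        pass
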

View File

@@ -1,9 +1,9 @@
# [DEF:backend.src.api.routes.__tests__.test_git_status_route:Module] # [DEF:TestGitStatusRoute:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, git, api, status, no_repo # @SEMANTICS: tests, git, api, status, no_repo
# @PURPOSE: Validate status endpoint behavior for missing and error repository states. # @PURPOSE: Validate status endpoint behavior for missing and error repository states.
# @LAYER: Domain (Tests) # @LAYER: Domain (Tests)
# @RELATION: VERIFIES -> [backend.src.api.routes.git] # @RELATION: VERIFIES -> [GitApi]
from fastapi import HTTPException from fastapi import HTTPException
import pytest import pytest
@@ -14,6 +14,7 @@ from src.api.routes import git as git_routes
# [DEF:test_get_repository_status_returns_no_repo_payload_for_missing_repo:Function] # [DEF:test_get_repository_status_returns_no_repo_payload_for_missing_repo:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure missing local repository is represented as NO_REPO payload instead of an API error. # @PURPOSE: Ensure missing local repository is represented as NO_REPO payload instead of an API error.
# @PRE: GitService.get_status raises HTTPException(404). # @PRE: GitService.get_status raises HTTPException(404).
# @POST: Route returns a deterministic NO_REPO status payload. # @POST: Route returns a deterministic NO_REPO status payload.
@@ -37,6 +38,7 @@ def test_get_repository_status_returns_no_repo_payload_for_missing_repo(monkeypa
# [DEF:test_get_repository_status_propagates_non_404_http_exception:Function] # [DEF:test_get_repository_status_propagates_non_404_http_exception:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure HTTP exceptions other than 404 are not masked. # @PURPOSE: Ensure HTTP exceptions other than 404 are not masked.
# @PRE: GitService.get_status raises HTTPException with non-404 status. # @PRE: GitService.get_status raises HTTPException with non-404 status.
# @POST: Raised exception preserves original status and detail. # @POST: Raised exception preserves original status and detail.
@@ -60,6 +62,7 @@ def test_get_repository_status_propagates_non_404_http_exception(monkeypatch):
# [DEF:test_get_repository_diff_propagates_http_exception:Function] # [DEF:test_get_repository_diff_propagates_http_exception:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure diff endpoint preserves domain HTTP errors from GitService. # @PURPOSE: Ensure diff endpoint preserves domain HTTP errors from GitService.
# @PRE: GitService.get_diff raises HTTPException. # @PRE: GitService.get_diff raises HTTPException.
# @POST: Endpoint raises same HTTPException values. # @POST: Endpoint raises same HTTPException values.
@@ -79,6 +82,7 @@ def test_get_repository_diff_propagates_http_exception(monkeypatch):
# [DEF:test_get_history_wraps_unexpected_error_as_500:Function] # [DEF:test_get_history_wraps_unexpected_error_as_500:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure non-HTTP exceptions in history endpoint become deterministic 500 errors. # @PURPOSE: Ensure non-HTTP exceptions in history endpoint become deterministic 500 errors.
# @PRE: GitService.get_commit_history raises ValueError. # @PRE: GitService.get_commit_history raises ValueError.
# @POST: Endpoint returns HTTPException with status 500 and route context. # @POST: Endpoint returns HTTPException with status 500 and route context.
@@ -98,6 +102,7 @@ def test_get_history_wraps_unexpected_error_as_500(monkeypatch):
# [DEF:test_commit_changes_wraps_unexpected_error_as_500:Function] # [DEF:test_commit_changes_wraps_unexpected_error_as_500:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure commit endpoint does not leak unexpected errors as 400. # @PURPOSE: Ensure commit endpoint does not leak unexpected errors as 400.
# @PRE: GitService.commit_changes raises RuntimeError. # @PRE: GitService.commit_changes raises RuntimeError.
# @POST: Endpoint raises HTTPException(500) with route context. # @POST: Endpoint raises HTTPException(500) with route context.
@@ -121,6 +126,7 @@ def test_commit_changes_wraps_unexpected_error_as_500(monkeypatch):
# [DEF:test_get_repository_status_batch_returns_mixed_statuses:Function] # [DEF:test_get_repository_status_batch_returns_mixed_statuses:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure batch endpoint returns per-dashboard statuses in one response. # @PURPOSE: Ensure batch endpoint returns per-dashboard statuses in one response.
# @PRE: Some repositories are missing and some are initialized. # @PRE: Some repositories are missing and some are initialized.
# @POST: Returned map includes resolved status for each requested dashboard ID. # @POST: Returned map includes resolved status for each requested dashboard ID.
@@ -148,6 +154,7 @@ def test_get_repository_status_batch_returns_mixed_statuses(monkeypatch):
# [DEF:test_get_repository_status_batch_marks_item_as_error_on_service_failure:Function] # [DEF:test_get_repository_status_batch_marks_item_as_error_on_service_failure:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure batch endpoint marks failed items as ERROR without failing entire request. # @PURPOSE: Ensure batch endpoint marks failed items as ERROR without failing entire request.
# @PRE: GitService raises non-HTTP exception for one dashboard. # @PRE: GitService raises non-HTTP exception for one dashboard.
# @POST: Failed dashboard status is marked as ERROR. # @POST: Failed dashboard status is marked as ERROR.
@@ -173,6 +180,7 @@ def test_get_repository_status_batch_marks_item_as_error_on_service_failure(monk
# [DEF:test_get_repository_status_batch_deduplicates_and_truncates_ids:Function] # [DEF:test_get_repository_status_batch_deduplicates_and_truncates_ids:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure batch endpoint protects server from oversized payloads. # @PURPOSE: Ensure batch endpoint protects server from oversized payloads.
# @PRE: Request includes duplicate IDs and more than MAX_REPOSITORY_STATUS_BATCH entries. # @PRE: Request includes duplicate IDs and more than MAX_REPOSITORY_STATUS_BATCH entries.

# @POST: Result contains unique IDs up to configured cap. # @POST: Result contains unique IDs up to configured cap.
@@ -198,6 +206,7 @@ def test_get_repository_status_batch_deduplicates_and_truncates_ids(monkeypatch)
# [DEF:test_commit_changes_applies_profile_identity_before_commit:Function] # [DEF:test_commit_changes_applies_profile_identity_before_commit:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure commit route configures repository identity from profile preferences before commit call. # @PURPOSE: Ensure commit route configures repository identity from profile preferences before commit call.
# @PRE: Profile preference contains git_username/git_email for current user. # @PRE: Profile preference contains git_username/git_email for current user.
# @POST: git_service.configure_identity receives resolved identity and commit proceeds. # @POST: git_service.configure_identity receives resolved identity and commit proceeds.
@@ -259,6 +268,7 @@ def test_commit_changes_applies_profile_identity_before_commit(monkeypatch):
# [DEF:test_pull_changes_applies_profile_identity_before_pull:Function] # [DEF:test_pull_changes_applies_profile_identity_before_pull:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure pull route configures repository identity from profile preferences before pull call. # @PURPOSE: Ensure pull route configures repository identity from profile preferences before pull call.
# @PRE: Profile preference contains git_username/git_email for current user. # @PRE: Profile preference contains git_username/git_email for current user.
# @POST: git_service.configure_identity receives resolved identity and pull proceeds. # @POST: git_service.configure_identity receives resolved identity and pull proceeds.
@@ -315,6 +325,7 @@ def test_pull_changes_applies_profile_identity_before_pull(monkeypatch):
# [DEF:test_get_merge_status_returns_service_payload:Function] # [DEF:test_get_merge_status_returns_service_payload:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure merge status route returns service payload as-is. # @PURPOSE: Ensure merge status route returns service payload as-is.
# @PRE: git_service.get_merge_status returns unfinished merge payload. # @PRE: git_service.get_merge_status returns unfinished merge payload.
# @POST: Route response contains has_unfinished_merge=True. # @POST: Route response contains has_unfinished_merge=True.
@@ -347,6 +358,7 @@ def test_get_merge_status_returns_service_payload(monkeypatch):
# [DEF:test_resolve_merge_conflicts_passes_resolution_items_to_service:Function] # [DEF:test_resolve_merge_conflicts_passes_resolution_items_to_service:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure merge resolve route forwards parsed resolutions to service. # @PURPOSE: Ensure merge resolve route forwards parsed resolutions to service.
# @PRE: resolve_data has one file strategy. # @PRE: resolve_data has one file strategy.
# @POST: Service receives normalized list and route returns resolved files. # @POST: Service receives normalized list and route returns resolved files.
@@ -384,6 +396,7 @@ def test_resolve_merge_conflicts_passes_resolution_items_to_service(monkeypatch)
# [DEF:test_abort_merge_calls_service_and_returns_result:Function] # [DEF:test_abort_merge_calls_service_and_returns_result:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure abort route delegates to service. # @PURPOSE: Ensure abort route delegates to service.
# @PRE: Service abort_merge returns aborted status. # @PRE: Service abort_merge returns aborted status.
# @POST: Route returns aborted status. # @POST: Route returns aborted status.
@@ -408,6 +421,7 @@ def test_abort_merge_calls_service_and_returns_result(monkeypatch):
# [DEF:test_continue_merge_passes_message_and_returns_commit:Function] # [DEF:test_continue_merge_passes_message_and_returns_commit:Function]
# @RELATION: BINDS_TO -> TestGitStatusRoute
# @PURPOSE: Ensure continue route passes commit message to service. # @PURPOSE: Ensure continue route passes commit message to service.
# @PRE: continue_data.message is provided. # @PRE: continue_data.message is provided.
# @POST: Route returns committed status and hash. # @POST: Route returns committed status and hash.
@@ -437,4 +451,4 @@ def test_continue_merge_passes_message_and_returns_commit(monkeypatch):
# [/DEF:test_continue_merge_passes_message_and_returns_commit:Function] # [/DEF:test_continue_merge_passes_message_and_returns_commit:Function]
# [/DEF:backend.src.api.routes.__tests__.test_git_status_route:Module] # [/DEF:TestGitStatusRoute:Module]
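
The NO_REPO and error-propagation tests above pin down one translation rule. A small self-contained sketch of that rule, assuming the route treats a 404 from GitService as the "no repository yet" domain state and re-raises everything else; the payload keys are assumptions, only the 404-to-NO_REPO translation and the re-raise come from the test descriptions.

from fastapi import HTTPException

def resolve_repository_status_sketch(git_service, dashboard_id: str) -> dict:
    try:
        return git_service.get_status(dashboard_id)
    except HTTPException as exc:
        if exc.status_code == 404:
            # A missing local repository is a valid domain state, not an API error.
            return {"dashboard_id": dashboard_id, "status": "NO_REPO", "changes": []}
        raise  # non-404 errors keep their original status and detail
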

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.api.routes.__tests__.test_migration_routes:Module] # [DEF:TestMigrationRoutes:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Unit tests for migration API route handlers. # @PURPOSE: Unit tests for migration API route handlers.
@@ -52,6 +52,8 @@ def db_session():
session.close() session.close()
# [DEF:_make_config_manager:Function]
# @RELATION: BINDS_TO -> TestMigrationRoutes
def _make_config_manager(cron="0 2 * * *"): def _make_config_manager(cron="0 2 * * *"):
"""Creates a mock config manager with a realistic AppConfig-like object.""" """Creates a mock config manager with a realistic AppConfig-like object."""
settings = MagicMock() settings = MagicMock()
@@ -66,6 +68,8 @@ def _make_config_manager(cron="0 2 * * *"):
# --- get_migration_settings tests --- # --- get_migration_settings tests ---
# [/DEF:_make_config_manager:Function]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_get_migration_settings_returns_default_cron(): async def test_get_migration_settings_returns_default_cron():
"""Verify the settings endpoint returns the stored cron string.""" """Verify the settings endpoint returns the stored cron string."""
@@ -227,6 +231,8 @@ async def test_get_resource_mappings_filter_by_type(db_session):
# --- trigger_sync_now tests --- # --- trigger_sync_now tests ---
@pytest.fixture @pytest.fixture
# [DEF:_mock_env:Function]
# @RELATION: BINDS_TO -> TestMigrationRoutes
def _mock_env(): def _mock_env():
"""Creates a mock config environment object.""" """Creates a mock config environment object."""
env = MagicMock() env = MagicMock()
@@ -240,6 +246,10 @@ def _mock_env():
return env return env
# [/DEF:_mock_env:Function]
# [DEF:_make_sync_config_manager:Function]
# @RELATION: BINDS_TO -> TestMigrationRoutes
def _make_sync_config_manager(environments): def _make_sync_config_manager(environments):
"""Creates a mock config manager with environments list.""" """Creates a mock config manager with environments list."""
settings = MagicMock() settings = MagicMock()
@@ -253,6 +263,8 @@ def _make_sync_config_manager(environments):
return cm return cm
# [/DEF:_make_sync_config_manager:Function]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_trigger_sync_now_creates_env_row_and_syncs(db_session, _mock_env): async def test_trigger_sync_now_creates_env_row_and_syncs(db_session, _mock_env):
"""Verify that trigger_sync_now creates an Environment row in DB before syncing, """Verify that trigger_sync_now creates an Environment row in DB before syncing,
@@ -507,4 +519,4 @@ async def test_dry_run_migration_rejects_same_environment(db_session):
assert exc.value.status_code == 400 assert exc.value.status_code == 400
# [/DEF:backend.src.api.routes.__tests__.test_migration_routes:Module] # [/DEF:TestMigrationRoutes:Module]
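
A minimal sketch of the config-manager double that `_make_config_manager` above builds, assuming a `get_settings()` accessor; the cron attribute name is an illustrative assumption.

from unittest.mock import MagicMock

def make_config_manager_sketch(cron: str = "0 2 * * *") -> MagicMock:
    settings = MagicMock()
    settings.migration_sync_cron = cron  # assumed attribute name for the schedule
    cm = MagicMock()
    cm.get_settings.return_value = settings
    return cm
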

View File

@@ -1,9 +1,9 @@
# [DEF:backend.src.api.routes.__tests__.test_profile_api:Module] # [DEF:TestProfileApi:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, profile, api, preferences, lookup, contract # @SEMANTICS: tests, profile, api, preferences, lookup, contract
# @PURPOSE: Verifies profile API route contracts for preference read/update and Superset account lookup. # @PURPOSE: Verifies profile API route contracts for preference read/update and Superset account lookup.
# @LAYER: API # @LAYER: API
# @RELATION: TESTS -> backend.src.api.routes.profile
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
from datetime import datetime, timezone from datetime import datetime, timezone
@@ -34,6 +34,7 @@ client = TestClient(app)
# [DEF:mock_profile_route_dependencies:Function] # [DEF:mock_profile_route_dependencies:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Provides deterministic dependency overrides for profile route tests. # @PURPOSE: Provides deterministic dependency overrides for profile route tests.
# @PRE: App instance is initialized. # @PRE: App instance is initialized.
# @POST: Dependencies are overridden for current test and restored afterward. # @POST: Dependencies are overridden for current test and restored afterward.
@@ -54,6 +55,7 @@ def mock_profile_route_dependencies():
# [DEF:profile_route_deps_fixture:Function] # [DEF:profile_route_deps_fixture:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Pytest fixture wrapper for profile route dependency overrides. # @PURPOSE: Pytest fixture wrapper for profile route dependency overrides.
# @PRE: None. # @PRE: None.
# @POST: Yields overridden dependencies and clears overrides after test. # @POST: Yields overridden dependencies and clears overrides after test.
@@ -69,6 +71,7 @@ def profile_route_deps_fixture():
# [DEF:_build_preference_response:Function] # [DEF:_build_preference_response:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Builds stable profile preference response payload for route tests. # @PURPOSE: Builds stable profile preference response payload for route tests.
# @PRE: user_id is provided. # @PRE: user_id is provided.
# @POST: Returns ProfilePreferenceResponse object with deterministic timestamps. # @POST: Returns ProfilePreferenceResponse object with deterministic timestamps.
@@ -109,6 +112,7 @@ def _build_preference_response(user_id: str = "u-1") -> ProfilePreferenceRespons
# [DEF:test_get_profile_preferences_returns_self_payload:Function] # [DEF:test_get_profile_preferences_returns_self_payload:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Verifies GET /api/profile/preferences returns stable self-scoped payload. # @PURPOSE: Verifies GET /api/profile/preferences returns stable self-scoped payload.
# @PRE: Authenticated user context is available. # @PRE: Authenticated user context is available.
# @POST: Response status is 200 and payload contains current user preference. # @POST: Response status is 200 and payload contains current user preference.
@@ -141,6 +145,7 @@ def test_get_profile_preferences_returns_self_payload(profile_route_deps_fixture
# [DEF:test_patch_profile_preferences_success:Function] # [DEF:test_patch_profile_preferences_success:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Verifies PATCH /api/profile/preferences persists valid payload through route mapping. # @PURPOSE: Verifies PATCH /api/profile/preferences persists valid payload through route mapping.
# @PRE: Valid request payload and authenticated user. # @PRE: Valid request payload and authenticated user.
# @POST: Response status is 200 with saved preference payload. # @POST: Response status is 200 with saved preference payload.
@@ -191,6 +196,7 @@ def test_patch_profile_preferences_success(profile_route_deps_fixture):
# [DEF:test_patch_profile_preferences_validation_error:Function] # [DEF:test_patch_profile_preferences_validation_error:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Verifies route maps domain validation failure to HTTP 422 with actionable details. # @PURPOSE: Verifies route maps domain validation failure to HTTP 422 with actionable details.
# @PRE: Service raises ProfileValidationError. # @PRE: Service raises ProfileValidationError.
# @POST: Response status is 422 and includes validation messages. # @POST: Response status is 422 and includes validation messages.
@@ -217,6 +223,7 @@ def test_patch_profile_preferences_validation_error(profile_route_deps_fixture):
# [DEF:test_patch_profile_preferences_cross_user_denied:Function] # [DEF:test_patch_profile_preferences_cross_user_denied:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Verifies route maps domain authorization guard failure to HTTP 403. # @PURPOSE: Verifies route maps domain authorization guard failure to HTTP 403.
# @PRE: Service raises ProfileAuthorizationError. # @PRE: Service raises ProfileAuthorizationError.
# @POST: Response status is 403 with denial message. # @POST: Response status is 403 with denial message.
@@ -242,6 +249,7 @@ def test_patch_profile_preferences_cross_user_denied(profile_route_deps_fixture)
# [DEF:test_lookup_superset_accounts_success:Function] # [DEF:test_lookup_superset_accounts_success:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Verifies lookup route returns success payload with normalized candidates. # @PURPOSE: Verifies lookup route returns success payload with normalized candidates.
# @PRE: Valid environment_id and service success response. # @PRE: Valid environment_id and service success response.
# @POST: Response status is 200 and items list is returned. # @POST: Response status is 200 and items list is returned.
@@ -278,6 +286,7 @@ def test_lookup_superset_accounts_success(profile_route_deps_fixture):
# [DEF:test_lookup_superset_accounts_env_not_found:Function] # [DEF:test_lookup_superset_accounts_env_not_found:Function]
# @RELATION: BINDS_TO -> TestProfileApi
# @PURPOSE: Verifies lookup route maps missing environment to HTTP 404. # @PURPOSE: Verifies lookup route maps missing environment to HTTP 404.
# @PRE: Service raises EnvironmentNotFoundError. # @PRE: Service raises EnvironmentNotFoundError.
# @POST: Response status is 404 with explicit message. # @POST: Response status is 404 with explicit message.
@@ -295,4 +304,4 @@ def test_lookup_superset_accounts_env_not_found(profile_route_deps_fixture):
assert payload["detail"] == "Environment 'missing-env' not found" assert payload["detail"] == "Environment 'missing-env' not found"
# [/DEF:test_lookup_superset_accounts_env_not_found:Function] # [/DEF:test_lookup_superset_accounts_env_not_found:Function]
# [/DEF:backend.src.api.routes.__tests__.test_profile_api:Module] # [/DEF:TestProfileApi:Module]
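
The fixtures above follow FastAPI's override-and-restore pattern. A hedged sketch of that pattern, assuming this module's `app` instance and `get_current_user` dependency are in scope; the real fixture also swaps the profile service dependency the same way.

from unittest.mock import MagicMock
import pytest

@pytest.fixture
def profile_deps_sketch():
    # Overrides are keyed by the dependency callable itself.
    app.dependency_overrides[get_current_user] = lambda: MagicMock(username="u-1")
    try:
        yield
    finally:
        # Restore global state so overrides never leak into other tests.
        app.dependency_overrides.clear()
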

View File

@@ -1,9 +1,9 @@
# [DEF:backend.tests.test_reports_api:Module] # [DEF:TestReportsApi:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, reports, api, contract, pagination, filtering # @SEMANTICS: tests, reports, api, contract, pagination, filtering
# @PURPOSE: Contract tests for GET /api/reports defaults, pagination, and filtering behavior. # @PURPOSE: Contract tests for GET /api/reports defaults, pagination, and filtering behavior.
# @LAYER: Domain (Tests) # @LAYER: Domain (Tests)
# @RELATION: TESTS -> backend.src.api.routes.reports
# @INVARIANT: API response contract contains {items,total,page,page_size,has_next,applied_filters}. # @INVARIANT: API response contract contains {items,total,page,page_size,has_next,applied_filters}.
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
@@ -24,12 +24,26 @@ class _FakeTaskManager:
return self._tasks return self._tasks
# [DEF:_admin_user:Function]
# @RELATION: BINDS_TO -> TestReportsApi
def _admin_user(): def _admin_user():
admin_role = SimpleNamespace(name="Admin", permissions=[]) admin_role = SimpleNamespace(name="Admin", permissions=[])
return SimpleNamespace(username="test-admin", roles=[admin_role]) return SimpleNamespace(username="test-admin", roles=[admin_role])
def _make_task(task_id: str, plugin_id: str, status: TaskStatus, started_at: datetime, finished_at: datetime = None, result=None): # [/DEF:_admin_user:Function]
# [DEF:_make_task:Function]
# @RELATION: BINDS_TO -> TestReportsApi
def _make_task(
task_id: str,
plugin_id: str,
status: TaskStatus,
started_at: datetime,
finished_at: datetime = None,
result=None,
):
return Task( return Task(
id=task_id, id=task_id,
plugin_id=plugin_id, plugin_id=plugin_id,
@@ -41,12 +55,35 @@ def _make_task(task_id: str, plugin_id: str, status: TaskStatus, started_at: dat
) )
# [/DEF:_make_task:Function]
# [DEF:test_get_reports_default_pagination_contract:Function]
# @RELATION: BINDS_TO -> TestReportsApi
def test_get_reports_default_pagination_contract(): def test_get_reports_default_pagination_contract():
now = datetime.utcnow() now = datetime.utcnow()
tasks = [ tasks = [
_make_task("t-1", "superset-backup", TaskStatus.SUCCESS, now - timedelta(minutes=10), now - timedelta(minutes=9)), _make_task(
_make_task("t-2", "superset-migration", TaskStatus.FAILED, now - timedelta(minutes=8), now - timedelta(minutes=7)), "t-1",
_make_task("t-3", "llm_dashboard_validation", TaskStatus.RUNNING, now - timedelta(minutes=6), None), "superset-backup",
TaskStatus.SUCCESS,
now - timedelta(minutes=10),
now - timedelta(minutes=9),
),
_make_task(
"t-2",
"superset-migration",
TaskStatus.FAILED,
now - timedelta(minutes=8),
now - timedelta(minutes=7),
),
_make_task(
"t-3",
"llm_dashboard_validation",
TaskStatus.RUNNING,
now - timedelta(minutes=6),
None,
),
] ]
app.dependency_overrides[get_current_user] = lambda: _admin_user() app.dependency_overrides[get_current_user] = lambda: _admin_user()
@@ -58,7 +95,9 @@ def test_get_reports_default_pagination_contract():
assert response.status_code == 200 assert response.status_code == 200
data = response.json() data = response.json()
assert set(["items", "total", "page", "page_size", "has_next", "applied_filters"]).issubset(data.keys()) assert set(
["items", "total", "page", "page_size", "has_next", "applied_filters"]
).issubset(data.keys())
assert data["page"] == 1 assert data["page"] == 1
assert data["page_size"] == 20 assert data["page_size"] == 20
assert data["total"] == 3 assert data["total"] == 3
@@ -69,12 +108,35 @@ def test_get_reports_default_pagination_contract():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:test_get_reports_default_pagination_contract:Function]
# [DEF:test_get_reports_filter_and_pagination:Function]
# @RELATION: BINDS_TO -> TestReportsApi
def test_get_reports_filter_and_pagination(): def test_get_reports_filter_and_pagination():
now = datetime.utcnow() now = datetime.utcnow()
tasks = [ tasks = [
_make_task("t-1", "superset-backup", TaskStatus.SUCCESS, now - timedelta(minutes=30), now - timedelta(minutes=29)), _make_task(
_make_task("t-2", "superset-backup", TaskStatus.FAILED, now - timedelta(minutes=20), now - timedelta(minutes=19)), "t-1",
_make_task("t-3", "superset-migration", TaskStatus.FAILED, now - timedelta(minutes=10), now - timedelta(minutes=9)), "superset-backup",
TaskStatus.SUCCESS,
now - timedelta(minutes=30),
now - timedelta(minutes=29),
),
_make_task(
"t-2",
"superset-backup",
TaskStatus.FAILED,
now - timedelta(minutes=20),
now - timedelta(minutes=19),
),
_make_task(
"t-3",
"superset-migration",
TaskStatus.FAILED,
now - timedelta(minutes=10),
now - timedelta(minutes=9),
),
] ]
app.dependency_overrides[get_current_user] = lambda: _admin_user() app.dependency_overrides[get_current_user] = lambda: _admin_user()
@@ -82,7 +144,9 @@ def test_get_reports_filter_and_pagination():
try: try:
client = TestClient(app) client = TestClient(app)
response = client.get("/api/reports?task_types=backup&statuses=failed&page=1&page_size=1") response = client.get(
"/api/reports?task_types=backup&statuses=failed&page=1&page_size=1"
)
assert response.status_code == 200 assert response.status_code == 200
data = response.json() data = response.json()
@@ -97,12 +161,29 @@ def test_get_reports_filter_and_pagination():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:test_get_reports_filter_and_pagination:Function]
# [DEF:test_get_reports_handles_mixed_naive_and_aware_datetimes:Function]
# @RELATION: BINDS_TO -> TestReportsApi
def test_get_reports_handles_mixed_naive_and_aware_datetimes(): def test_get_reports_handles_mixed_naive_and_aware_datetimes():
naive_now = datetime.utcnow() naive_now = datetime.utcnow()
aware_now = datetime.now(timezone.utc) aware_now = datetime.now(timezone.utc)
tasks = [ tasks = [
_make_task("t-naive", "superset-backup", TaskStatus.SUCCESS, naive_now - timedelta(minutes=5), naive_now - timedelta(minutes=4)), _make_task(
_make_task("t-aware", "superset-migration", TaskStatus.FAILED, aware_now - timedelta(minutes=3), aware_now - timedelta(minutes=2)), "t-naive",
"superset-backup",
TaskStatus.SUCCESS,
naive_now - timedelta(minutes=5),
naive_now - timedelta(minutes=4),
),
_make_task(
"t-aware",
"superset-migration",
TaskStatus.FAILED,
aware_now - timedelta(minutes=3),
aware_now - timedelta(minutes=2),
),
] ]
app.dependency_overrides[get_current_user] = lambda: _admin_user() app.dependency_overrides[get_current_user] = lambda: _admin_user()
@@ -119,9 +200,22 @@ def test_get_reports_handles_mixed_naive_and_aware_datetimes():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:test_get_reports_handles_mixed_naive_and_aware_datetimes:Function]
# [DEF:test_get_reports_invalid_filter_returns_400:Function]
# @RELATION: BINDS_TO -> TestReportsApi
def test_get_reports_invalid_filter_returns_400(): def test_get_reports_invalid_filter_returns_400():
now = datetime.utcnow() now = datetime.utcnow()
tasks = [_make_task("t-1", "superset-backup", TaskStatus.SUCCESS, now - timedelta(minutes=5), now - timedelta(minutes=4))] tasks = [
_make_task(
"t-1",
"superset-backup",
TaskStatus.SUCCESS,
now - timedelta(minutes=5),
now - timedelta(minutes=4),
)
]
app.dependency_overrides[get_current_user] = lambda: _admin_user() app.dependency_overrides[get_current_user] = lambda: _admin_user()
app.dependency_overrides[get_task_manager] = lambda: _FakeTaskManager(tasks) app.dependency_overrides[get_task_manager] = lambda: _FakeTaskManager(tasks)
@@ -136,4 +230,5 @@ def test_get_reports_invalid_filter_returns_400():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:backend.tests.test_reports_api:Module] # [/DEF:test_get_reports_invalid_filter_returns_400:Function]
# [/DEF:TestReportsApi:Module]
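
A self-contained sketch of a payload that satisfies the invariant asserted above ({items, total, page, page_size, has_next, applied_filters}); the pagination arithmetic is the obvious slice, not necessarily the route's own implementation.

def build_reports_page_sketch(tasks, page: int = 1, page_size: int = 20, filters=None):
    start = (page - 1) * page_size
    items = tasks[start:start + page_size]
    return {
        "items": items,
        "total": len(tasks),
        "page": page,
        "page_size": page_size,
        "has_next": start + page_size < len(tasks),  # more pages remain
        "applied_filters": filters or {},
    }

With 45 tasks and the defaults, the sketch returns total=45, page=1, page_size=20 and has_next=True, matching the shape the contract tests assert.
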

View File

@@ -1,9 +1,9 @@
# [DEF:backend.tests.test_reports_detail_api:Module] # [DEF:TestReportsDetailApi:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, reports, api, detail, diagnostics # @SEMANTICS: tests, reports, api, detail, diagnostics
# @PURPOSE: Contract tests for GET /api/reports/{report_id} detail endpoint behavior. # @PURPOSE: Contract tests for GET /api/reports/{report_id} detail endpoint behavior.
# @LAYER: Domain (Tests) # @LAYER: Domain (Tests)
# @RELATION: TESTS -> backend.src.api.routes.reports
# @INVARIANT: Detail endpoint tests must keep deterministic assertions for success and not-found contracts. # @INVARIANT: Detail endpoint tests must keep deterministic assertions for success and not-found contracts.
from datetime import datetime, timedelta from datetime import datetime, timedelta
@@ -24,11 +24,18 @@ class _FakeTaskManager:
return self._tasks return self._tasks
# [DEF:_admin_user:Function]
# @RELATION: BINDS_TO -> TestReportsDetailApi
def _admin_user(): def _admin_user():
role = SimpleNamespace(name="Admin", permissions=[]) role = SimpleNamespace(name="Admin", permissions=[])
return SimpleNamespace(username="test-admin", roles=[role]) return SimpleNamespace(username="test-admin", roles=[role])
# [/DEF:_admin_user:Function]
# [DEF:_make_task:Function]
# @RELATION: BINDS_TO -> TestReportsDetailApi
def _make_task(task_id: str, plugin_id: str, status: TaskStatus, result=None): def _make_task(task_id: str, plugin_id: str, status: TaskStatus, result=None):
now = datetime.utcnow() now = datetime.utcnow()
return Task( return Task(
@@ -36,18 +43,30 @@ def _make_task(task_id: str, plugin_id: str, status: TaskStatus, result=None):
plugin_id=plugin_id, plugin_id=plugin_id,
status=status, status=status,
started_at=now - timedelta(minutes=2), started_at=now - timedelta(minutes=2),
finished_at=now - timedelta(minutes=1) if status != TaskStatus.RUNNING else None, finished_at=now - timedelta(minutes=1)
if status != TaskStatus.RUNNING
else None,
params={"environment_id": "env-1"}, params={"environment_id": "env-1"},
result=result or {"summary": f"{plugin_id} result"}, result=result or {"summary": f"{plugin_id} result"},
) )
# [/DEF:_make_task:Function]
# [DEF:test_get_report_detail_success:Function]
# @RELATION: BINDS_TO -> TestReportsDetailApi
def test_get_report_detail_success(): def test_get_report_detail_success():
task = _make_task( task = _make_task(
"detail-1", "detail-1",
"superset-migration", "superset-migration",
TaskStatus.FAILED, TaskStatus.FAILED,
result={"error": {"message": "Step failed", "next_actions": ["Check mapping", "Retry"]}}, result={
"error": {
"message": "Step failed",
"next_actions": ["Check mapping", "Retry"],
}
},
) )
app.dependency_overrides[get_current_user] = lambda: _admin_user() app.dependency_overrides[get_current_user] = lambda: _admin_user()
@@ -67,6 +86,11 @@ def test_get_report_detail_success():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:test_get_report_detail_success:Function]
# [DEF:test_get_report_detail_not_found:Function]
# @RELATION: BINDS_TO -> TestReportsDetailApi
def test_get_report_detail_not_found(): def test_get_report_detail_not_found():
task = _make_task("detail-2", "superset-backup", TaskStatus.SUCCESS) task = _make_task("detail-2", "superset-backup", TaskStatus.SUCCESS)
@@ -81,4 +105,5 @@ def test_get_report_detail_not_found():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:backend.tests.test_reports_detail_api:Module] # [/DEF:test_get_report_detail_not_found:Function]
# [/DEF:TestReportsDetailApi:Module]
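
A sketch of the success and not-found contracts exercised above, assuming the fake manager exposes its tasks through get_tasks() as the in-module fakes do; the response keys are illustrative.

from fastapi import HTTPException

def get_report_detail_sketch(task_manager, report_id: str) -> dict:
    task = next((t for t in task_manager.get_tasks() if t.id == report_id), None)
    if task is None:
        raise HTTPException(status_code=404, detail=f"Report '{report_id}' not found")
    return {
        "id": task.id,
        "plugin_id": task.plugin_id,
        "status": task.status,
        "result": task.result,  # carries error.message / next_actions on failures
    }
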

View File

@@ -1,9 +1,9 @@
# [DEF:backend.tests.test_reports_openapi_conformance:Module] # [DEF:TestReportsOpenapiConformance:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, reports, openapi, conformance # @SEMANTICS: tests, reports, openapi, conformance
# @PURPOSE: Validate implemented reports payload shape against OpenAPI-required top-level contract fields. # @PURPOSE: Validate implemented reports payload shape against OpenAPI-required top-level contract fields.
# @LAYER: Domain (Tests) # @LAYER: Domain (Tests)
# @RELATION: TESTS -> specs/020-task-reports-design/contracts/reports-api.openapi.yaml
# @INVARIANT: List and detail payloads include required contract keys. # @INVARIANT: List and detail payloads include required contract keys.
from datetime import datetime from datetime import datetime
@@ -24,11 +24,18 @@ class _FakeTaskManager:
return self._tasks return self._tasks
# [DEF:_admin_user:Function]
# @RELATION: BINDS_TO -> TestReportsOpenapiConformance
def _admin_user(): def _admin_user():
role = SimpleNamespace(name="Admin", permissions=[]) role = SimpleNamespace(name="Admin", permissions=[])
return SimpleNamespace(username="test-admin", roles=[role]) return SimpleNamespace(username="test-admin", roles=[role])
# [/DEF:_admin_user:Function]
# [DEF:_task:Function]
# @RELATION: BINDS_TO -> TestReportsOpenapiConformance
def _task(task_id: str, plugin_id: str, status: TaskStatus): def _task(task_id: str, plugin_id: str, status: TaskStatus):
now = datetime.utcnow() now = datetime.utcnow()
return Task( return Task(
@@ -42,6 +49,11 @@ def _task(task_id: str, plugin_id: str, status: TaskStatus):
) )
# [/DEF:_task:Function]
# [DEF:test_reports_list_openapi_required_keys:Function]
# @RELATION: BINDS_TO -> TestReportsOpenapiConformance
def test_reports_list_openapi_required_keys(): def test_reports_list_openapi_required_keys():
tasks = [ tasks = [
_task("r-1", "superset-backup", TaskStatus.SUCCESS), _task("r-1", "superset-backup", TaskStatus.SUCCESS),
@@ -56,12 +68,24 @@ def test_reports_list_openapi_required_keys():
assert response.status_code == 200 assert response.status_code == 200
body = response.json() body = response.json()
required = {"items", "total", "page", "page_size", "has_next", "applied_filters"} required = {
"items",
"total",
"page",
"page_size",
"has_next",
"applied_filters",
}
assert required.issubset(body.keys()) assert required.issubset(body.keys())
finally: finally:
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:test_reports_list_openapi_required_keys:Function]
# [DEF:test_reports_detail_openapi_required_keys:Function]
# @RELATION: BINDS_TO -> TestReportsOpenapiConformance
def test_reports_detail_openapi_required_keys(): def test_reports_detail_openapi_required_keys():
tasks = [_task("r-3", "llm_dashboard_validation", TaskStatus.SUCCESS)] tasks = [_task("r-3", "llm_dashboard_validation", TaskStatus.SUCCESS)]
app.dependency_overrides[get_current_user] = lambda: _admin_user() app.dependency_overrides[get_current_user] = lambda: _admin_user()
@@ -78,4 +102,5 @@ def test_reports_detail_openapi_required_keys():
app.dependency_overrides.clear() app.dependency_overrides.clear()
# [/DEF:backend.tests.test_reports_openapi_conformance:Module] # [/DEF:test_reports_detail_openapi_required_keys:Function]
# [/DEF:TestReportsOpenapiConformance:Module]
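
The conformance checks above reduce to a set-difference assertion. A tiny self-contained sketch of that helper, with the required keys copied from the list contract used in this module:

REQUIRED_LIST_KEYS = {"items", "total", "page", "page_size", "has_next", "applied_filters"}

def assert_list_contract_keys_sketch(body: dict) -> None:
    missing = REQUIRED_LIST_KEYS - body.keys()
    assert not missing, f"payload is missing contract keys: {sorted(missing)}"
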

View File

@@ -27,6 +27,8 @@ def client():
# @TEST_CONTRACT: get_task_logs_api -> Invariants # @TEST_CONTRACT: get_task_logs_api -> Invariants
# @TEST_FIXTURE: valid_task_logs_request # @TEST_FIXTURE: valid_task_logs_request
# [DEF:test_get_task_logs_success:Function]
# @RELATION: BINDS_TO -> __tests__/test_tasks_logs
def test_get_task_logs_success(client): def test_get_task_logs_success(client):
tc, tm = client tc, tm = client
@@ -46,6 +48,10 @@ def test_get_task_logs_success(client):
assert args[0][1].level == "INFO" assert args[0][1].level == "INFO"
# @TEST_EDGE: task_not_found # @TEST_EDGE: task_not_found
# [/DEF:test_get_task_logs_success:Function]
# [DEF:test_get_task_logs_not_found:Function]
# @RELATION: BINDS_TO -> __tests__/test_tasks_logs
def test_get_task_logs_not_found(client): def test_get_task_logs_not_found(client):
tc, tm = client tc, tm = client
tm.get_task.return_value = None tm.get_task.return_value = None
@@ -55,6 +61,10 @@ def test_get_task_logs_not_found(client):
assert response.json()["detail"] == "Task not found" assert response.json()["detail"] == "Task not found"
# @TEST_EDGE: invalid_limit # @TEST_EDGE: invalid_limit
# [/DEF:test_get_task_logs_not_found:Function]
# [DEF:test_get_task_logs_invalid_limit:Function]
# @RELATION: BINDS_TO -> __tests__/test_tasks_logs
def test_get_task_logs_invalid_limit(client): def test_get_task_logs_invalid_limit(client):
tc, tm = client tc, tm = client
# limit=0 is ge=1 in Query # limit=0 is ge=1 in Query
@@ -62,6 +72,10 @@ def test_get_task_logs_invalid_limit(client):
assert response.status_code == 422 assert response.status_code == 422
# @TEST_INVARIANT: response_purity # @TEST_INVARIANT: response_purity
# [/DEF:test_get_task_logs_invalid_limit:Function]
# [DEF:test_get_task_log_stats_success:Function]
# @RELATION: BINDS_TO -> __tests__/test_tasks_logs
def test_get_task_log_stats_success(client): def test_get_task_log_stats_success(client):
tc, tm = client tc, tm = client
tm.get_task.return_value = MagicMock() tm.get_task.return_value = MagicMock()
@@ -71,3 +85,4 @@ def test_get_task_log_stats_success(client):
assert response.status_code == 200 assert response.status_code == 200
# response_model=LogStats might wrap this, but let's check basic structure # response_model=LogStats might wrap this, but let's check basic structure
# assuming tm.get_task_log_stats returns something compatible with LogStats # assuming tm.get_task_log_stats returns something compatible with LogStats
# [/DEF:test_get_task_log_stats_success:Function]
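
A self-contained sketch of the validation and not-found behavior these tests cover: `limit` is constrained with ge=1 (so limit=0 yields 422) and unknown tasks map to a 404 with detail "Task not found". The route path, dependency wiring and the tiny manager are assumptions for illustration only.

from fastapi import APIRouter, Depends, HTTPException, Query

class _TaskManagerSketch:
    def __init__(self, tasks=None, logs=None):
        self._tasks = tasks or {}
        self._logs = logs or {}

    def get_task(self, task_id):
        return self._tasks.get(task_id)

    def get_task_logs(self, task_id, limit=100):
        return self._logs.get(task_id, [])[:limit]

def get_task_manager_sketch():
    return _TaskManagerSketch()

router_sketch = APIRouter()

@router_sketch.get("/api/tasks/{task_id}/logs")
async def get_task_logs_sketch(
    task_id: str,
    limit: int = Query(100, ge=1),  # limit=0 fails validation -> 422
    tm: _TaskManagerSketch = Depends(get_task_manager_sketch),
):
    if tm.get_task(task_id) is None:
        raise HTTPException(status_code=404, detail="Task not found")
    return tm.get_task_logs(task_id, limit=limit)
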

View File

@@ -31,6 +31,7 @@ from ...services.rbac_permission_catalog import (
# [/SECTION] # [/SECTION]
# [DEF:router:Variable] # [DEF:router:Variable]
# @RELATION: DEPENDS_ON -> fastapi.APIRouter
# @PURPOSE: APIRouter instance for admin routes. # @PURPOSE: APIRouter instance for admin routes.
router = APIRouter(prefix="/api/admin", tags=["admin"]) router = APIRouter(prefix="/api/admin", tags=["admin"])
# [/DEF:router:Variable] # [/DEF:router:Variable]
@@ -42,6 +43,7 @@ router = APIRouter(prefix="/api/admin", tags=["admin"])
# @POST: Returns a list of UserSchema objects. # @POST: Returns a list of UserSchema objects.
# @PARAM: db (Session) - Auth database session. # @PARAM: db (Session) - Auth database session.
# @RETURN: List[UserSchema] - List of users. # @RETURN: List[UserSchema] - List of users.
# @RELATION: CALLS -> User
@router.get("/users", response_model=List[UserSchema]) @router.get("/users", response_model=List[UserSchema])
async def list_users( async def list_users(
db: Session = Depends(get_auth_db), db: Session = Depends(get_auth_db),
@@ -60,6 +62,7 @@ async def list_users(
# @PARAM: user_in (UserCreate) - New user data. # @PARAM: user_in (UserCreate) - New user data.
# @PARAM: db (Session) - Auth database session. # @PARAM: db (Session) - Auth database session.
# @RETURN: UserSchema - The created user. # @RETURN: UserSchema - The created user.
# @RELATION: CALLS -> AuthRepository
@router.post("/users", response_model=UserSchema, status_code=status.HTTP_201_CREATED) @router.post("/users", response_model=UserSchema, status_code=status.HTTP_201_CREATED)
async def create_user( async def create_user(
user_in: UserCreate, user_in: UserCreate,
@@ -99,6 +102,7 @@ async def create_user(
# @PARAM: user_in (UserUpdate) - Updated user data. # @PARAM: user_in (UserUpdate) - Updated user data.
# @PARAM: db (Session) - Auth database session. # @PARAM: db (Session) - Auth database session.
# @RETURN: UserSchema - The updated user profile. # @RETURN: UserSchema - The updated user profile.
# @RELATION: CALLS -> AuthRepository
@router.put("/users/{user_id}", response_model=UserSchema) @router.put("/users/{user_id}", response_model=UserSchema)
async def update_user( async def update_user(
user_id: str, user_id: str,
@@ -139,6 +143,7 @@ async def update_user(
# @PARAM: user_id (str) - Target user UUID. # @PARAM: user_id (str) - Target user UUID.
# @PARAM: db (Session) - Auth database session. # @PARAM: db (Session) - Auth database session.
# @RETURN: None # @RETURN: None
# @RELATION: CALLS -> AuthRepository
@router.delete("/users/{user_id}", status_code=status.HTTP_204_NO_CONTENT) @router.delete("/users/{user_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_user( async def delete_user(
user_id: str, user_id: str,
@@ -313,6 +318,7 @@ async def list_permissions(
# [DEF:list_ad_mappings:Function] # [DEF:list_ad_mappings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Lists all AD Group to Role mappings. # @PURPOSE: Lists all AD Group to Role mappings.
# @RELATION: CALLS -> ADGroupMapping
@router.get("/ad-mappings", response_model=List[ADGroupMappingSchema]) @router.get("/ad-mappings", response_model=List[ADGroupMappingSchema])
async def list_ad_mappings( async def list_ad_mappings(
db: Session = Depends(get_auth_db), db: Session = Depends(get_auth_db),
@@ -323,7 +329,8 @@ async def list_ad_mappings(
# [/DEF:list_ad_mappings:Function] # [/DEF:list_ad_mappings:Function]
# [DEF:create_ad_mapping:Function] # [DEF:create_ad_mapping:Function]
# @COMPLEXITY: 3 # @RELATION: CALLS -> AuthRepository
# @COMPLEXITY: 2
# @PURPOSE: Creates a new AD Group mapping. # @PURPOSE: Creates a new AD Group mapping.
@router.post("/ad-mappings", response_model=ADGroupMappingSchema) @router.post("/ad-mappings", response_model=ADGroupMappingSchema)
async def create_ad_mapping( async def create_ad_mapping(

View File

@@ -1,5 +1,5 @@
# [DEF:backend.src.api.routes.clean_release:Module] # [DEF:backend.src.api.routes.clean_release:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 4
# @SEMANTICS: api, clean-release, candidate-preparation, compliance # @SEMANTICS: api, clean-release, candidate-preparation, compliance
# @PURPOSE: Expose clean release endpoints for candidate preparation and subsequent compliance flow. # @PURPOSE: Expose clean release endpoints for candidate preparation and subsequent compliance flow.
# @LAYER: API # @LAYER: API
@@ -19,10 +19,20 @@ from ...core.logger import belief_scope, logger
from ...dependencies import get_clean_release_repository, get_config_manager from ...dependencies import get_clean_release_repository, get_config_manager
from ...services.clean_release.preparation_service import prepare_candidate from ...services.clean_release.preparation_service import prepare_candidate
from ...services.clean_release.repository import CleanReleaseRepository from ...services.clean_release.repository import CleanReleaseRepository
from ...services.clean_release.compliance_orchestrator import CleanComplianceOrchestrator from ...services.clean_release.compliance_orchestrator import (
CleanComplianceOrchestrator,
)
from ...services.clean_release.report_builder import ComplianceReportBuilder from ...services.clean_release.report_builder import ComplianceReportBuilder
from ...services.clean_release.compliance_execution_service import ComplianceExecutionService, ComplianceRunError from ...services.clean_release.compliance_execution_service import (
from ...services.clean_release.dto import CandidateDTO, ManifestDTO, CandidateOverviewDTO, ComplianceRunDTO ComplianceExecutionService,
ComplianceRunError,
)
from ...services.clean_release.dto import (
CandidateDTO,
ManifestDTO,
CandidateOverviewDTO,
ComplianceRunDTO,
)
from ...services.clean_release.enums import ( from ...services.clean_release.enums import (
ComplianceDecision, ComplianceDecision,
ComplianceStageName, ComplianceStageName,
@@ -49,6 +59,8 @@ class PrepareCandidateRequest(BaseModel):
artifacts: List[Dict[str, Any]] = Field(default_factory=list) artifacts: List[Dict[str, Any]] = Field(default_factory=list)
sources: List[str] = Field(default_factory=list) sources: List[str] = Field(default_factory=list)
operator_id: str = Field(min_length=1) operator_id: str = Field(min_length=1)
# [/DEF:PrepareCandidateRequest:Class] # [/DEF:PrepareCandidateRequest:Class]
@@ -59,6 +71,8 @@ class StartCheckRequest(BaseModel):
profile: str = Field(default="enterprise-clean") profile: str = Field(default="enterprise-clean")
execution_mode: str = Field(default="tui") execution_mode: str = Field(default="tui")
triggered_by: str = Field(default="system") triggered_by: str = Field(default="system")
# [/DEF:StartCheckRequest:Class] # [/DEF:StartCheckRequest:Class]
@@ -69,6 +83,8 @@ class RegisterCandidateRequest(BaseModel):
version: str = Field(min_length=1) version: str = Field(min_length=1)
source_snapshot_ref: str = Field(min_length=1) source_snapshot_ref: str = Field(min_length=1)
created_by: str = Field(min_length=1) created_by: str = Field(min_length=1)
# [/DEF:RegisterCandidateRequest:Class] # [/DEF:RegisterCandidateRequest:Class]
@@ -76,6 +92,8 @@ class RegisterCandidateRequest(BaseModel):
# @PURPOSE: Request schema for candidate artifact import endpoint. # @PURPOSE: Request schema for candidate artifact import endpoint.
class ImportArtifactsRequest(BaseModel): class ImportArtifactsRequest(BaseModel):
artifacts: List[Dict[str, Any]] = Field(default_factory=list) artifacts: List[Dict[str, Any]] = Field(default_factory=list)
# [/DEF:ImportArtifactsRequest:Class] # [/DEF:ImportArtifactsRequest:Class]
@@ -83,6 +101,8 @@ class ImportArtifactsRequest(BaseModel):
# @PURPOSE: Request schema for manifest build endpoint. # @PURPOSE: Request schema for manifest build endpoint.
class BuildManifestRequest(BaseModel): class BuildManifestRequest(BaseModel):
created_by: str = Field(default="system") created_by: str = Field(default="system")
# [/DEF:BuildManifestRequest:Class] # [/DEF:BuildManifestRequest:Class]
@@ -91,6 +111,8 @@ class BuildManifestRequest(BaseModel):
class CreateComplianceRunRequest(BaseModel): class CreateComplianceRunRequest(BaseModel):
requested_by: str = Field(min_length=1) requested_by: str = Field(min_length=1)
manifest_id: str | None = None manifest_id: str | None = None
# [/DEF:CreateComplianceRunRequest:Class] # [/DEF:CreateComplianceRunRequest:Class]
@@ -98,14 +120,19 @@ class CreateComplianceRunRequest(BaseModel):
# @PURPOSE: Register a clean-release candidate for headless lifecycle. # @PURPOSE: Register a clean-release candidate for headless lifecycle.
# @PRE: Candidate identifier is unique. # @PRE: Candidate identifier is unique.
# @POST: Candidate is persisted in DRAFT status. # @POST: Candidate is persisted in DRAFT status.
@router.post("/candidates", response_model=CandidateDTO, status_code=status.HTTP_201_CREATED) @router.post(
"/candidates", response_model=CandidateDTO, status_code=status.HTTP_201_CREATED
)
async def register_candidate_v2_endpoint( async def register_candidate_v2_endpoint(
payload: RegisterCandidateRequest, payload: RegisterCandidateRequest,
repository: CleanReleaseRepository = Depends(get_clean_release_repository), repository: CleanReleaseRepository = Depends(get_clean_release_repository),
): ):
existing = repository.get_candidate(payload.id) existing = repository.get_candidate(payload.id)
if existing is not None: if existing is not None:
raise HTTPException(status_code=409, detail={"message": "Candidate already exists", "code": "CANDIDATE_EXISTS"}) raise HTTPException(
status_code=409,
detail={"message": "Candidate already exists", "code": "CANDIDATE_EXISTS"},
)
candidate = ReleaseCandidate( candidate = ReleaseCandidate(
id=payload.id, id=payload.id,
@@ -125,6 +152,8 @@ async def register_candidate_v2_endpoint(
created_by=candidate.created_by, created_by=candidate.created_by,
status=CandidateStatus(candidate.status), status=CandidateStatus(candidate.status),
) )
# [/DEF:register_candidate_v2_endpoint:Function] # [/DEF:register_candidate_v2_endpoint:Function]
@@ -140,9 +169,15 @@ async def import_candidate_artifacts_v2_endpoint(
): ):
candidate = repository.get_candidate(candidate_id) candidate = repository.get_candidate(candidate_id)
if candidate is None: if candidate is None:
raise HTTPException(status_code=404, detail={"message": "Candidate not found", "code": "CANDIDATE_NOT_FOUND"}) raise HTTPException(
status_code=404,
detail={"message": "Candidate not found", "code": "CANDIDATE_NOT_FOUND"},
)
if not payload.artifacts: if not payload.artifacts:
raise HTTPException(status_code=400, detail={"message": "Artifacts list is required", "code": "ARTIFACTS_EMPTY"}) raise HTTPException(
status_code=400,
detail={"message": "Artifacts list is required", "code": "ARTIFACTS_EMPTY"},
)
for artifact in payload.artifacts: for artifact in payload.artifacts:
required = ("id", "path", "sha256", "size") required = ("id", "path", "sha256", "size")
@@ -150,7 +185,10 @@ async def import_candidate_artifacts_v2_endpoint(
if field_name not in artifact: if field_name not in artifact:
raise HTTPException( raise HTTPException(
status_code=400, status_code=400,
detail={"message": f"Artifact missing field '{field_name}'", "code": "ARTIFACT_INVALID"}, detail={
"message": f"Artifact missing field '{field_name}'",
"code": "ARTIFACT_INVALID",
},
) )
artifact_model = CandidateArtifact( artifact_model = CandidateArtifact(
@@ -172,6 +210,8 @@ async def import_candidate_artifacts_v2_endpoint(
repository.save_candidate(candidate) repository.save_candidate(candidate)
return {"status": "success"} return {"status": "success"}
# [/DEF:import_candidate_artifacts_v2_endpoint:Function] # [/DEF:import_candidate_artifacts_v2_endpoint:Function]
@@ -179,7 +219,11 @@ async def import_candidate_artifacts_v2_endpoint(
# @PURPOSE: Build immutable manifest snapshot for prepared candidate. # @PURPOSE: Build immutable manifest snapshot for prepared candidate.
# @PRE: Candidate exists and has imported artifacts. # @PRE: Candidate exists and has imported artifacts.
# @POST: Returns created ManifestDTO with incremented version. # @POST: Returns created ManifestDTO with incremented version.
@router.post("/candidates/{candidate_id}/manifests", response_model=ManifestDTO, status_code=status.HTTP_201_CREATED) @router.post(
"/candidates/{candidate_id}/manifests",
response_model=ManifestDTO,
status_code=status.HTTP_201_CREATED,
)
async def build_candidate_manifest_v2_endpoint( async def build_candidate_manifest_v2_endpoint(
candidate_id: str, candidate_id: str,
payload: BuildManifestRequest, payload: BuildManifestRequest,
@@ -194,7 +238,10 @@ async def build_candidate_manifest_v2_endpoint(
created_by=payload.created_by, created_by=payload.created_by,
) )
except ValueError as exc: except ValueError as exc:
raise HTTPException(status_code=400, detail={"message": str(exc), "code": "MANIFEST_BUILD_ERROR"}) raise HTTPException(
status_code=400,
detail={"message": str(exc), "code": "MANIFEST_BUILD_ERROR"},
)
return ManifestDTO( return ManifestDTO(
id=manifest.id, id=manifest.id,
@@ -207,6 +254,8 @@ async def build_candidate_manifest_v2_endpoint(
source_snapshot_ref=manifest.source_snapshot_ref, source_snapshot_ref=manifest.source_snapshot_ref,
content_json=manifest.content_json, content_json=manifest.content_json,
) )
# [/DEF:build_candidate_manifest_v2_endpoint:Function] # [/DEF:build_candidate_manifest_v2_endpoint:Function]
@@ -221,26 +270,53 @@ async def get_candidate_overview_v2_endpoint(
): ):
candidate = repository.get_candidate(candidate_id) candidate = repository.get_candidate(candidate_id)
if candidate is None: if candidate is None:
raise HTTPException(status_code=404, detail={"message": "Candidate not found", "code": "CANDIDATE_NOT_FOUND"}) raise HTTPException(
status_code=404,
detail={"message": "Candidate not found", "code": "CANDIDATE_NOT_FOUND"},
)
manifests = repository.get_manifests_by_candidate(candidate_id) manifests = repository.get_manifests_by_candidate(candidate_id)
latest_manifest = sorted(manifests, key=lambda m: m.manifest_version, reverse=True)[0] if manifests else None latest_manifest = (
sorted(manifests, key=lambda m: m.manifest_version, reverse=True)[0]
if manifests
else None
)
runs = [run for run in repository.check_runs.values() if run.candidate_id == candidate_id] runs = [
latest_run = sorted(runs, key=lambda run: run.requested_at or datetime.min.replace(tzinfo=timezone.utc), reverse=True)[0] if runs else None run
for run in repository.check_runs.values()
if run.candidate_id == candidate_id
]
latest_run = (
sorted(
runs,
key=lambda run: run.requested_at
or datetime.min.replace(tzinfo=timezone.utc),
reverse=True,
)[0]
if runs
else None
)
latest_report = None latest_report = None
if latest_run is not None: if latest_run is not None:
latest_report = next((r for r in repository.reports.values() if r.run_id == latest_run.id), None) latest_report = next(
(r for r in repository.reports.values() if r.run_id == latest_run.id), None
)
latest_policy_snapshot = repository.get_policy(latest_run.policy_snapshot_id) if latest_run else None latest_policy_snapshot = (
latest_registry_snapshot = repository.get_registry(latest_run.registry_snapshot_id) if latest_run else None repository.get_policy(latest_run.policy_snapshot_id) if latest_run else None
)
latest_registry_snapshot = (
repository.get_registry(latest_run.registry_snapshot_id) if latest_run else None
)
approval_decisions = getattr(repository, "approval_decisions", []) approval_decisions = getattr(repository, "approval_decisions", [])
latest_approval = ( latest_approval = (
sorted( sorted(
[item for item in approval_decisions if item.candidate_id == candidate_id], [item for item in approval_decisions if item.candidate_id == candidate_id],
key=lambda item: item.decided_at or datetime.min.replace(tzinfo=timezone.utc), key=lambda item: item.decided_at
or datetime.min.replace(tzinfo=timezone.utc),
reverse=True, reverse=True,
)[0] )[0]
if approval_decisions if approval_decisions
@@ -252,7 +328,8 @@ async def get_candidate_overview_v2_endpoint(
latest_publication = ( latest_publication = (
sorted( sorted(
[item for item in publication_records if item.candidate_id == candidate_id], [item for item in publication_records if item.candidate_id == candidate_id],
key=lambda item: item.published_at or datetime.min.replace(tzinfo=timezone.utc), key=lambda item: item.published_at
or datetime.min.replace(tzinfo=timezone.utc),
reverse=True, reverse=True,
)[0] )[0]
if publication_records if publication_records
@@ -266,19 +343,35 @@ async def get_candidate_overview_v2_endpoint(
source_snapshot_ref=candidate.source_snapshot_ref, source_snapshot_ref=candidate.source_snapshot_ref,
status=CandidateStatus(candidate.status), status=CandidateStatus(candidate.status),
latest_manifest_id=latest_manifest.id if latest_manifest else None, latest_manifest_id=latest_manifest.id if latest_manifest else None,
latest_manifest_digest=latest_manifest.manifest_digest if latest_manifest else None, latest_manifest_digest=latest_manifest.manifest_digest
if latest_manifest
else None,
latest_run_id=latest_run.id if latest_run else None, latest_run_id=latest_run.id if latest_run else None,
latest_run_status=RunStatus(latest_run.status) if latest_run else None, latest_run_status=RunStatus(latest_run.status) if latest_run else None,
latest_report_id=latest_report.id if latest_report else None, latest_report_id=latest_report.id if latest_report else None,
latest_report_final_status=ComplianceDecision(latest_report.final_status) if latest_report else None, latest_report_final_status=ComplianceDecision(latest_report.final_status)
latest_policy_snapshot_id=latest_policy_snapshot.id if latest_policy_snapshot else None, if latest_report
latest_policy_version=latest_policy_snapshot.policy_version if latest_policy_snapshot else None, else None,
latest_registry_snapshot_id=latest_registry_snapshot.id if latest_registry_snapshot else None, latest_policy_snapshot_id=latest_policy_snapshot.id
latest_registry_version=latest_registry_snapshot.registry_version if latest_registry_snapshot else None, if latest_policy_snapshot
else None,
latest_policy_version=latest_policy_snapshot.policy_version
if latest_policy_snapshot
else None,
latest_registry_snapshot_id=latest_registry_snapshot.id
if latest_registry_snapshot
else None,
latest_registry_version=latest_registry_snapshot.registry_version
if latest_registry_snapshot
else None,
latest_approval_decision=latest_approval.decision if latest_approval else None, latest_approval_decision=latest_approval.decision if latest_approval else None,
latest_publication_id=latest_publication.id if latest_publication else None, latest_publication_id=latest_publication.id if latest_publication else None,
latest_publication_status=latest_publication.status if latest_publication else None, latest_publication_status=latest_publication.status
if latest_publication
else None,
) )
# [/DEF:get_candidate_overview_v2_endpoint:Function] # [/DEF:get_candidate_overview_v2_endpoint:Function]
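The overview endpoint repeatedly picks the newest record by a timestamp that may be None. A minimal standalone sketch of that selection, using a hypothetical Run stand-in rather than the repository's real model, shows why the fallback is an aware datetime.min (comparing naive and aware datetimes would raise):

from dataclasses import dataclass
from datetime import datetime, timezone
from typing import List, Optional

@dataclass
class Run:  # stand-in for the repository's check-run records (assumed shape)
    id: str
    requested_at: Optional[datetime]

def latest_run(runs: List[Run]) -> Optional[Run]:
    # Sort newest-first; None timestamps sink to the end via an aware minimum.
    if not runs:
        return None
    return sorted(
        runs,
        key=lambda r: r.requested_at or datetime.min.replace(tzinfo=timezone.utc),
        reverse=True,
    )[0]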
@@ -311,6 +404,8 @@ async def prepare_candidate_endpoint(
status_code=status.HTTP_400_BAD_REQUEST, status_code=status.HTTP_400_BAD_REQUEST,
detail={"message": str(exc), "code": "CLEAN_PREPARATION_ERROR"}, detail={"message": str(exc), "code": "CLEAN_PREPARATION_ERROR"},
) )
# [/DEF:prepare_candidate_endpoint:Function] # [/DEF:prepare_candidate_endpoint:Function]
@@ -327,27 +422,46 @@ async def start_check(
logger.reason("Starting clean-release compliance check run") logger.reason("Starting clean-release compliance check run")
policy = repository.get_active_policy() policy = repository.get_active_policy()
if policy is None: if policy is None:
raise HTTPException(status_code=409, detail={"message": "Active policy not found", "code": "POLICY_NOT_FOUND"}) raise HTTPException(
status_code=409,
detail={
"message": "Active policy not found",
"code": "POLICY_NOT_FOUND",
},
)
candidate = repository.get_candidate(payload.candidate_id) candidate = repository.get_candidate(payload.candidate_id)
if candidate is None: if candidate is None:
raise HTTPException(status_code=409, detail={"message": "Candidate not found", "code": "CANDIDATE_NOT_FOUND"}) raise HTTPException(
status_code=409,
detail={
"message": "Candidate not found",
"code": "CANDIDATE_NOT_FOUND",
},
)
manifests = repository.get_manifests_by_candidate(payload.candidate_id) manifests = repository.get_manifests_by_candidate(payload.candidate_id)
if not manifests: if not manifests:
logger.explore("No manifest found for candidate; bootstrapping legacy empty manifest for compatibility") logger.explore(
from ...services.clean_release.manifest_builder import build_distribution_manifest "No manifest found for candidate; bootstrapping legacy empty manifest for compatibility"
)
from ...services.clean_release.manifest_builder import (
build_distribution_manifest,
)
boot_manifest = build_distribution_manifest( boot_manifest = build_distribution_manifest(
manifest_id=f"manifest-{payload.candidate_id}", manifest_id=f"manifest-{payload.candidate_id}",
candidate_id=payload.candidate_id, candidate_id=payload.candidate_id,
policy_id=getattr(policy, "policy_id", None) or getattr(policy, "id", ""), policy_id=getattr(policy, "policy_id", None)
or getattr(policy, "id", ""),
generated_by=payload.triggered_by, generated_by=payload.triggered_by,
artifacts=[], artifacts=[],
) )
repository.save_manifest(boot_manifest) repository.save_manifest(boot_manifest)
manifests = [boot_manifest] manifests = [boot_manifest]
latest_manifest = sorted(manifests, key=lambda m: m.manifest_version, reverse=True)[0] latest_manifest = sorted(
manifests, key=lambda m: m.manifest_version, reverse=True
)[0]
orchestrator = CleanComplianceOrchestrator(repository) orchestrator = CleanComplianceOrchestrator(repository)
run = orchestrator.start_check_run( run = orchestrator.start_check_run(
@@ -364,7 +478,7 @@ async def start_check(
stage_name=ComplianceStageName.DATA_PURITY.value, stage_name=ComplianceStageName.DATA_PURITY.value,
status=RunStatus.SUCCEEDED.value, status=RunStatus.SUCCEEDED.value,
decision=ComplianceDecision.PASSED.value, decision=ComplianceDecision.PASSED.value,
details_json={"message": "ok"} details_json={"message": "ok"},
), ),
ComplianceStageRun( ComplianceStageRun(
id=f"stage-{run.id}-2", id=f"stage-{run.id}-2",
@@ -372,7 +486,7 @@ async def start_check(
stage_name=ComplianceStageName.INTERNAL_SOURCES_ONLY.value, stage_name=ComplianceStageName.INTERNAL_SOURCES_ONLY.value,
status=RunStatus.SUCCEEDED.value, status=RunStatus.SUCCEEDED.value,
decision=ComplianceDecision.PASSED.value, decision=ComplianceDecision.PASSED.value,
details_json={"message": "ok"} details_json={"message": "ok"},
), ),
ComplianceStageRun( ComplianceStageRun(
id=f"stage-{run.id}-3", id=f"stage-{run.id}-3",
@@ -380,7 +494,7 @@ async def start_check(
stage_name=ComplianceStageName.NO_EXTERNAL_ENDPOINTS.value, stage_name=ComplianceStageName.NO_EXTERNAL_ENDPOINTS.value,
status=RunStatus.SUCCEEDED.value, status=RunStatus.SUCCEEDED.value,
decision=ComplianceDecision.PASSED.value, decision=ComplianceDecision.PASSED.value,
details_json={"message": "ok"} details_json={"message": "ok"},
), ),
ComplianceStageRun( ComplianceStageRun(
id=f"stage-{run.id}-4", id=f"stage-{run.id}-4",
@@ -388,14 +502,20 @@ async def start_check(
stage_name=ComplianceStageName.MANIFEST_CONSISTENCY.value, stage_name=ComplianceStageName.MANIFEST_CONSISTENCY.value,
status=RunStatus.SUCCEEDED.value, status=RunStatus.SUCCEEDED.value,
decision=ComplianceDecision.PASSED.value, decision=ComplianceDecision.PASSED.value,
details_json={"message": "ok"} details_json={"message": "ok"},
), ),
] ]
run = orchestrator.execute_stages(run, forced_results=forced) run = orchestrator.execute_stages(run, forced_results=forced)
run = orchestrator.finalize_run(run) run = orchestrator.finalize_run(run)
if str(run.final_status) in {ComplianceDecision.BLOCKED.value, "CheckFinalStatus.BLOCKED", "BLOCKED"}: if str(run.final_status) in {
logger.explore("Run ended as BLOCKED, persisting synthetic external-source violation") ComplianceDecision.BLOCKED.value,
"CheckFinalStatus.BLOCKED",
"BLOCKED",
}:
logger.explore(
"Run ended as BLOCKED, persisting synthetic external-source violation"
)
violation = ComplianceViolation( violation = ComplianceViolation(
id=f"viol-{run.id}", id=f"viol-{run.id}",
run_id=run.id, run_id=run.id,
@@ -403,12 +523,14 @@ async def start_check(
code="EXTERNAL_SOURCE_DETECTED", code="EXTERNAL_SOURCE_DETECTED",
severity=ViolationSeverity.CRITICAL.value, severity=ViolationSeverity.CRITICAL.value,
message="Replace with approved internal server", message="Replace with approved internal server",
evidence_json={"location": "external.example.com"} evidence_json={"location": "external.example.com"},
) )
repository.save_violation(violation) repository.save_violation(violation)
builder = ComplianceReportBuilder(repository) builder = ComplianceReportBuilder(repository)
report = builder.build_report_payload(run, repository.get_violations_by_run(run.id)) report = builder.build_report_payload(
run, repository.get_violations_by_run(run.id)
)
builder.persist_report(report) builder.persist_report(report)
logger.reflect(f"Compliance report persisted for run_id={run.id}") logger.reflect(f"Compliance report persisted for run_id={run.id}")
@@ -418,6 +540,8 @@ async def start_check(
"status": "running", "status": "running",
"started_at": run.started_at.isoformat() if run.started_at else None, "started_at": run.started_at.isoformat() if run.started_at else None,
} }
# [/DEF:start_check:Function] # [/DEF:start_check:Function]
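The BLOCKED branch above compares str(run.final_status) against several spellings because the orchestrator may hand back a bare string, an enum value, or an enum member. A small sketch with a stand-in enum (assumed to be named like the real one) illustrates the normalization:

from enum import Enum

class CheckFinalStatus(Enum):  # stand-in; the real enum lives in the orchestrator
    PASSED = "PASSED"
    BLOCKED = "BLOCKED"

BLOCKED_FORMS = {"BLOCKED", "CheckFinalStatus.BLOCKED"}

def is_blocked(final_status) -> bool:
    # Accept the raw value, the enum member's str() form, and the plain string.
    return str(final_status) in BLOCKED_FORMS

assert is_blocked("BLOCKED")
assert is_blocked(CheckFinalStatus.BLOCKED)        # str() -> "CheckFinalStatus.BLOCKED"
assert is_blocked(CheckFinalStatus.BLOCKED.value)  # "BLOCKED"
assert not is_blocked(CheckFinalStatus.PASSED)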
@@ -426,11 +550,17 @@ async def start_check(
# @PRE: check_run_id references an existing run. # @PRE: check_run_id references an existing run.
# @POST: Deterministic payload shape includes checks and violations arrays. # @POST: Deterministic payload shape includes checks and violations arrays.
@router.get("/checks/{check_run_id}") @router.get("/checks/{check_run_id}")
async def get_check_status(check_run_id: str, repository: CleanReleaseRepository = Depends(get_clean_release_repository)): async def get_check_status(
check_run_id: str,
repository: CleanReleaseRepository = Depends(get_clean_release_repository),
):
with belief_scope("clean_release.get_check_status"): with belief_scope("clean_release.get_check_status"):
run = repository.get_check_run(check_run_id) run = repository.get_check_run(check_run_id)
if run is None: if run is None:
raise HTTPException(status_code=404, detail={"message": "Check run not found", "code": "CHECK_NOT_FOUND"}) raise HTTPException(
status_code=404,
detail={"message": "Check run not found", "code": "CHECK_NOT_FOUND"},
)
logger.reflect(f"Returning check status for check_run_id={check_run_id}") logger.reflect(f"Returning check status for check_run_id={check_run_id}")
checks = [ checks = [
@@ -462,6 +592,8 @@ async def get_check_status(check_run_id: str, repository: CleanReleaseRepository
"checks": checks, "checks": checks,
"violations": violations, "violations": violations,
} }
# [/DEF:get_check_status:Function] # [/DEF:get_check_status:Function]
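A client-side polling sketch against GET /checks/{check_run_id}; the base URL and router prefix are assumptions, not taken from this diff, and httpx stands in for whatever HTTP client the caller uses:

import time
import httpx

BASE_URL = "http://localhost:8000/api/clean-release"  # assumed host and prefix

def wait_for_check(check_run_id: str, timeout_s: float = 60.0) -> dict:
    # Poll until the run leaves "running" or the timeout elapses; the payload
    # carries the "checks" and "violations" arrays described above.
    deadline = time.monotonic() + timeout_s
    while True:
        payload = httpx.get(f"{BASE_URL}/checks/{check_run_id}").json()
        if payload.get("status") != "running" or time.monotonic() > deadline:
            return payload
        time.sleep(1.0)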
@@ -470,11 +602,17 @@ async def get_check_status(check_run_id: str, repository: CleanReleaseRepository
# @PRE: report_id references an existing report. # @PRE: report_id references an existing report.
# @POST: Returns serialized report object. # @POST: Returns serialized report object.
@router.get("/reports/{report_id}") @router.get("/reports/{report_id}")
async def get_report(report_id: str, repository: CleanReleaseRepository = Depends(get_clean_release_repository)): async def get_report(
report_id: str,
repository: CleanReleaseRepository = Depends(get_clean_release_repository),
):
with belief_scope("clean_release.get_report"): with belief_scope("clean_release.get_report"):
report = repository.get_report(report_id) report = repository.get_report(report_id)
if report is None: if report is None:
raise HTTPException(status_code=404, detail={"message": "Report not found", "code": "REPORT_NOT_FOUND"}) raise HTTPException(
status_code=404,
detail={"message": "Report not found", "code": "REPORT_NOT_FOUND"},
)
logger.reflect(f"Returning compliance report report_id={report_id}") logger.reflect(f"Returning compliance report report_id={report_id}")
return { return {
@@ -482,11 +620,17 @@ async def get_report(report_id: str, repository: CleanReleaseRepository = Depend
"check_run_id": report.run_id, "check_run_id": report.run_id,
"candidate_id": report.candidate_id, "candidate_id": report.candidate_id,
"final_status": getattr(report.final_status, "value", report.final_status), "final_status": getattr(report.final_status, "value", report.final_status),
"generated_at": report.generated_at.isoformat() if getattr(report, "generated_at", None) else None, "generated_at": report.generated_at.isoformat()
if getattr(report, "generated_at", None)
else None,
"operator_summary": getattr(report, "operator_summary", ""), "operator_summary": getattr(report, "operator_summary", ""),
"structured_payload_ref": getattr(report, "structured_payload_ref", None), "structured_payload_ref": getattr(report, "structured_payload_ref", None),
"violations_count": getattr(report, "violations_count", 0), "violations_count": getattr(report, "violations_count", 0),
"blocking_violations_count": getattr(report, "blocking_violations_count", 0), "blocking_violations_count": getattr(
report, "blocking_violations_count", 0
),
} }
# [/DEF:get_report:Function] # [/DEF:get_report:Function]
# [/DEF:backend.src.api.routes.clean_release:Module] # [/DEF:backend.src.api.routes.clean_release:Module]

View File

@@ -1,16 +1,26 @@
# [DEF:backend.src.api.routes.clean_release_v2:Module] # [DEF:CleanReleaseV2Api:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 4
# @PURPOSE: Redesigned clean release API for headless candidate lifecycle. # @PURPOSE: Redesigned clean release API for headless candidate lifecycle.
from fastapi import APIRouter, Depends, HTTPException, status from fastapi import APIRouter, Depends, HTTPException, status
from typing import List, Dict, Any from typing import List, Dict, Any
from datetime import datetime, timezone from datetime import datetime, timezone
from ...services.clean_release.approval_service import approve_candidate, reject_candidate from ...services.clean_release.approval_service import (
from ...services.clean_release.publication_service import publish_candidate, revoke_publication approve_candidate,
reject_candidate,
)
from ...services.clean_release.publication_service import (
publish_candidate,
revoke_publication,
)
from ...services.clean_release.repository import CleanReleaseRepository from ...services.clean_release.repository import CleanReleaseRepository
from ...dependencies import get_clean_release_repository from ...dependencies import get_clean_release_repository
from ...services.clean_release.enums import CandidateStatus from ...services.clean_release.enums import CandidateStatus
from ...models.clean_release import ReleaseCandidate, CandidateArtifact, DistributionManifest from ...models.clean_release import (
ReleaseCandidate,
CandidateArtifact,
DistributionManifest,
)
from ...services.clean_release.dto import CandidateDTO, ManifestDTO from ...services.clean_release.dto import CandidateDTO, ManifestDTO
router = APIRouter(prefix="/api/v2/clean-release", tags=["Clean Release V2"]) router = APIRouter(prefix="/api/v2/clean-release", tags=["Clean Release V2"])
@@ -22,6 +32,8 @@ router = APIRouter(prefix="/api/v2/clean-release", tags=["Clean Release V2"])
# @RELATION: USES -> [CandidateDTO] # @RELATION: USES -> [CandidateDTO]
class ApprovalRequest(dict): class ApprovalRequest(dict):
pass pass
# [/DEF:ApprovalRequest:Class] # [/DEF:ApprovalRequest:Class]
@@ -31,6 +43,8 @@ class ApprovalRequest(dict):
# @RELATION: USES -> [CandidateDTO] # @RELATION: USES -> [CandidateDTO]
class PublishRequest(dict): class PublishRequest(dict):
pass pass
# [/DEF:PublishRequest:Class] # [/DEF:PublishRequest:Class]
@@ -40,8 +54,11 @@ class PublishRequest(dict):
# @RELATION: USES -> [CandidateDTO] # @RELATION: USES -> [CandidateDTO]
class RevokeRequest(dict): class RevokeRequest(dict):
pass pass
# [/DEF:RevokeRequest:Class] # [/DEF:RevokeRequest:Class]
# [DEF:register_candidate:Function] # [DEF:register_candidate:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Register a new release candidate. # @PURPOSE: Register a new release candidate.
@@ -50,10 +67,12 @@ class RevokeRequest(dict):
# @RETURN: CandidateDTO # @RETURN: CandidateDTO
# @RELATION: CALLS -> [CleanReleaseRepository.save_candidate] # @RELATION: CALLS -> [CleanReleaseRepository.save_candidate]
# @RELATION: USES -> [CandidateDTO] # @RELATION: USES -> [CandidateDTO]
@router.post("/candidates", response_model=CandidateDTO, status_code=status.HTTP_201_CREATED) @router.post(
"/candidates", response_model=CandidateDTO, status_code=status.HTTP_201_CREATED
)
async def register_candidate( async def register_candidate(
payload: Dict[str, Any], payload: Dict[str, Any],
repository: CleanReleaseRepository = Depends(get_clean_release_repository) repository: CleanReleaseRepository = Depends(get_clean_release_repository),
): ):
candidate = ReleaseCandidate( candidate = ReleaseCandidate(
id=payload["id"], id=payload["id"],
@@ -61,7 +80,7 @@ async def register_candidate(
source_snapshot_ref=payload["source_snapshot_ref"], source_snapshot_ref=payload["source_snapshot_ref"],
created_by=payload["created_by"], created_by=payload["created_by"],
created_at=datetime.now(timezone.utc), created_at=datetime.now(timezone.utc),
status=CandidateStatus.DRAFT.value status=CandidateStatus.DRAFT.value,
) )
repository.save_candidate(candidate) repository.save_candidate(candidate)
return CandidateDTO( return CandidateDTO(
@@ -70,10 +89,13 @@ async def register_candidate(
source_snapshot_ref=candidate.source_snapshot_ref, source_snapshot_ref=candidate.source_snapshot_ref,
created_at=candidate.created_at, created_at=candidate.created_at,
created_by=candidate.created_by, created_by=candidate.created_by,
status=CandidateStatus(candidate.status) status=CandidateStatus(candidate.status),
) )
# [/DEF:register_candidate:Function] # [/DEF:register_candidate:Function]
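A registration sketch for POST /api/v2/clean-release/candidates (the prefix is declared on this router); the host is assumed, and only the payload keys visible in this hunk are shown, so the real model may expect more fields:

import httpx

resp = httpx.post(
    "http://localhost:8000/api/v2/clean-release/candidates",  # assumed host
    json={
        "id": "cand-001",                    # hypothetical identifiers
        "source_snapshot_ref": "git:abc123",
        "created_by": "release-bot",
    },
)
resp.raise_for_status()
candidate = resp.json()  # CandidateDTO: id, source_snapshot_ref, created_at, created_by, status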
# [DEF:import_artifacts:Function] # [DEF:import_artifacts:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Associate artifacts with a release candidate. # @PURPOSE: Associate artifacts with a release candidate.
@@ -84,7 +106,7 @@ async def register_candidate(
async def import_artifacts( async def import_artifacts(
candidate_id: str, candidate_id: str,
payload: Dict[str, Any], payload: Dict[str, Any],
repository: CleanReleaseRepository = Depends(get_clean_release_repository) repository: CleanReleaseRepository = Depends(get_clean_release_repository),
): ):
candidate = repository.get_candidate(candidate_id) candidate = repository.get_candidate(candidate_id)
if not candidate: if not candidate:
@@ -96,15 +118,18 @@ async def import_artifacts(
candidate_id=candidate_id, candidate_id=candidate_id,
path=art_data["path"], path=art_data["path"],
sha256=art_data["sha256"], sha256=art_data["sha256"],
size=art_data["size"] size=art_data["size"],
) )
# In a real repo we'd have save_artifact # In a real repo we'd have save_artifact
# repository.save_artifact(artifact) # repository.save_artifact(artifact)
pass pass
return {"status": "success"} return {"status": "success"}
# [/DEF:import_artifacts:Function] # [/DEF:import_artifacts:Function]
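Each artifact entry carries path, sha256, and size. A sketch of building one such entry on the client side (the upload route itself sits outside this hunk, so only the entry shape is illustrated):

import hashlib
from pathlib import Path

def artifact_entry(path: Path) -> dict:
    # Hash the file contents and record the byte size alongside the path.
    data = path.read_bytes()
    return {
        "path": str(path),
        "sha256": hashlib.sha256(data).hexdigest(),
        "size": len(data),
    }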
# [DEF:build_manifest:Function] # [DEF:build_manifest:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Generate distribution manifest for a candidate. # @PURPOSE: Generate distribution manifest for a candidate.
@@ -113,10 +138,14 @@ async def import_artifacts(
# @RETURN: ManifestDTO # @RETURN: ManifestDTO
# @RELATION: CALLS -> [CleanReleaseRepository.save_manifest] # @RELATION: CALLS -> [CleanReleaseRepository.save_manifest]
# @RELATION: CALLS -> [CleanReleaseRepository.get_candidate] # @RELATION: CALLS -> [CleanReleaseRepository.get_candidate]
@router.post("/candidates/{candidate_id}/manifests", response_model=ManifestDTO, status_code=status.HTTP_201_CREATED) @router.post(
"/candidates/{candidate_id}/manifests",
response_model=ManifestDTO,
status_code=status.HTTP_201_CREATED,
)
async def build_manifest( async def build_manifest(
candidate_id: str, candidate_id: str,
repository: CleanReleaseRepository = Depends(get_clean_release_repository) repository: CleanReleaseRepository = Depends(get_clean_release_repository),
): ):
candidate = repository.get_candidate(candidate_id) candidate = repository.get_candidate(candidate_id)
if not candidate: if not candidate:
@@ -131,7 +160,7 @@ async def build_manifest(
created_by="system", created_by="system",
created_at=datetime.now(timezone.utc), created_at=datetime.now(timezone.utc),
source_snapshot_ref=candidate.source_snapshot_ref, source_snapshot_ref=candidate.source_snapshot_ref,
content_json={"items": [], "summary": {}} content_json={"items": [], "summary": {}},
) )
repository.save_manifest(manifest) repository.save_manifest(manifest)
@@ -144,10 +173,13 @@ async def build_manifest(
created_at=manifest.created_at, created_at=manifest.created_at,
created_by=manifest.created_by, created_by=manifest.created_by,
source_snapshot_ref=manifest.source_snapshot_ref, source_snapshot_ref=manifest.source_snapshot_ref,
content_json=manifest.content_json content_json=manifest.content_json,
) )
# [/DEF:build_manifest:Function] # [/DEF:build_manifest:Function]
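A manifest-build sketch against POST /candidates/{candidate_id}/manifests under the same v2 prefix; the host and candidate id are placeholders:

import httpx

resp = httpx.post(
    "http://localhost:8000/api/v2/clean-release/candidates/cand-001/manifests"  # assumed host
)
resp.raise_for_status()
manifest = resp.json()  # ManifestDTO; content_json starts as {"items": [], "summary": {}}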
# [DEF:approve_candidate_endpoint:Function] # [DEF:approve_candidate_endpoint:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Endpoint to record candidate approval. # @PURPOSE: Endpoint to record candidate approval.
@@ -167,9 +199,13 @@ async def approve_candidate_endpoint(
comment=payload.get("comment"), comment=payload.get("comment"),
) )
except Exception as exc: # noqa: BLE001 except Exception as exc: # noqa: BLE001
raise HTTPException(status_code=409, detail={"message": str(exc), "code": "APPROVAL_GATE_ERROR"}) raise HTTPException(
status_code=409, detail={"message": str(exc), "code": "APPROVAL_GATE_ERROR"}
)
return {"status": "ok", "decision": decision.decision, "decision_id": decision.id} return {"status": "ok", "decision": decision.decision, "decision_id": decision.id}
# [/DEF:approve_candidate_endpoint:Function] # [/DEF:approve_candidate_endpoint:Function]
@@ -192,9 +228,13 @@ async def reject_candidate_endpoint(
comment=payload.get("comment"), comment=payload.get("comment"),
) )
except Exception as exc: # noqa: BLE001 except Exception as exc: # noqa: BLE001
raise HTTPException(status_code=409, detail={"message": str(exc), "code": "APPROVAL_GATE_ERROR"}) raise HTTPException(
status_code=409, detail={"message": str(exc), "code": "APPROVAL_GATE_ERROR"}
)
return {"status": "ok", "decision": decision.decision, "decision_id": decision.id} return {"status": "ok", "decision": decision.decision, "decision_id": decision.id}
# [/DEF:reject_candidate_endpoint:Function] # [/DEF:reject_candidate_endpoint:Function]
@@ -218,7 +258,10 @@ async def publish_candidate_endpoint(
publication_ref=payload.get("publication_ref"), publication_ref=payload.get("publication_ref"),
) )
except Exception as exc: # noqa: BLE001 except Exception as exc: # noqa: BLE001
raise HTTPException(status_code=409, detail={"message": str(exc), "code": "PUBLICATION_GATE_ERROR"}) raise HTTPException(
status_code=409,
detail={"message": str(exc), "code": "PUBLICATION_GATE_ERROR"},
)
return { return {
"status": "ok", "status": "ok",
@@ -227,12 +270,16 @@ async def publish_candidate_endpoint(
"candidate_id": publication.candidate_id, "candidate_id": publication.candidate_id,
"report_id": publication.report_id, "report_id": publication.report_id,
"published_by": publication.published_by, "published_by": publication.published_by,
"published_at": publication.published_at.isoformat() if publication.published_at else None, "published_at": publication.published_at.isoformat()
if publication.published_at
else None,
"target_channel": publication.target_channel, "target_channel": publication.target_channel,
"publication_ref": publication.publication_ref, "publication_ref": publication.publication_ref,
"status": publication.status, "status": publication.status,
}, },
} }
# [/DEF:publish_candidate_endpoint:Function] # [/DEF:publish_candidate_endpoint:Function]
@@ -254,7 +301,10 @@ async def revoke_publication_endpoint(
comment=payload.get("comment"), comment=payload.get("comment"),
) )
except Exception as exc: # noqa: BLE001 except Exception as exc: # noqa: BLE001
raise HTTPException(status_code=409, detail={"message": str(exc), "code": "PUBLICATION_GATE_ERROR"}) raise HTTPException(
status_code=409,
detail={"message": str(exc), "code": "PUBLICATION_GATE_ERROR"},
)
return { return {
"status": "ok", "status": "ok",
@@ -263,12 +313,16 @@ async def revoke_publication_endpoint(
"candidate_id": publication.candidate_id, "candidate_id": publication.candidate_id,
"report_id": publication.report_id, "report_id": publication.report_id,
"published_by": publication.published_by, "published_by": publication.published_by,
"published_at": publication.published_at.isoformat() if publication.published_at else None, "published_at": publication.published_at.isoformat()
if publication.published_at
else None,
"target_channel": publication.target_channel, "target_channel": publication.target_channel,
"publication_ref": publication.publication_ref, "publication_ref": publication.publication_ref,
"status": publication.status, "status": publication.status,
}, },
} }
# [/DEF:revoke_publication_endpoint:Function] # [/DEF:revoke_publication_endpoint:Function]
# [/DEF:backend.src.api.routes.clean_release_v2:Module] # [/DEF:CleanReleaseV2Api:Module]
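Taken together, the v2 module covers the approval and publication gates. A heavily hedged lifecycle sketch follows; the approve/publish/revoke route paths are not visible in this diff, so the paths and most payload keys below are assumed placeholders under the declared /api/v2/clean-release prefix (only "comment", "publication_ref", and "target_channel" appear in the hunks above):

import httpx

BASE = "http://localhost:8000/api/v2/clean-release"  # assumed host
CAND = "cand-001"                                     # hypothetical candidate

httpx.post(f"{BASE}/candidates/{CAND}/approve", json={"comment": "approved after review"})
httpx.post(f"{BASE}/candidates/{CAND}/publish",
           json={"target_channel": "internal", "publication_ref": "rel-42"})
httpx.post(f"{BASE}/candidates/{CAND}/revoke", json={"comment": "rollback"})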

View File

@@ -269,7 +269,7 @@ class LaunchDatasetResponse(BaseModel):
# [DEF:_require_auto_review_flag:Function] # [DEF:_require_auto_review_flag:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Guard US1 dataset review endpoints behind the configured feature flag. # @PURPOSE: Guard US1 dataset review endpoints behind the configured feature flag.
# @RELATION: [DEPENDS_ON] ->[ConfigManager] # @RELATION: [DEPENDS_ON] ->[ConfigManager]
def _require_auto_review_flag(config_manager=Depends(get_config_manager)) -> bool: def _require_auto_review_flag(config_manager=Depends(get_config_manager)) -> bool:
@@ -284,7 +284,7 @@ def _require_auto_review_flag(config_manager=Depends(get_config_manager)) -> boo
# [DEF:_require_clarification_flag:Function] # [DEF:_require_clarification_flag:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Guard clarification-specific US2 endpoints behind the configured feature flag. # @PURPOSE: Guard clarification-specific US2 endpoints behind the configured feature flag.
# @RELATION: [DEPENDS_ON] ->[ConfigManager] # @RELATION: [DEPENDS_ON] ->[ConfigManager]
def _require_clarification_flag(config_manager=Depends(get_config_manager)) -> bool: def _require_clarification_flag(config_manager=Depends(get_config_manager)) -> bool:
@@ -299,7 +299,7 @@ def _require_clarification_flag(config_manager=Depends(get_config_manager)) -> b
# [DEF:_require_execution_flag:Function] # [DEF:_require_execution_flag:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Guard US3 execution endpoints behind the configured feature flag. # @PURPOSE: Guard US3 execution endpoints behind the configured feature flag.
# @RELATION: [DEPENDS_ON] ->[ConfigManager] # @RELATION: [DEPENDS_ON] ->[ConfigManager]
def _require_execution_flag(config_manager=Depends(get_config_manager)) -> bool: def _require_execution_flag(config_manager=Depends(get_config_manager)) -> bool:
@@ -322,7 +322,7 @@ def _get_repository(db: Session = Depends(get_db)) -> DatasetReviewSessionReposi
# [DEF:_get_orchestrator:Function] # [DEF:_get_orchestrator:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Build orchestrator dependency for session lifecycle actions. # @PURPOSE: Build orchestrator dependency for session lifecycle actions.
# @RELATION: [DEPENDS_ON] ->[DatasetReviewOrchestrator] # @RELATION: [DEPENDS_ON] ->[DatasetReviewOrchestrator]
def _get_orchestrator( def _get_orchestrator(
@@ -339,7 +339,7 @@ def _get_orchestrator(
# [DEF:_get_clarification_engine:Function] # [DEF:_get_clarification_engine:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Build clarification engine dependency for one-question-at-a-time guided clarification mutations. # @PURPOSE: Build clarification engine dependency for one-question-at-a-time guided clarification mutations.
# @RELATION: [DEPENDS_ON] ->[ClarificationEngine] # @RELATION: [DEPENDS_ON] ->[ClarificationEngine]
def _get_clarification_engine( def _get_clarification_engine(
@@ -350,7 +350,7 @@ def _get_clarification_engine(
# [DEF:_serialize_session_summary:Function] # [DEF:_serialize_session_summary:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Map SQLAlchemy session aggregate root into stable API summary DTO. # @PURPOSE: Map SQLAlchemy session aggregate root into stable API summary DTO.
# @RELATION: [DEPENDS_ON] ->[SessionSummary] # @RELATION: [DEPENDS_ON] ->[SessionSummary]
def _serialize_session_summary(session: DatasetReviewSession) -> SessionSummary: def _serialize_session_summary(session: DatasetReviewSession) -> SessionSummary:
@@ -359,7 +359,7 @@ def _serialize_session_summary(session: DatasetReviewSession) -> SessionSummary:
# [DEF:_serialize_session_detail:Function] # [DEF:_serialize_session_detail:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Map SQLAlchemy session aggregate root into stable API detail DTO. # @PURPOSE: Map SQLAlchemy session aggregate root into stable API detail DTO.
# @RELATION: [DEPENDS_ON] ->[SessionDetail] # @RELATION: [DEPENDS_ON] ->[SessionDetail]
def _serialize_session_detail(session: DatasetReviewSession) -> SessionDetail: def _serialize_session_detail(session: DatasetReviewSession) -> SessionDetail:
@@ -368,7 +368,7 @@ def _serialize_session_detail(session: DatasetReviewSession) -> SessionDetail:
# [DEF:_serialize_semantic_field:Function] # [DEF:_serialize_semantic_field:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Map one semantic field aggregate into stable field-level DTO output. # @PURPOSE: Map one semantic field aggregate into stable field-level DTO output.
# @RELATION: [DEPENDS_ON] ->[SemanticFieldEntryDto] # @RELATION: [DEPENDS_ON] ->[SemanticFieldEntryDto]
def _serialize_semantic_field(field: SemanticFieldEntry) -> SemanticFieldEntryDto: def _serialize_semantic_field(field: SemanticFieldEntry) -> SemanticFieldEntryDto:
@@ -377,7 +377,7 @@ def _serialize_semantic_field(field: SemanticFieldEntry) -> SemanticFieldEntryDt
# [DEF:_serialize_clarification_question_payload:Function] # [DEF:_serialize_clarification_question_payload:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Convert clarification engine payload into API DTO aligned with the clarification contract. # @PURPOSE: Convert clarification engine payload into API DTO aligned with the clarification contract.
# @RELATION: [DEPENDS_ON] ->[ClarificationQuestionDto] # @RELATION: [DEPENDS_ON] ->[ClarificationQuestionDto]
def _serialize_clarification_question_payload( def _serialize_clarification_question_payload(
@@ -405,7 +405,7 @@ def _serialize_clarification_question_payload(
# [DEF:_serialize_clarification_state:Function] # [DEF:_serialize_clarification_state:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Convert clarification engine state into stable API response payload. # @PURPOSE: Convert clarification engine state into stable API response payload.
# @RELATION: [DEPENDS_ON] ->[ClarificationStateResponse] # @RELATION: [DEPENDS_ON] ->[ClarificationStateResponse]
def _serialize_clarification_state( def _serialize_clarification_state(
@@ -473,7 +473,7 @@ def _require_owner_mutation_scope(
# [DEF:_record_session_event:Function] # [DEF:_record_session_event:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Persist one explicit audit event for an owned dataset-review mutation endpoint. # @PURPOSE: Persist one explicit audit event for an owned dataset-review mutation endpoint.
# @RELATION: [CALLS] ->[SessionEventLogger.log_for_session] # @RELATION: [CALLS] ->[SessionEventLogger.log_for_session]
def _record_session_event( def _record_session_event(
@@ -534,7 +534,7 @@ def _get_owned_field_or_404(
# [DEF:_get_latest_clarification_session_or_404:Function] # [DEF:_get_latest_clarification_session_or_404:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Resolve the latest clarification aggregate for one session or raise when clarification is unavailable. # @PURPOSE: Resolve the latest clarification aggregate for one session or raise when clarification is unavailable.
# @RELATION: [DEPENDS_ON] ->[ClarificationSession] # @RELATION: [DEPENDS_ON] ->[ClarificationSession]
def _get_latest_clarification_session_or_404( def _get_latest_clarification_session_or_404(
@@ -565,7 +565,7 @@ def _map_candidate_provenance(candidate: SemanticCandidate) -> FieldProvenance:
# [DEF:_resolve_candidate_source_version:Function] # [DEF:_resolve_candidate_source_version:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Resolve the semantic source version for one accepted candidate from the loaded session aggregate. # @PURPOSE: Resolve the semantic source version for one accepted candidate from the loaded session aggregate.
# @RELATION: [DEPENDS_ON] ->[SemanticFieldEntry] # @RELATION: [DEPENDS_ON] ->[SemanticFieldEntry]
# @RELATION: [DEPENDS_ON] ->[SemanticSource] # @RELATION: [DEPENDS_ON] ->[SemanticSource]
@@ -653,7 +653,7 @@ def _update_semantic_field_state(
# [DEF:_serialize_execution_mapping:Function] # [DEF:_serialize_execution_mapping:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Map one persisted execution mapping into stable API DTO output. # @PURPOSE: Map one persisted execution mapping into stable API DTO output.
# @RELATION: [DEPENDS_ON] ->[ExecutionMappingDto] # @RELATION: [DEPENDS_ON] ->[ExecutionMappingDto]
def _serialize_execution_mapping(mapping: ExecutionMapping) -> ExecutionMappingDto: def _serialize_execution_mapping(mapping: ExecutionMapping) -> ExecutionMappingDto:
@@ -662,7 +662,7 @@ def _serialize_execution_mapping(mapping: ExecutionMapping) -> ExecutionMappingD
# [DEF:_serialize_run_context:Function] # [DEF:_serialize_run_context:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Map one persisted launch run context into stable API DTO output for SQL Lab handoff confirmation. # @PURPOSE: Map one persisted launch run context into stable API DTO output for SQL Lab handoff confirmation.
# @RELATION: [DEPENDS_ON] ->[DatasetRunContextDto] # @RELATION: [DEPENDS_ON] ->[DatasetRunContextDto]
def _serialize_run_context(run_context) -> DatasetRunContextDto: def _serialize_run_context(run_context) -> DatasetRunContextDto:
@@ -671,7 +671,7 @@ def _serialize_run_context(run_context) -> DatasetRunContextDto:
# [DEF:_build_sql_lab_redirect_url:Function] # [DEF:_build_sql_lab_redirect_url:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Build a stable SQL Lab redirect URL from the configured Superset environment and persisted run context reference. # @PURPOSE: Build a stable SQL Lab redirect URL from the configured Superset environment and persisted run context reference.
# @RELATION: [DEPENDS_ON] ->[DatasetRunContextDto] # @RELATION: [DEPENDS_ON] ->[DatasetRunContextDto]
def _build_sql_lab_redirect_url(environment_url: str, sql_lab_session_ref: str) -> str: def _build_sql_lab_redirect_url(environment_url: str, sql_lab_session_ref: str) -> str:
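The redirect builder's body is outside this hunk; a purely illustrative sketch under assumed conventions (the "/sqllab/" path segment and the query-parameter name are guesses, not taken from the source):

from urllib.parse import urlencode

def build_sql_lab_redirect_url(environment_url: str, sql_lab_session_ref: str) -> str:
    # Join the configured Superset base URL with a SQL Lab path and carry the
    # persisted run-context reference as a query parameter.
    base = environment_url.rstrip("/")
    query = urlencode({"session_ref": sql_lab_session_ref})  # parameter name assumed
    return f"{base}/sqllab/?{query}"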
@@ -692,7 +692,7 @@ def _build_sql_lab_redirect_url(environment_url: str, sql_lab_session_ref: str)
# [DEF:_build_documentation_export:Function] # [DEF:_build_documentation_export:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Produce session documentation export content from current persisted review state. # @PURPOSE: Produce session documentation export content from current persisted review state.
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSession] # @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
def _build_documentation_export(session: DatasetReviewSession, export_format: ArtifactFormat) -> Dict[str, Any]: def _build_documentation_export(session: DatasetReviewSession, export_format: ArtifactFormat) -> Dict[str, Any]:
@@ -747,7 +747,7 @@ def _build_documentation_export(session: DatasetReviewSession, export_format: Ar
# [DEF:_build_validation_export:Function] # [DEF:_build_validation_export:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Produce validation-focused export content from persisted findings and readiness state. # @PURPOSE: Produce validation-focused export content from persisted findings and readiness state.
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSession] # @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
def _build_validation_export(session: DatasetReviewSession, export_format: ArtifactFormat) -> Dict[str, Any]: def _build_validation_export(session: DatasetReviewSession, export_format: ArtifactFormat) -> Dict[str, Any]:

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.api.routes.datasets:Module] # [DEF:DatasetsApi:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: api, datasets, resources, hub # @SEMANTICS: api, datasets, resources, hub
@@ -423,4 +423,4 @@ async def get_dataset_detail(
raise HTTPException(status_code=503, detail=f"Failed to fetch dataset detail: {str(e)}") raise HTTPException(status_code=503, detail=f"Failed to fetch dataset detail: {str(e)}")
# [/DEF:get_dataset_detail:Function] # [/DEF:get_dataset_detail:Function]
# [/DEF:backend.src.api.routes.datasets:Module] # [/DEF:DatasetsApi:Module]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.api.routes.environments:Module] # [DEF:EnvironmentsApi:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: api, environments, superset, databases # @SEMANTICS: api, environments, superset, databases
@@ -156,4 +156,4 @@ async def get_environment_databases(
raise HTTPException(status_code=500, detail=f"Failed to fetch databases: {str(e)}") raise HTTPException(status_code=500, detail=f"Failed to fetch databases: {str(e)}")
# [/DEF:get_environment_databases:Function] # [/DEF:get_environment_databases:Function]
# [/DEF:backend.src.api.routes.environments:Module] # [/DEF:EnvironmentsApi:Module]

View File

@@ -1,6 +1,6 @@
# [DEF:backend.src.api.routes.git_schemas:Module] # [DEF:GitSchemas:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 1
# @SEMANTICS: git, schemas, pydantic, api, contracts # @SEMANTICS: git, schemas, pydantic, api, contracts
# @PURPOSE: Defines Pydantic models for the Git integration API layer. # @PURPOSE: Defines Pydantic models for the Git integration API layer.
# @LAYER: API # @LAYER: API
@@ -290,4 +290,4 @@ class PromoteResponse(BaseModel):
policy_violation: bool = False policy_violation: bool = False
# [/DEF:PromoteResponse:Class] # [/DEF:PromoteResponse:Class]
# [/DEF:backend.src.api.routes.git_schemas:Module] # [/DEF:GitSchemas:Module]

View File

@@ -1,5 +1,5 @@
# [DEF:backend/src/api/routes/llm.py:Module] # [DEF:backend/src/api/routes/llm.py:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @SEMANTICS: api, routes, llm # @SEMANTICS: api, routes, llm
# @PURPOSE: API routes for LLM provider configuration and management. # @PURPOSE: API routes for LLM provider configuration and management.
# @LAYER: UI (API) # @LAYER: UI (API)

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.api.routes.mappings:Module] # [DEF:MappingsApi:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: api, mappings, database, fuzzy-matching # @SEMANTICS: api, mappings, database, fuzzy-matching
@@ -127,4 +127,4 @@ async def suggest_mappings_api(
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
# [/DEF:suggest_mappings_api:Function] # [/DEF:suggest_mappings_api:Function]
# [/DEF:backend.src.api.routes.mappings:Module] # [/DEF:MappingsApi:Module]

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.api.routes.profile:Module] # [DEF:ProfileApiModule:Module]
# #
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: api, profile, preferences, self-service, account-lookup # @SEMANTICS: api, profile, preferences, self-service, account-lookup
@@ -47,6 +47,7 @@ router = APIRouter(prefix="/api/profile", tags=["profile"])
# [DEF:_get_profile_service:Function] # [DEF:_get_profile_service:Function]
# @RELATION: CALLS -> ProfileService
# @PURPOSE: Build profile service for current request scope. # @PURPOSE: Build profile service for current request scope.
# @PRE: db session and config manager are available. # @PRE: db session and config manager are available.
# @POST: Returns a ready ProfileService instance. # @POST: Returns a ready ProfileService instance.
@@ -60,6 +61,7 @@ def _get_profile_service(db: Session, config_manager, plugin_loader=None) -> Pro
# [DEF:get_preferences:Function] # [DEF:get_preferences:Function]
# @RELATION: CALLS -> ProfileService
# @PURPOSE: Get authenticated user's dashboard filter preference. # @PURPOSE: Get authenticated user's dashboard filter preference.
# @PRE: Valid JWT and authenticated user context. # @PRE: Valid JWT and authenticated user context.
# @POST: Returns preference payload for current user only. # @POST: Returns preference payload for current user only.
@@ -78,6 +80,7 @@ async def get_preferences(
# [DEF:update_preferences:Function] # [DEF:update_preferences:Function]
# @RELATION: CALLS -> ProfileService
# @PURPOSE: Update authenticated user's dashboard filter preference. # @PURPOSE: Update authenticated user's dashboard filter preference.
# @PRE: Valid JWT and valid request payload. # @PRE: Valid JWT and valid request payload.
# @POST: Persists normalized preference for current user or raises validation/authorization errors. # @POST: Persists normalized preference for current user or raises validation/authorization errors.
@@ -104,6 +107,7 @@ async def update_preferences(
# [DEF:lookup_superset_accounts:Function] # [DEF:lookup_superset_accounts:Function]
# @RELATION: CALLS -> ProfileService
# @PURPOSE: Lookup Superset account candidates in selected environment. # @PURPOSE: Lookup Superset account candidates in selected environment.
# @PRE: Valid JWT, authenticated context, and environment_id query parameter. # @PRE: Valid JWT, authenticated context, and environment_id query parameter.
# @POST: Returns success or degraded lookup payload with stable shape. # @POST: Returns success or degraded lookup payload with stable shape.
@@ -144,4 +148,4 @@ async def lookup_superset_accounts(
raise HTTPException(status_code=404, detail=str(exc)) from exc raise HTTPException(status_code=404, detail=str(exc)) from exc
# [/DEF:lookup_superset_accounts:Function] # [/DEF:lookup_superset_accounts:Function]
# [/DEF:backend.src.api.routes.profile:Module] # [/DEF:ProfileApiModule:Module]

View File

@@ -64,7 +64,7 @@ def _parse_csv_enum_list(raw: Optional[str], enum_cls, field_name: str) -> List:
# [DEF:list_reports:Function] # [DEF:list_reports:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Return paginated unified reports list. # @PURPOSE: Return paginated unified reports list.
# @PRE: authenticated/authorized request and validated query params. # @PRE: authenticated/authorized request and validated query params.
# @POST: returns {items,total,page,page_size,has_next,applied_filters}. # @POST: returns {items,total,page,page_size,has_next,applied_filters}.
@@ -131,7 +131,7 @@ async def list_reports(
# [DEF:get_report_detail:Function] # [DEF:get_report_detail:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Return one normalized report detail with diagnostics and next actions. # @PURPOSE: Return one normalized report detail with diagnostics and next actions.
# @PRE: authenticated/authorized request and existing report_id. # @PRE: authenticated/authorized request and existing report_id.
# @POST: returns normalized detail envelope or 404 when report is not found. # @POST: returns normalized detail envelope or 404 when report is not found.

View File

@@ -1,6 +1,6 @@
# [DEF:SettingsRouter:Module] # [DEF:SettingsRouter:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 4
# @SEMANTICS: settings, api, router, fastapi # @SEMANTICS: settings, api, router, fastapi
# @PURPOSE: Provides API endpoints for managing application settings and Superset environments. # @PURPOSE: Provides API endpoints for managing application settings and Superset environments.
# @LAYER: UI (API) # @LAYER: UI (API)
@@ -23,11 +23,16 @@ from ...core.superset_client import SupersetClient
from ...services.llm_prompt_templates import normalize_llm_settings from ...services.llm_prompt_templates import normalize_llm_settings
from ...models.llm import ValidationPolicy from ...models.llm import ValidationPolicy
from ...models.config import AppConfigRecord from ...models.config import AppConfigRecord
from ...schemas.settings import ValidationPolicyCreate, ValidationPolicyUpdate, ValidationPolicyResponse from ...schemas.settings import (
ValidationPolicyCreate,
ValidationPolicyUpdate,
ValidationPolicyResponse,
)
from ...core.database import get_db from ...core.database import get_db
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
# [/SECTION] # [/SECTION]
# [DEF:LoggingConfigResponse:Class] # [DEF:LoggingConfigResponse:Class]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Response model for logging configuration with current task log level. # @PURPOSE: Response model for logging configuration with current task log level.
@@ -36,6 +41,8 @@ class LoggingConfigResponse(BaseModel):
level: str level: str
task_log_level: str task_log_level: str
enable_belief_state: bool enable_belief_state: bool
# [/DEF:LoggingConfigResponse:Class] # [/DEF:LoggingConfigResponse:Class]
router = APIRouter() router = APIRouter()
@@ -51,11 +58,13 @@ def _normalize_superset_env_url(raw_url: str) -> str:
if normalized.lower().endswith("/api/v1"): if normalized.lower().endswith("/api/v1"):
normalized = normalized[: -len("/api/v1")] normalized = normalized[: -len("/api/v1")]
return normalized.rstrip("/") return normalized.rstrip("/")
# [/DEF:_normalize_superset_env_url:Function] # [/DEF:_normalize_superset_env_url:Function]
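A standalone sketch of the normalization visible in this hunk (earlier lines of the function are not shown): a trailing "/api/v1" segment is stripped case-insensitively, then trailing slashes are removed:

def normalize_superset_env_url(raw_url: str) -> str:
    normalized = raw_url
    if normalized.lower().endswith("/api/v1"):
        normalized = normalized[: -len("/api/v1")]
    return normalized.rstrip("/")

assert normalize_superset_env_url("https://bi.example.com/api/v1") == "https://bi.example.com"
assert normalize_superset_env_url("https://bi.example.com/") == "https://bi.example.com"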
# [DEF:_validate_superset_connection_fast:Function] # [DEF:_validate_superset_connection_fast:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Run lightweight Superset connectivity validation without full pagination scan. # @PURPOSE: Run lightweight Superset connectivity validation without full pagination scan.
# @PRE: env contains valid URL and credentials. # @PRE: env contains valid URL and credentials.
# @POST: Raises on auth/API failures; returns None on success. # @POST: Raises on auth/API failures; returns None on success.
@@ -71,10 +80,13 @@ def _validate_superset_connection_fast(env: Environment) -> None:
"columns": ["id"], "columns": ["id"],
} }
) )
# [/DEF:_validate_superset_connection_fast:Function] # [/DEF:_validate_superset_connection_fast:Function]
# [DEF:get_settings:Function] # [DEF:get_settings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Retrieves all application settings. # @PURPOSE: Retrieves all application settings.
# @PRE: Config manager is available. # @PRE: Config manager is available.
# @POST: Returns masked AppConfig. # @POST: Returns masked AppConfig.
@@ -82,7 +94,7 @@ def _validate_superset_connection_fast(env: Environment) -> None:
@router.get("", response_model=AppConfig) @router.get("", response_model=AppConfig)
async def get_settings( async def get_settings(
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ")) _=Depends(has_permission("admin:settings", "READ")),
): ):
with belief_scope("get_settings"): with belief_scope("get_settings"):
logger.info("[get_settings][Entry] Fetching all settings") logger.info("[get_settings][Entry] Fetching all settings")
@@ -93,10 +105,13 @@ async def get_settings(
if env.password: if env.password:
env.password = "********" env.password = "********"
return config return config
# [/DEF:get_settings:Function] # [/DEF:get_settings:Function]
# [DEF:update_global_settings:Function] # [DEF:update_global_settings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Updates global application settings. # @PURPOSE: Updates global application settings.
# @PRE: New settings are provided. # @PRE: New settings are provided.
# @POST: Global settings are updated. # @POST: Global settings are updated.
@@ -106,30 +121,36 @@ async def get_settings(
async def update_global_settings( async def update_global_settings(
settings: GlobalSettings, settings: GlobalSettings,
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("update_global_settings"): with belief_scope("update_global_settings"):
logger.info("[update_global_settings][Entry] Updating global settings") logger.info("[update_global_settings][Entry] Updating global settings")
config_manager.update_global_settings(settings) config_manager.update_global_settings(settings)
return settings return settings
# [/DEF:update_global_settings:Function] # [/DEF:update_global_settings:Function]
# [DEF:get_storage_settings:Function] # [DEF:get_storage_settings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Retrieves storage-specific settings. # @PURPOSE: Retrieves storage-specific settings.
# @RETURN: StorageConfig - The storage configuration. # @RETURN: StorageConfig - The storage configuration.
@router.get("/storage", response_model=StorageConfig) @router.get("/storage", response_model=StorageConfig)
async def get_storage_settings( async def get_storage_settings(
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ")) _=Depends(has_permission("admin:settings", "READ")),
): ):
with belief_scope("get_storage_settings"): with belief_scope("get_storage_settings"):
return config_manager.get_config().settings.storage return config_manager.get_config().settings.storage
# [/DEF:get_storage_settings:Function] # [/DEF:get_storage_settings:Function]
# [DEF:update_storage_settings:Function] # [DEF:update_storage_settings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Updates storage-specific settings. # @PURPOSE: Updates storage-specific settings.
# @PARAM: storage (StorageConfig) - The new storage settings. # @PARAM: storage (StorageConfig) - The new storage settings.
# @POST: Storage settings are updated and saved. # @POST: Storage settings are updated and saved.
@@ -138,7 +159,7 @@ async def get_storage_settings(
async def update_storage_settings( async def update_storage_settings(
storage: StorageConfig, storage: StorageConfig,
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("update_storage_settings"): with belief_scope("update_storage_settings"):
is_valid, message = config_manager.validate_path(storage.root_path) is_valid, message = config_manager.validate_path(storage.root_path)
@@ -149,10 +170,13 @@ async def update_storage_settings(
settings.storage = storage settings.storage = storage
config_manager.update_global_settings(settings) config_manager.update_global_settings(settings)
return config_manager.get_config().settings.storage return config_manager.get_config().settings.storage
# [/DEF:update_storage_settings:Function] # [/DEF:update_storage_settings:Function]
# [DEF:get_environments:Function] # [DEF:get_environments:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Lists all configured Superset environments. # @PURPOSE: Lists all configured Superset environments.
# @PRE: Config manager is available. # @PRE: Config manager is available.
# @POST: Returns list of environments. # @POST: Returns list of environments.
@@ -160,7 +184,7 @@ async def update_storage_settings(
@router.get("/environments", response_model=List[Environment]) @router.get("/environments", response_model=List[Environment])
async def get_environments( async def get_environments(
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ")) _=Depends(has_permission("admin:settings", "READ")),
): ):
with belief_scope("get_environments"): with belief_scope("get_environments"):
logger.info("[get_environments][Entry] Fetching environments") logger.info("[get_environments][Entry] Fetching environments")
@@ -169,10 +193,13 @@ async def get_environments(
env.copy(update={"url": _normalize_superset_env_url(env.url)}) env.copy(update={"url": _normalize_superset_env_url(env.url)})
for env in environments for env in environments
] ]
# [/DEF:get_environments:Function] # [/DEF:get_environments:Function]
# [DEF:add_environment:Function] # [DEF:add_environment:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Adds a new Superset environment. # @PURPOSE: Adds a new Superset environment.
# @PRE: Environment data is valid and reachable. # @PRE: Environment data is valid and reachable.
# @POST: Environment is added to config. # @POST: Environment is added to config.
@@ -182,7 +209,7 @@ async def get_environments(
async def add_environment( async def add_environment(
env: Environment, env: Environment,
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("add_environment"): with belief_scope("add_environment"):
logger.info(f"[add_environment][Entry] Adding environment {env.id}") logger.info(f"[add_environment][Entry] Adding environment {env.id}")
@@ -192,15 +219,22 @@ async def add_environment(
try: try:
_validate_superset_connection_fast(env) _validate_superset_connection_fast(env)
except Exception as e: except Exception as e:
logger.error(f"[add_environment][Coherence:Failed] Connection validation failed: {e}") logger.error(
raise HTTPException(status_code=400, detail=f"Connection validation failed: {e}") f"[add_environment][Coherence:Failed] Connection validation failed: {e}"
)
raise HTTPException(
status_code=400, detail=f"Connection validation failed: {e}"
)
config_manager.add_environment(env) config_manager.add_environment(env)
return env return env
# [/DEF:add_environment:Function] # [/DEF:add_environment:Function]
# [DEF:update_environment:Function] # [DEF:update_environment:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Updates an existing Superset environment. # @PURPOSE: Updates an existing Superset environment.
# @PRE: ID and valid environment data are provided. # @PRE: ID and valid environment data are provided.
# @POST: Environment is updated in config. # @POST: Environment is updated in config.
@@ -211,7 +245,7 @@ async def add_environment(
async def update_environment( async def update_environment(
id: str, id: str,
env: Environment, env: Environment,
config_manager: ConfigManager = Depends(get_config_manager) config_manager: ConfigManager = Depends(get_config_manager),
): ):
with belief_scope("update_environment"): with belief_scope("update_environment"):
logger.info(f"[update_environment][Entry] Updating environment {id}") logger.info(f"[update_environment][Entry] Updating environment {id}")
@@ -221,7 +255,9 @@ async def update_environment(
# If password is masked, we need the real one for validation # If password is masked, we need the real one for validation
env_to_validate = env.copy(deep=True) env_to_validate = env.copy(deep=True)
if env_to_validate.password == "********": if env_to_validate.password == "********":
old_env = next((e for e in config_manager.get_environments() if e.id == id), None) old_env = next(
(e for e in config_manager.get_environments() if e.id == id), None
)
if old_env: if old_env:
env_to_validate.password = old_env.password env_to_validate.password = old_env.password
@@ -229,33 +265,42 @@ async def update_environment(
try: try:
_validate_superset_connection_fast(env_to_validate) _validate_superset_connection_fast(env_to_validate)
except Exception as e: except Exception as e:
logger.error(f"[update_environment][Coherence:Failed] Connection validation failed: {e}") logger.error(
raise HTTPException(status_code=400, detail=f"Connection validation failed: {e}") f"[update_environment][Coherence:Failed] Connection validation failed: {e}"
)
raise HTTPException(
status_code=400, detail=f"Connection validation failed: {e}"
)
if config_manager.update_environment(id, env): if config_manager.update_environment(id, env):
return env return env
raise HTTPException(status_code=404, detail=f"Environment {id} not found") raise HTTPException(status_code=404, detail=f"Environment {id} not found")
# [/DEF:update_environment:Function] # [/DEF:update_environment:Function]
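The update path treats the "********" placeholder as "keep the stored secret", so clients can echo the masked value back without ever holding the real password. A minimal sketch of that resolution:

MASK = "********"

def resolve_password(submitted: str, stored: str) -> str:
    # A masked submission keeps the previously stored secret unchanged.
    return stored if submitted == MASK else submitted

assert resolve_password(MASK, "s3cret") == "s3cret"
assert resolve_password("new-pass", "s3cret") == "new-pass"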
# [DEF:delete_environment:Function] # [DEF:delete_environment:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Deletes a Superset environment. # @PURPOSE: Deletes a Superset environment.
# @PRE: ID is provided. # @PRE: ID is provided.
# @POST: Environment is removed from config. # @POST: Environment is removed from config.
# @PARAM: id (str) - The ID of the environment to delete. # @PARAM: id (str) - The ID of the environment to delete.
@router.delete("/environments/{id}") @router.delete("/environments/{id}")
async def delete_environment( async def delete_environment(
id: str, id: str, config_manager: ConfigManager = Depends(get_config_manager)
config_manager: ConfigManager = Depends(get_config_manager)
): ):
with belief_scope("delete_environment"): with belief_scope("delete_environment"):
logger.info(f"[delete_environment][Entry] Deleting environment {id}") logger.info(f"[delete_environment][Entry] Deleting environment {id}")
config_manager.delete_environment(id) config_manager.delete_environment(id)
return {"message": f"Environment {id} deleted"} return {"message": f"Environment {id} deleted"}
# [/DEF:delete_environment:Function] # [/DEF:delete_environment:Function]
# [DEF:test_environment_connection:Function] # [DEF:test_environment_connection:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Tests the connection to a Superset environment. # @PURPOSE: Tests the connection to a Superset environment.
# @PRE: ID is provided. # @PRE: ID is provided.
# @POST: Returns success or error status. # @POST: Returns success or error status.
@@ -263,8 +308,7 @@ async def delete_environment(
# @RETURN: dict - Success message or error. # @RETURN: dict - Success message or error.
@router.post("/environments/{id}/test") @router.post("/environments/{id}/test")
async def test_environment_connection( async def test_environment_connection(
id: str, id: str, config_manager: ConfigManager = Depends(get_config_manager)
config_manager: ConfigManager = Depends(get_config_manager)
): ):
with belief_scope("test_environment_connection"): with belief_scope("test_environment_connection"):
logger.info(f"[test_environment_connection][Entry] Testing environment {id}") logger.info(f"[test_environment_connection][Entry] Testing environment {id}")
@@ -277,15 +321,22 @@ async def test_environment_connection(
try: try:
_validate_superset_connection_fast(env) _validate_superset_connection_fast(env)
logger.info(f"[test_environment_connection][Coherence:OK] Connection successful for {id}") logger.info(
f"[test_environment_connection][Coherence:OK] Connection successful for {id}"
)
return {"status": "success", "message": "Connection successful"} return {"status": "success", "message": "Connection successful"}
except Exception as e: except Exception as e:
logger.error(f"[test_environment_connection][Coherence:Failed] Connection failed for {id}: {e}") logger.error(
f"[test_environment_connection][Coherence:Failed] Connection failed for {id}: {e}"
)
return {"status": "error", "message": str(e)} return {"status": "error", "message": str(e)}
# [/DEF:test_environment_connection:Function] # [/DEF:test_environment_connection:Function]
# [DEF:get_logging_config:Function] # [DEF:get_logging_config:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Retrieves current logging configuration. # @PURPOSE: Retrieves current logging configuration.
# @PRE: Config manager is available. # @PRE: Config manager is available.
# @POST: Returns logging configuration. # @POST: Returns logging configuration.
@@ -293,19 +344,22 @@ async def test_environment_connection(
@router.get("/logging", response_model=LoggingConfigResponse) @router.get("/logging", response_model=LoggingConfigResponse)
async def get_logging_config( async def get_logging_config(
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ")) _=Depends(has_permission("admin:settings", "READ")),
): ):
with belief_scope("get_logging_config"): with belief_scope("get_logging_config"):
logging_config = config_manager.get_config().settings.logging logging_config = config_manager.get_config().settings.logging
return LoggingConfigResponse( return LoggingConfigResponse(
level=logging_config.level, level=logging_config.level,
task_log_level=logging_config.task_log_level, task_log_level=logging_config.task_log_level,
enable_belief_state=logging_config.enable_belief_state enable_belief_state=logging_config.enable_belief_state,
) )
# [/DEF:get_logging_config:Function] # [/DEF:get_logging_config:Function]
# [DEF:update_logging_config:Function] # [DEF:update_logging_config:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Updates logging configuration. # @PURPOSE: Updates logging configuration.
# @PRE: New logging config is provided. # @PRE: New logging config is provided.
# @POST: Logging configuration is updated and saved. # @POST: Logging configuration is updated and saved.
@@ -315,10 +369,12 @@ async def get_logging_config(
async def update_logging_config( async def update_logging_config(
config: LoggingConfig, config: LoggingConfig,
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("update_logging_config"): with belief_scope("update_logging_config"):
logger.info(f"[update_logging_config][Entry] Updating logging config: level={config.level}, task_log_level={config.task_log_level}") logger.info(
f"[update_logging_config][Entry] Updating logging config: level={config.level}, task_log_level={config.task_log_level}"
)
# Get current settings and update logging config # Get current settings and update logging config
settings = config_manager.get_config().settings settings = config_manager.get_config().settings
@@ -328,10 +384,13 @@ async def update_logging_config(
return LoggingConfigResponse( return LoggingConfigResponse(
level=config.level, level=config.level,
task_log_level=config.task_log_level, task_log_level=config.task_log_level,
enable_belief_state=config.enable_belief_state enable_belief_state=config.enable_belief_state,
) )
# [/DEF:update_logging_config:Function] # [/DEF:update_logging_config:Function]
# [DEF:ConsolidatedSettingsResponse:Class] # [DEF:ConsolidatedSettingsResponse:Class]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Response model for consolidated application settings. # @PURPOSE: Response model for consolidated application settings.
@@ -343,10 +402,13 @@ class ConsolidatedSettingsResponse(BaseModel):
logging: dict logging: dict
storage: dict storage: dict
notifications: dict = {} notifications: dict = {}
# [/DEF:ConsolidatedSettingsResponse:Class] # [/DEF:ConsolidatedSettingsResponse:Class]
# [DEF:get_consolidated_settings:Function] # [DEF:get_consolidated_settings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 4
# @PURPOSE: Retrieves all settings categories in a single call. # @PURPOSE: Retrieves all settings categories in a single call.
# @PRE: Config manager is available. # @PRE: Config manager is available.
# @POST: Returns all consolidated settings. # @POST: Returns all consolidated settings.
@@ -354,15 +416,18 @@ class ConsolidatedSettingsResponse(BaseModel):
@router.get("/consolidated", response_model=ConsolidatedSettingsResponse) @router.get("/consolidated", response_model=ConsolidatedSettingsResponse)
async def get_consolidated_settings( async def get_consolidated_settings(
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ")) _=Depends(has_permission("admin:settings", "READ")),
): ):
with belief_scope("get_consolidated_settings"): with belief_scope("get_consolidated_settings"):
logger.info("[get_consolidated_settings][Entry] Fetching all consolidated settings") logger.info(
"[get_consolidated_settings][Entry] Fetching all consolidated settings"
)
config = config_manager.get_config() config = config_manager.get_config()
from ...services.llm_provider import LLMProviderService from ...services.llm_provider import LLMProviderService
from ...core.database import SessionLocal from ...core.database import SessionLocal
db = SessionLocal() db = SessionLocal()
notifications_payload = {} notifications_payload = {}
try: try:
@@ -376,13 +441,18 @@ async def get_consolidated_settings(
"base_url": p.base_url, "base_url": p.base_url,
"api_key": "********", "api_key": "********",
"default_model": p.default_model, "default_model": p.default_model,
"is_active": p.is_active "is_active": p.is_active,
} for p in providers }
for p in providers
] ]
config_record = db.query(AppConfigRecord).filter(AppConfigRecord.id == "global").first() config_record = (
db.query(AppConfigRecord).filter(AppConfigRecord.id == "global").first()
)
if config_record and isinstance(config_record.payload, dict): if config_record and isinstance(config_record.payload, dict):
notifications_payload = config_record.payload.get("notifications", {}) or {} notifications_payload = (
config_record.payload.get("notifications", {}) or {}
)
finally: finally:
db.close() db.close()
@@ -395,12 +465,15 @@ async def get_consolidated_settings(
llm_providers=llm_providers_list, llm_providers=llm_providers_list,
logging=config.settings.logging.dict(), logging=config.settings.logging.dict(),
storage=config.settings.storage.dict(), storage=config.settings.storage.dict(),
notifications=notifications_payload notifications=notifications_payload,
) )
# [/DEF:get_consolidated_settings:Function] # [/DEF:get_consolidated_settings:Function]
# [DEF:update_consolidated_settings:Function] # [DEF:update_consolidated_settings:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Bulk update application settings from the consolidated view. # @PURPOSE: Bulk update application settings from the consolidated view.
# @PRE: User has admin permissions, config is valid. # @PRE: User has admin permissions, config is valid.
# @POST: Settings are updated and saved via ConfigManager. # @POST: Settings are updated and saved via ConfigManager.
@@ -408,10 +481,12 @@ async def get_consolidated_settings(
async def update_consolidated_settings( async def update_consolidated_settings(
settings_patch: dict, settings_patch: dict,
config_manager: ConfigManager = Depends(get_config_manager), config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("update_consolidated_settings"): with belief_scope("update_consolidated_settings"):
logger.info("[update_consolidated_settings][Entry] Applying consolidated settings patch") logger.info(
"[update_consolidated_settings][Entry] Applying consolidated settings patch"
)
current_config = config_manager.get_config() current_config = config_manager.get_config()
current_settings = current_config.settings current_settings = current_config.settings
@@ -443,23 +518,28 @@ async def update_consolidated_settings(
config_manager.update_global_settings(current_settings) config_manager.update_global_settings(current_settings)
return {"status": "success", "message": "Settings updated"} return {"status": "success", "message": "Settings updated"}
# [/DEF:update_consolidated_settings:Function] # [/DEF:update_consolidated_settings:Function]
# [DEF:get_validation_policies:Function] # [DEF:get_validation_policies:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Lists all validation policies. # @PURPOSE: Lists all validation policies.
# @RETURN: List[ValidationPolicyResponse] - List of policies. # @RETURN: List[ValidationPolicyResponse] - List of policies.
@router.get("/automation/policies", response_model=List[ValidationPolicyResponse]) @router.get("/automation/policies", response_model=List[ValidationPolicyResponse])
async def get_validation_policies( async def get_validation_policies(
db: Session = Depends(get_db), db: Session = Depends(get_db), _=Depends(has_permission("admin:settings", "READ"))
_ = Depends(has_permission("admin:settings", "READ"))
): ):
with belief_scope("get_validation_policies"): with belief_scope("get_validation_policies"):
return db.query(ValidationPolicy).all() return db.query(ValidationPolicy).all()
# [/DEF:get_validation_policies:Function] # [/DEF:get_validation_policies:Function]
# [DEF:create_validation_policy:Function] # [DEF:create_validation_policy:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Creates a new validation policy. # @PURPOSE: Creates a new validation policy.
# @PARAM: policy (ValidationPolicyCreate) - The policy data. # @PARAM: policy (ValidationPolicyCreate) - The policy data.
# @RETURN: ValidationPolicyResponse - The created policy. # @RETURN: ValidationPolicyResponse - The created policy.
@@ -467,7 +547,7 @@ async def get_validation_policies(
async def create_validation_policy( async def create_validation_policy(
policy: ValidationPolicyCreate, policy: ValidationPolicyCreate,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("create_validation_policy"): with belief_scope("create_validation_policy"):
db_policy = ValidationPolicy(**policy.dict()) db_policy = ValidationPolicy(**policy.dict())
@@ -475,10 +555,13 @@ async def create_validation_policy(
db.commit() db.commit()
db.refresh(db_policy) db.refresh(db_policy)
return db_policy return db_policy
# [/DEF:create_validation_policy:Function] # [/DEF:create_validation_policy:Function]
# [DEF:update_validation_policy:Function] # [DEF:update_validation_policy:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Updates an existing validation policy. # @PURPOSE: Updates an existing validation policy.
# @PARAM: id (str) - The ID of the policy to update. # @PARAM: id (str) - The ID of the policy to update.
# @PARAM: policy (ValidationPolicyUpdate) - The updated policy data. # @PARAM: policy (ValidationPolicyUpdate) - The updated policy data.
@@ -488,7 +571,7 @@ async def update_validation_policy(
id: str, id: str,
policy: ValidationPolicyUpdate, policy: ValidationPolicyUpdate,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("update_validation_policy"): with belief_scope("update_validation_policy"):
db_policy = db.query(ValidationPolicy).filter(ValidationPolicy.id == id).first() db_policy = db.query(ValidationPolicy).filter(ValidationPolicy.id == id).first()
@@ -502,17 +585,20 @@ async def update_validation_policy(
db.commit() db.commit()
db.refresh(db_policy) db.refresh(db_policy)
return db_policy return db_policy
# [/DEF:update_validation_policy:Function] # [/DEF:update_validation_policy:Function]
# [DEF:delete_validation_policy:Function] # [DEF:delete_validation_policy:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Deletes a validation policy. # @PURPOSE: Deletes a validation policy.
# @PARAM: id (str) - The ID of the policy to delete. # @PARAM: id (str) - The ID of the policy to delete.
@router.delete("/automation/policies/{id}") @router.delete("/automation/policies/{id}")
async def delete_validation_policy( async def delete_validation_policy(
id: str, id: str,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_ = Depends(has_permission("admin:settings", "WRITE")) _=Depends(has_permission("admin:settings", "WRITE")),
): ):
with belief_scope("delete_validation_policy"): with belief_scope("delete_validation_policy"):
db_policy = db.query(ValidationPolicy).filter(ValidationPolicy.id == id).first() db_policy = db.query(ValidationPolicy).filter(ValidationPolicy.id == id).first()
@@ -522,6 +608,8 @@ async def delete_validation_policy(
db.delete(db_policy) db.delete(db_policy)
db.commit() db.commit()
return {"message": "Policy deleted"} return {"message": "Policy deleted"}
# [/DEF:delete_validation_policy:Function] # [/DEF:delete_validation_policy:Function]
# [/DEF:SettingsRouter:Module] # [/DEF:SettingsRouter:Module]
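The settings routes above all gate access with _=Depends(has_permission("admin:settings", ...)). The has_permission implementation itself is not part of this diff; as a rough sketch, assuming a dependency-factory pattern and a hypothetical permission check on the current user, it could look like this:

from fastapi import Depends, HTTPException, status

def get_current_user():
    # Stand-in for the project's real authentication dependency; illustrative only.
    raise NotImplementedError

def has_permission(resource: str, action: str):
    # Dependency factory: routes declare _=Depends(has_permission("admin:settings", "WRITE"))
    # and the returned checker runs before the endpoint body executes.
    def checker(current_user=Depends(get_current_user)):
        if not current_user.can(resource, action):  # .can() is a hypothetical permission helper
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Forbidden")
        return current_user
    return checker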

View File

@@ -1,11 +1,11 @@
# [DEF:TasksRouter:Module] # [DEF:TasksRouter:Module]
# @COMPLEXITY: 4 # @COMPLEXITY: 3
# @SEMANTICS: api, router, tasks, create, list, get, logs # @SEMANTICS: api, router, tasks, create, list, get, logs
# @PURPOSE: Defines the FastAPI router for task-related endpoints, allowing clients to create, list, and get the status of tasks. # @PURPOSE: Defines the FastAPI router for task-related endpoints, allowing clients to create, list, and get the status of tasks.
# @LAYER: UI (API) # @LAYER: UI (API)
# @RELATION: DEPENDS_ON -> [backend.src.core.task_manager.manager.TaskManager] # @RELATION: DEPENDS_ON -> [TaskManager]
# @RELATION: DEPENDS_ON -> [backend.src.core.config_manager.ConfigManager] # @RELATION: DEPENDS_ON -> [ConfigManager]
# @RELATION: DEPENDS_ON -> [backend.src.services.llm_provider.LLMProviderService] # @RELATION: DEPENDS_ON -> [LLMProviderService]
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
from typing import List, Dict, Any, Optional from typing import List, Dict, Any, Optional
@@ -107,7 +107,7 @@ async def create_task(
# [/DEF:create_task:Function] # [/DEF:create_task:Function]
# [DEF:list_tasks:Function] # [DEF:list_tasks:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Retrieve a list of tasks with pagination and optional status filter. # @PURPOSE: Retrieve a list of tasks with pagination and optional status filter.
# @PARAM: limit (int) - Maximum number of tasks to return. # @PARAM: limit (int) - Maximum number of tasks to return.
# @PARAM: offset (int) - Number of tasks to skip. # @PARAM: offset (int) - Number of tasks to skip.
@@ -147,7 +147,7 @@ async def list_tasks(
# [/DEF:list_tasks:Function] # [/DEF:list_tasks:Function]
# [DEF:get_task:Function] # [DEF:get_task:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Retrieve the details of a specific task. # @PURPOSE: Retrieve the details of a specific task.
# @PARAM: task_id (str) - The unique identifier of the task. # @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: task_manager (TaskManager) - The task manager instance. # @PARAM: task_manager (TaskManager) - The task manager instance.
@@ -213,7 +213,7 @@ async def get_task_logs(
# [/DEF:get_task_logs:Function] # [/DEF:get_task_logs:Function]
# [DEF:get_task_log_stats:Function] # [DEF:get_task_log_stats:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Get statistics about logs for a task (counts by level and source). # @PURPOSE: Get statistics about logs for a task (counts by level and source).
# @PARAM: task_id (str) - The unique identifier of the task. # @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: task_manager (TaskManager) - The task manager instance. # @PARAM: task_manager (TaskManager) - The task manager instance.
@@ -249,7 +249,7 @@ async def get_task_log_stats(
# [/DEF:get_task_log_stats:Function] # [/DEF:get_task_log_stats:Function]
# [DEF:get_task_log_sources:Function] # [DEF:get_task_log_sources:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Get unique sources for a task's logs. # @PURPOSE: Get unique sources for a task's logs.
# @PARAM: task_id (str) - The unique identifier of the task. # @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: task_manager (TaskManager) - The task manager instance. # @PARAM: task_manager (TaskManager) - The task manager instance.
@@ -269,7 +269,7 @@ async def get_task_log_sources(
# [/DEF:get_task_log_sources:Function] # [/DEF:get_task_log_sources:Function]
# [DEF:resolve_task:Function] # [DEF:resolve_task:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Resolve a task that is awaiting mapping. # @PURPOSE: Resolve a task that is awaiting mapping.
# @PARAM: task_id (str) - The unique identifier of the task. # @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: request (ResolveTaskRequest) - The resolution parameters. # @PARAM: request (ResolveTaskRequest) - The resolution parameters.
@@ -293,7 +293,7 @@ async def resolve_task(
# [/DEF:resolve_task:Function] # [/DEF:resolve_task:Function]
# [DEF:resume_task:Function] # [DEF:resume_task:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Resume a task that is awaiting input (e.g., passwords). # @PURPOSE: Resume a task that is awaiting input (e.g., passwords).
# @PARAM: task_id (str) - The unique identifier of the task. # @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: request (ResumeTaskRequest) - The input (passwords). # @PARAM: request (ResumeTaskRequest) - The input (passwords).
@@ -317,7 +317,7 @@ async def resume_task(
# [/DEF:resume_task:Function] # [/DEF:resume_task:Function]
# [DEF:clear_tasks:Function] # [DEF:clear_tasks:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @PURPOSE: Clear tasks matching the status filter. # @PURPOSE: Clear tasks matching the status filter.
# @PARAM: status (Optional[TaskStatus]) - Filter by task status. # @PARAM: status (Optional[TaskStatus]) - Filter by task status.
# @PARAM: task_manager (TaskManager) - The task manager instance. # @PARAM: task_manager (TaskManager) - The task manager instance.

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.core.__tests__.test_config_manager_compat:Module] # [DEF:TestConfigManagerCompat:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: config-manager, compatibility, payload, tests # @SEMANTICS: config-manager, compatibility, payload, tests
# @PURPOSE: Verifies ConfigManager compatibility wrappers preserve legacy payload sections. # @PURPOSE: Verifies ConfigManager compatibility wrappers preserve legacy payload sections.
@@ -12,6 +12,7 @@ from src.core.config_models import AppConfig, Environment, GlobalSettings
# [DEF:test_get_payload_preserves_legacy_sections:Function] # [DEF:test_get_payload_preserves_legacy_sections:Function]
# @RELATION: BINDS_TO -> TestConfigManagerCompat
# @PURPOSE: Ensure get_payload merges typed config into raw payload without dropping legacy sections. # @PURPOSE: Ensure get_payload merges typed config into raw payload without dropping legacy sections.
def test_get_payload_preserves_legacy_sections(): def test_get_payload_preserves_legacy_sections():
manager = ConfigManager.__new__(ConfigManager) manager = ConfigManager.__new__(ConfigManager)
@@ -26,6 +27,7 @@ def test_get_payload_preserves_legacy_sections():
# [DEF:test_save_config_accepts_raw_payload_and_keeps_extras:Function] # [DEF:test_save_config_accepts_raw_payload_and_keeps_extras:Function]
# @RELATION: BINDS_TO -> TestConfigManagerCompat
# @PURPOSE: Ensure save_config accepts raw dict payload, refreshes typed config, and preserves extra sections. # @PURPOSE: Ensure save_config accepts raw dict payload, refreshes typed config, and preserves extra sections.
def test_save_config_accepts_raw_payload_and_keeps_extras(monkeypatch): def test_save_config_accepts_raw_payload_and_keeps_extras(monkeypatch):
manager = ConfigManager.__new__(ConfigManager) manager = ConfigManager.__new__(ConfigManager)
@@ -53,6 +55,7 @@ def test_save_config_accepts_raw_payload_and_keeps_extras(monkeypatch):
# [DEF:test_save_config_syncs_environment_records_for_fk_backed_flows:Function] # [DEF:test_save_config_syncs_environment_records_for_fk_backed_flows:Function]
# @RELATION: BINDS_TO -> TestConfigManagerCompat
# @PURPOSE: Ensure saving config mirrors typed environments into relational records required by FK-backed session persistence. # @PURPOSE: Ensure saving config mirrors typed environments into relational records required by FK-backed session persistence.
def test_save_config_syncs_environment_records_for_fk_backed_flows(): def test_save_config_syncs_environment_records_for_fk_backed_flows():
manager = ConfigManager.__new__(ConfigManager) manager = ConfigManager.__new__(ConfigManager)
@@ -108,6 +111,7 @@ def test_save_config_syncs_environment_records_for_fk_backed_flows():
# [DEF:test_load_config_syncs_environment_records_from_existing_db_payload:Function] # [DEF:test_load_config_syncs_environment_records_from_existing_db_payload:Function]
# @RELATION: BINDS_TO -> TestConfigManagerCompat
# @PURPOSE: Ensure loading an existing DB-backed config also mirrors environment rows required by FK-backed runtime flows. # @PURPOSE: Ensure loading an existing DB-backed config also mirrors environment rows required by FK-backed runtime flows.
def test_load_config_syncs_environment_records_from_existing_db_payload(monkeypatch): def test_load_config_syncs_environment_records_from_existing_db_payload(monkeypatch):
manager = ConfigManager.__new__(ConfigManager) manager = ConfigManager.__new__(ConfigManager)
@@ -161,4 +165,4 @@ def test_load_config_syncs_environment_records_from_existing_db_payload(monkeypa
assert closed["value"] is True assert closed["value"] is True
# [/DEF:test_load_config_syncs_environment_records_from_existing_db_payload:Function] # [/DEF:test_load_config_syncs_environment_records_from_existing_db_payload:Function]
# [/DEF:backend.src.core.__tests__.test_config_manager_compat:Module] # [/DEF:TestConfigManagerCompat:Module]
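The compatibility tests above all exercise one rule: typed config sections are written back into the raw payload while legacy sections the typed models do not know about survive untouched. A minimal sketch of that merge, assuming plain dict payloads (the real ConfigManager internals are not shown in this diff):

def merge_typed_into_payload(raw_payload: dict, typed_sections: dict) -> dict:
    # Copy first so unknown legacy sections survive untouched.
    merged = dict(raw_payload)
    # Typed sections win wherever keys overlap.
    merged.update(typed_sections)
    return merged

# Example: "legacy_reports" is preserved while "settings" is refreshed.
payload = {"legacy_reports": {"enabled": True}, "settings": {"logging": {"level": "INFO"}}}
typed = {"settings": {"logging": {"level": "DEBUG"}}}
assert merge_typed_into_payload(payload, typed)["legacy_reports"] == {"enabled": True}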

View File

@@ -28,6 +28,7 @@ from src.models.filter_state import (
# [DEF:_make_environment:Function] # [DEF:_make_environment:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
def _make_environment() -> Environment: def _make_environment() -> Environment:
return Environment( return Environment(
id="env-1", id="env-1",
@@ -40,6 +41,7 @@ def _make_environment() -> Environment:
# [DEF:test_extract_native_filters_from_permalink:Function] # [DEF:test_extract_native_filters_from_permalink:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Extract native filters from a permalink key. # @PURPOSE: Extract native filters from a permalink key.
def test_extract_native_filters_from_permalink(): def test_extract_native_filters_from_permalink():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -86,6 +88,7 @@ def test_extract_native_filters_from_permalink():
# [DEF:test_extract_native_filters_from_permalink_direct_response:Function] # [DEF:test_extract_native_filters_from_permalink_direct_response:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Handle permalink response without result wrapper. # @PURPOSE: Handle permalink response without result wrapper.
def test_extract_native_filters_from_permalink_direct_response(): def test_extract_native_filters_from_permalink_direct_response():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -111,6 +114,7 @@ def test_extract_native_filters_from_permalink_direct_response():
# [DEF:test_extract_native_filters_from_key:Function] # [DEF:test_extract_native_filters_from_key:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Extract native filters from a native_filters_key. # @PURPOSE: Extract native filters from a native_filters_key.
def test_extract_native_filters_from_key(): def test_extract_native_filters_from_key():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -141,6 +145,7 @@ def test_extract_native_filters_from_key():
# [DEF:test_extract_native_filters_from_key_single_filter:Function] # [DEF:test_extract_native_filters_from_key_single_filter:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Handle single filter format in native filter state. # @PURPOSE: Handle single filter format in native filter state.
def test_extract_native_filters_from_key_single_filter(): def test_extract_native_filters_from_key_single_filter():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -165,6 +170,7 @@ def test_extract_native_filters_from_key_single_filter():
# [DEF:test_extract_native_filters_from_key_dict_value:Function] # [DEF:test_extract_native_filters_from_key_dict_value:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Handle filter state value as dict instead of JSON string. # @PURPOSE: Handle filter state value as dict instead of JSON string.
def test_extract_native_filters_from_key_dict_value(): def test_extract_native_filters_from_key_dict_value():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -189,6 +195,7 @@ def test_extract_native_filters_from_key_dict_value():
# [DEF:test_parse_dashboard_url_for_filters_permalink:Function] # [DEF:test_parse_dashboard_url_for_filters_permalink:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Parse permalink URL format. # @PURPOSE: Parse permalink URL format.
def test_parse_dashboard_url_for_filters_permalink(): def test_parse_dashboard_url_for_filters_permalink():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -206,6 +213,7 @@ def test_parse_dashboard_url_for_filters_permalink():
# [DEF:test_parse_dashboard_url_for_filters_native_key:Function] # [DEF:test_parse_dashboard_url_for_filters_native_key:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Parse native_filters_key URL format with numeric dashboard ID. # @PURPOSE: Parse native_filters_key URL format with numeric dashboard ID.
def test_parse_dashboard_url_for_filters_native_key(): def test_parse_dashboard_url_for_filters_native_key():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -224,6 +232,7 @@ def test_parse_dashboard_url_for_filters_native_key():
# [DEF:test_parse_dashboard_url_for_filters_native_key_slug:Function] # [DEF:test_parse_dashboard_url_for_filters_native_key_slug:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Parse native_filters_key URL format when dashboard reference is a slug, not a numeric ID. # @PURPOSE: Parse native_filters_key URL format when dashboard reference is a slug, not a numeric ID.
def test_parse_dashboard_url_for_filters_native_key_slug(): def test_parse_dashboard_url_for_filters_native_key_slug():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -250,6 +259,7 @@ def test_parse_dashboard_url_for_filters_native_key_slug():
# [DEF:test_parse_dashboard_url_for_filters_native_key_slug_resolution_fails:Function] # [DEF:test_parse_dashboard_url_for_filters_native_key_slug_resolution_fails:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Gracefully handle slug resolution failure for native_filters_key URL. # @PURPOSE: Gracefully handle slug resolution failure for native_filters_key URL.
def test_parse_dashboard_url_for_filters_native_key_slug_resolution_fails(): def test_parse_dashboard_url_for_filters_native_key_slug_resolution_fails():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -265,6 +275,7 @@ def test_parse_dashboard_url_for_filters_native_key_slug_resolution_fails():
# [DEF:test_parse_dashboard_url_for_filters_native_filters_direct:Function] # [DEF:test_parse_dashboard_url_for_filters_native_filters_direct:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Parse native_filters direct query param. # @PURPOSE: Parse native_filters direct query param.
def test_parse_dashboard_url_for_filters_native_filters_direct(): def test_parse_dashboard_url_for_filters_native_filters_direct():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -280,6 +291,7 @@ def test_parse_dashboard_url_for_filters_native_filters_direct():
# [DEF:test_parse_dashboard_url_for_filters_no_filters:Function] # [DEF:test_parse_dashboard_url_for_filters_no_filters:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Return empty result when no filters present. # @PURPOSE: Return empty result when no filters present.
def test_parse_dashboard_url_for_filters_no_filters(): def test_parse_dashboard_url_for_filters_no_filters():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -294,6 +306,7 @@ def test_parse_dashboard_url_for_filters_no_filters():
# [DEF:test_extra_form_data_merge:Function] # [DEF:test_extra_form_data_merge:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Test ExtraFormDataMerge correctly merges dictionaries. # @PURPOSE: Test ExtraFormDataMerge correctly merges dictionaries.
def test_extra_form_data_merge(): def test_extra_form_data_merge():
merger = ExtraFormDataMerge() merger = ExtraFormDataMerge()
@@ -329,6 +342,7 @@ def test_extra_form_data_merge():
# [DEF:test_filter_state_model:Function] # [DEF:test_filter_state_model:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Test FilterState Pydantic model. # @PURPOSE: Test FilterState Pydantic model.
def test_filter_state_model(): def test_filter_state_model():
state = FilterState( state = FilterState(
@@ -344,6 +358,7 @@ def test_filter_state_model():
# [DEF:test_parsed_native_filters_model:Function] # [DEF:test_parsed_native_filters_model:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Test ParsedNativeFilters Pydantic model. # @PURPOSE: Test ParsedNativeFilters Pydantic model.
def test_parsed_native_filters_model(): def test_parsed_native_filters_model():
filters = ParsedNativeFilters( filters = ParsedNativeFilters(
@@ -360,6 +375,7 @@ def test_parsed_native_filters_model():
# [DEF:test_parsed_native_filters_empty:Function] # [DEF:test_parsed_native_filters_empty:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Test ParsedNativeFilters with no filters. # @PURPOSE: Test ParsedNativeFilters with no filters.
def test_parsed_native_filters_empty(): def test_parsed_native_filters_empty():
filters = ParsedNativeFilters() filters = ParsedNativeFilters()
@@ -370,6 +386,7 @@ def test_parsed_native_filters_empty():
# [DEF:test_native_filter_data_mask_model:Function] # [DEF:test_native_filter_data_mask_model:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Test NativeFilterDataMask model. # @PURPOSE: Test NativeFilterDataMask model.
def test_native_filter_data_mask_model(): def test_native_filter_data_mask_model():
data_mask = NativeFilterDataMask( data_mask = NativeFilterDataMask(
@@ -386,6 +403,7 @@ def test_native_filter_data_mask_model():
# [DEF:test_recover_imported_filters_reconciles_raw_native_filter_ids_to_metadata_names:Function] # [DEF:test_recover_imported_filters_reconciles_raw_native_filter_ids_to_metadata_names:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Reconcile raw native filter ids from state to canonical metadata filter names. # @PURPOSE: Reconcile raw native filter ids from state to canonical metadata filter names.
def test_recover_imported_filters_reconciles_raw_native_filter_ids_to_metadata_names(): def test_recover_imported_filters_reconciles_raw_native_filter_ids_to_metadata_names():
client = MagicMock() client = MagicMock()
@@ -444,6 +462,7 @@ def test_recover_imported_filters_reconciles_raw_native_filter_ids_to_metadata_n
# [DEF:test_recover_imported_filters_collapses_state_and_metadata_duplicates_into_one_canonical_filter:Function] # [DEF:test_recover_imported_filters_collapses_state_and_metadata_duplicates_into_one_canonical_filter:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Collapse raw-id state entries and metadata entries into one canonical filter. # @PURPOSE: Collapse raw-id state entries and metadata entries into one canonical filter.
def test_recover_imported_filters_collapses_state_and_metadata_duplicates_into_one_canonical_filter(): def test_recover_imported_filters_collapses_state_and_metadata_duplicates_into_one_canonical_filter():
client = MagicMock() client = MagicMock()
@@ -499,6 +518,7 @@ def test_recover_imported_filters_collapses_state_and_metadata_duplicates_into_o
# [DEF:test_recover_imported_filters_preserves_unmatched_raw_native_filter_ids:Function] # [DEF:test_recover_imported_filters_preserves_unmatched_raw_native_filter_ids:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Preserve unmatched raw native filter ids as fallback diagnostics when metadata mapping is unavailable. # @PURPOSE: Preserve unmatched raw native filter ids as fallback diagnostics when metadata mapping is unavailable.
def test_recover_imported_filters_preserves_unmatched_raw_native_filter_ids(): def test_recover_imported_filters_preserves_unmatched_raw_native_filter_ids():
client = MagicMock() client = MagicMock()
@@ -550,6 +570,7 @@ def test_recover_imported_filters_preserves_unmatched_raw_native_filter_ids():
# [DEF:test_extract_imported_filters_preserves_clause_level_native_filter_payload_for_preview:Function] # [DEF:test_extract_imported_filters_preserves_clause_level_native_filter_payload_for_preview:Function]
# @RELATION: BINDS_TO -> NativeFilterExtractionTests
# @PURPOSE: Recovered native filter state should preserve exact Superset clause payload and time extras for preview compilation. # @PURPOSE: Recovered native filter state should preserve exact Superset clause payload and time extras for preview compilation.
def test_extract_imported_filters_preserves_clause_level_native_filter_payload_for_preview(): def test_extract_imported_filters_preserves_clause_level_native_filter_payload_for_preview():
extractor = SupersetContextExtractor(_make_environment(), client=MagicMock()) extractor = SupersetContextExtractor(_make_environment(), client=MagicMock())

View File

@@ -19,6 +19,7 @@ from src.core.utils.network import APIClient, DashboardNotFoundError, SupersetAP
# [DEF:_make_environment:Function] # [DEF:_make_environment:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
def _make_environment() -> Environment: def _make_environment() -> Environment:
return Environment( return Environment(
id="env-1", id="env-1",
@@ -33,6 +34,7 @@ def _make_environment() -> Environment:
# [DEF:_make_requests_http_error:Function] # [DEF:_make_requests_http_error:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
def _make_requests_http_error( def _make_requests_http_error(
status_code: int, url: str status_code: int, url: str
) -> requests.exceptions.HTTPError: ) -> requests.exceptions.HTTPError:
@@ -49,6 +51,7 @@ def _make_requests_http_error(
# [DEF:_make_httpx_status_error:Function] # [DEF:_make_httpx_status_error:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
def _make_httpx_status_error(status_code: int, url: str) -> httpx.HTTPStatusError: def _make_httpx_status_error(status_code: int, url: str) -> httpx.HTTPStatusError:
request = httpx.Request("GET", url) request = httpx.Request("GET", url)
response = httpx.Response( response = httpx.Response(
@@ -61,6 +64,7 @@ def _make_httpx_status_error(status_code: int, url: str) -> httpx.HTTPStatusErro
# [DEF:test_compile_dataset_preview_prefers_legacy_explore_form_data_strategy:Function] # [DEF:test_compile_dataset_preview_prefers_legacy_explore_form_data_strategy:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Superset preview compilation should prefer the legacy form_data transport inferred from browser traffic before falling back to chart-data. # @PURPOSE: Superset preview compilation should prefer the legacy form_data transport inferred from browser traffic before falling back to chart-data.
def test_compile_dataset_preview_prefers_legacy_explore_form_data_strategy(): def test_compile_dataset_preview_prefers_legacy_explore_form_data_strategy():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -146,6 +150,7 @@ def test_compile_dataset_preview_prefers_legacy_explore_form_data_strategy():
# [DEF:test_compile_dataset_preview_falls_back_to_chart_data_after_legacy_failures:Function] # [DEF:test_compile_dataset_preview_falls_back_to_chart_data_after_legacy_failures:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Superset preview compilation should fall back to chart-data when legacy form_data strategies are rejected. # @PURPOSE: Superset preview compilation should fall back to chart-data when legacy form_data strategies are rejected.
def test_compile_dataset_preview_falls_back_to_chart_data_after_legacy_failures(): def test_compile_dataset_preview_falls_back_to_chart_data_after_legacy_failures():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -242,6 +247,7 @@ def test_compile_dataset_preview_falls_back_to_chart_data_after_legacy_failures(
# [DEF:test_build_dataset_preview_query_context_places_recovered_filters_in_chart_style_form_data:Function] # [DEF:test_build_dataset_preview_query_context_places_recovered_filters_in_chart_style_form_data:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Preview query context should mirror chart-style filter transport so recovered native filters reach Superset compilation. # @PURPOSE: Preview query context should mirror chart-style filter transport so recovered native filters reach Superset compilation.
def test_build_dataset_preview_query_context_places_recovered_filters_in_chart_style_form_data(): def test_build_dataset_preview_query_context_places_recovered_filters_in_chart_style_form_data():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -304,6 +310,7 @@ def test_build_dataset_preview_query_context_places_recovered_filters_in_chart_s
# [DEF:test_build_dataset_preview_query_context_merges_dataset_template_params_and_preserves_user_values:Function] # [DEF:test_build_dataset_preview_query_context_merges_dataset_template_params_and_preserves_user_values:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Preview query context should merge dataset template params for parity with real dataset definitions while preserving explicit session overrides. # @PURPOSE: Preview query context should merge dataset template params for parity with real dataset definitions while preserving explicit session overrides.
def test_build_dataset_preview_query_context_merges_dataset_template_params_and_preserves_user_values(): def test_build_dataset_preview_query_context_merges_dataset_template_params_and_preserves_user_values():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -334,6 +341,7 @@ def test_build_dataset_preview_query_context_merges_dataset_template_params_and_
# [DEF:test_build_dataset_preview_query_context_preserves_time_range_from_native_filter_payload:Function] # [DEF:test_build_dataset_preview_query_context_preserves_time_range_from_native_filter_payload:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Preview query context should preserve time-range native filter extras even when dataset defaults differ. # @PURPOSE: Preview query context should preserve time-range native filter extras even when dataset defaults differ.
def test_build_dataset_preview_query_context_preserves_time_range_from_native_filter_payload(): def test_build_dataset_preview_query_context_preserves_time_range_from_native_filter_payload():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -372,6 +380,7 @@ def test_build_dataset_preview_query_context_preserves_time_range_from_native_fi
# [DEF:test_build_dataset_preview_legacy_form_data_preserves_native_filter_clauses:Function] # [DEF:test_build_dataset_preview_legacy_form_data_preserves_native_filter_clauses:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Legacy preview form_data should preserve recovered native filter clauses in browser-style fields without duplicating datasource for QueryObjectFactory. # @PURPOSE: Legacy preview form_data should preserve recovered native filter clauses in browser-style fields without duplicating datasource for QueryObjectFactory.
def test_build_dataset_preview_legacy_form_data_preserves_native_filter_clauses(): def test_build_dataset_preview_legacy_form_data_preserves_native_filter_clauses():
client = SupersetClient(_make_environment()) client = SupersetClient(_make_environment())
@@ -425,6 +434,7 @@ def test_build_dataset_preview_legacy_form_data_preserves_native_filter_clauses(
# [DEF:test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function] # [DEF:test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Sync network client should reserve dashboard-not-found translation for dashboard endpoints only. # @PURPOSE: Sync network client should reserve dashboard-not-found translation for dashboard endpoints only.
def test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic(): def test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic():
client = APIClient( client = APIClient(
@@ -448,6 +458,7 @@ def test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic():
# [DEF:test_sync_network_404_mapping_translates_dashboard_endpoints:Function] # [DEF:test_sync_network_404_mapping_translates_dashboard_endpoints:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Sync network client should still translate dashboard endpoint 404 responses into dashboard-not-found errors. # @PURPOSE: Sync network client should still translate dashboard endpoint 404 responses into dashboard-not-found errors.
def test_sync_network_404_mapping_translates_dashboard_endpoints(): def test_sync_network_404_mapping_translates_dashboard_endpoints():
client = APIClient( client = APIClient(
@@ -470,6 +481,7 @@ def test_sync_network_404_mapping_translates_dashboard_endpoints():
# [DEF:test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function] # [DEF:test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Async network client should reserve dashboard-not-found translation for dashboard endpoints only. # @PURPOSE: Async network client should reserve dashboard-not-found translation for dashboard endpoints only.
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic(): async def test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic():
@@ -499,6 +511,7 @@ async def test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic()
# [DEF:test_async_network_404_mapping_translates_dashboard_endpoints:Function] # [DEF:test_async_network_404_mapping_translates_dashboard_endpoints:Function]
# @RELATION: BINDS_TO -> SupersetPreviewPipelineTests
# @PURPOSE: Async network client should still translate dashboard endpoint 404 responses into dashboard-not-found errors. # @PURPOSE: Async network client should still translate dashboard endpoint 404 responses into dashboard-not-found errors.
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_async_network_404_mapping_translates_dashboard_endpoints(): async def test_async_network_404_mapping_translates_dashboard_endpoints():

View File

@@ -1,9 +1,9 @@
# [DEF:backend.src.core.__tests__.test_superset_profile_lookup:Module] # [DEF:TestSupersetProfileLookup:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, superset, profile, lookup, fallback, sorting # @SEMANTICS: tests, superset, profile, lookup, fallback, sorting
# @PURPOSE: Verifies Superset profile lookup adapter payload normalization and fallback error precedence. # @PURPOSE: Verifies Superset profile lookup adapter payload normalization and fallback error precedence.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.core.superset_profile_lookup
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
import json import json
@@ -23,7 +23,10 @@ from src.core.utils.network import AuthenticationError, SupersetAPIError
# [DEF:_RecordingNetworkClient:Class] # [DEF:_RecordingNetworkClient:Class]
# @RELATION: BINDS_TO -> TestSupersetProfileLookup
# @COMPLEXITY: 2
# @PURPOSE: Records request payloads and returns scripted responses for deterministic adapter tests. # @PURPOSE: Records request payloads and returns scripted responses for deterministic adapter tests.
# @INVARIANT: Each request consumes one scripted response in call order and persists call metadata.
class _RecordingNetworkClient: class _RecordingNetworkClient:
# [DEF:__init__:Function] # [DEF:__init__:Function]
# @PURPOSE: Initializes scripted network responses. # @PURPOSE: Initializes scripted network responses.
@@ -32,6 +35,7 @@ class _RecordingNetworkClient:
def __init__(self, scripted_responses: List[Any]): def __init__(self, scripted_responses: List[Any]):
self._scripted_responses = scripted_responses self._scripted_responses = scripted_responses
self.calls: List[Dict[str, Any]] = [] self.calls: List[Dict[str, Any]] = []
# [/DEF:__init__:Function] # [/DEF:__init__:Function]
# [DEF:request:Function] # [DEF:request:Function]
@@ -57,11 +61,15 @@ class _RecordingNetworkClient:
if isinstance(response, Exception): if isinstance(response, Exception):
raise response raise response
return response return response
# [/DEF:request:Function] # [/DEF:request:Function]
# [/DEF:_RecordingNetworkClient:Class] # [/DEF:_RecordingNetworkClient:Class]
# [DEF:test_get_users_page_sends_lowercase_order_direction:Function] # [DEF:test_get_users_page_sends_lowercase_order_direction:Function]
# @RELATION: BINDS_TO -> TestSupersetProfileLookup
# @PURPOSE: Ensures adapter sends lowercase order_direction compatible with Superset rison schema. # @PURPOSE: Ensures adapter sends lowercase order_direction compatible with Superset rison schema.
# @PRE: Adapter is initialized with recording network client. # @PRE: Adapter is initialized with recording network client.
# @POST: First request query payload contains order_direction='asc' for asc sort. # @POST: First request query payload contains order_direction='asc' for asc sort.
@@ -69,7 +77,9 @@ def test_get_users_page_sends_lowercase_order_direction():
client = _RecordingNetworkClient( client = _RecordingNetworkClient(
scripted_responses=[{"result": [{"username": "admin"}], "count": 1}] scripted_responses=[{"result": [{"username": "admin"}], "count": 1}]
) )
adapter = SupersetAccountLookupAdapter(network_client=client, environment_id="ss-dev") adapter = SupersetAccountLookupAdapter(
network_client=client, environment_id="ss-dev"
)
adapter.get_users_page( adapter.get_users_page(
search="admin", search="admin",
@@ -81,10 +91,13 @@ def test_get_users_page_sends_lowercase_order_direction():
sent_query = json.loads(client.calls[0]["params"]["q"]) sent_query = json.loads(client.calls[0]["params"]["q"])
assert sent_query["order_direction"] == "asc" assert sent_query["order_direction"] == "asc"
# [/DEF:test_get_users_page_sends_lowercase_order_direction:Function] # [/DEF:test_get_users_page_sends_lowercase_order_direction:Function]
# [DEF:test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error:Function] # [DEF:test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error:Function]
# @RELATION: BINDS_TO -> TestSupersetProfileLookup
# @PURPOSE: Ensures fallback auth error does not mask primary schema/query failure. # @PURPOSE: Ensures fallback auth error does not mask primary schema/query failure.
# @PRE: Primary endpoint fails with SupersetAPIError and fallback fails with AuthenticationError. # @PRE: Primary endpoint fails with SupersetAPIError and fallback fails with AuthenticationError.
# @POST: Raised exception remains primary SupersetAPIError (non-auth) to preserve root cause. # @POST: Raised exception remains primary SupersetAPIError (non-auth) to preserve root cause.
@@ -95,17 +108,22 @@ def test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error(
AuthenticationError(), AuthenticationError(),
] ]
) )
adapter = SupersetAccountLookupAdapter(network_client=client, environment_id="ss-dev") adapter = SupersetAccountLookupAdapter(
network_client=client, environment_id="ss-dev"
)
with pytest.raises(SupersetAPIError) as exc_info: with pytest.raises(SupersetAPIError) as exc_info:
adapter.get_users_page(sort_order="asc") adapter.get_users_page(sort_order="asc")
assert "API Error 400" in str(exc_info.value) assert "API Error 400" in str(exc_info.value)
assert not isinstance(exc_info.value, AuthenticationError) assert not isinstance(exc_info.value, AuthenticationError)
# [/DEF:test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error:Function] # [/DEF:test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error:Function]
# [DEF:test_get_users_page_uses_fallback_endpoint_when_primary_fails:Function] # [DEF:test_get_users_page_uses_fallback_endpoint_when_primary_fails:Function]
# @RELATION: BINDS_TO -> TestSupersetProfileLookup
# @PURPOSE: Verifies adapter retries second users endpoint and succeeds when fallback is healthy. # @PURPOSE: Verifies adapter retries second users endpoint and succeeds when fallback is healthy.
# @PRE: Primary endpoint fails; fallback returns valid users payload. # @PRE: Primary endpoint fails; fallback returns valid users payload.
# @POST: Result status is success and both endpoints were attempted in order. # @POST: Result status is success and both endpoints were attempted in order.
@@ -116,13 +134,20 @@ def test_get_users_page_uses_fallback_endpoint_when_primary_fails():
{"result": [{"username": "admin"}], "count": 1}, {"result": [{"username": "admin"}], "count": 1},
] ]
) )
adapter = SupersetAccountLookupAdapter(network_client=client, environment_id="ss-dev") adapter = SupersetAccountLookupAdapter(
network_client=client, environment_id="ss-dev"
)
result = adapter.get_users_page() result = adapter.get_users_page()
assert result["status"] == "success" assert result["status"] == "success"
assert [call["endpoint"] for call in client.calls] == ["/security/users/", "/security/users"] assert [call["endpoint"] for call in client.calls] == [
"/security/users/",
"/security/users",
]
# [/DEF:test_get_users_page_uses_fallback_endpoint_when_primary_fails:Function] # [/DEF:test_get_users_page_uses_fallback_endpoint_when_primary_fails:Function]
# [/DEF:backend.src.core.__tests__.test_superset_profile_lookup:Module] # [/DEF:TestSupersetProfileLookup:Module]
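The hunks above show _RecordingNetworkClient only in fragments. Pieced together, the pattern is a scripted fake that replays canned responses in call order and records every request for later assertions; the request signature below is an assumption, since the diff omits it.

from typing import Any, Dict, List

class RecordingNetworkClient:
    # Replays scripted responses in call order; raises entries that are exceptions.
    def __init__(self, scripted_responses: List[Any]):
        self._scripted_responses = list(scripted_responses)
        self.calls: List[Dict[str, Any]] = []

    def request(self, method: str, endpoint: str, **kwargs: Any) -> Any:
        # Persist call metadata so tests can assert on endpoints and query payloads.
        self.calls.append({"method": method, "endpoint": endpoint, **kwargs})
        response = self._scripted_responses.pop(0)
        if isinstance(response, Exception):
            raise response
        return response

Tests then assert against client.calls, for example that the fallback endpoint /security/users was attempted after /security/users/ failed.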

View File

@@ -3,9 +3,12 @@ from datetime import time, date, datetime, timedelta
from src.core.scheduler import ThrottledSchedulerConfigurator from src.core.scheduler import ThrottledSchedulerConfigurator
# [DEF:test_throttled_scheduler:Module] # [DEF:test_throttled_scheduler:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Unit tests for ThrottledSchedulerConfigurator distribution logic. # @PURPOSE: Unit tests for ThrottledSchedulerConfigurator distribution logic.
# [DEF:test_calculate_schedule_even_distribution:Function]
# @RELATION: BINDS_TO -> test_throttled_scheduler
def test_calculate_schedule_even_distribution(): def test_calculate_schedule_even_distribution():
""" """
@TEST_SCENARIO: 3 tasks in a 2-hour window should be spaced 1 hour apart. @TEST_SCENARIO: 3 tasks in a 2-hour window should be spaced 1 hour apart.
@@ -22,6 +25,10 @@ def test_calculate_schedule_even_distribution():
assert schedule[1] == datetime(2024, 1, 1, 2, 0) assert schedule[1] == datetime(2024, 1, 1, 2, 0)
assert schedule[2] == datetime(2024, 1, 1, 3, 0) assert schedule[2] == datetime(2024, 1, 1, 3, 0)
# [/DEF:test_calculate_schedule_even_distribution:Function]
# [DEF:test_calculate_schedule_midnight_crossing:Function]
# @RELATION: BINDS_TO -> test_throttled_scheduler
def test_calculate_schedule_midnight_crossing(): def test_calculate_schedule_midnight_crossing():
""" """
@TEST_SCENARIO: Window from 23:00 to 01:00 (next day). @TEST_SCENARIO: Window from 23:00 to 01:00 (next day).
@@ -38,6 +45,10 @@ def test_calculate_schedule_midnight_crossing():
assert schedule[1] == datetime(2024, 1, 2, 0, 0) assert schedule[1] == datetime(2024, 1, 2, 0, 0)
assert schedule[2] == datetime(2024, 1, 2, 1, 0) assert schedule[2] == datetime(2024, 1, 2, 1, 0)
# [/DEF:test_calculate_schedule_midnight_crossing:Function]
# [DEF:test_calculate_schedule_single_task:Function]
# @RELATION: BINDS_TO -> test_throttled_scheduler
def test_calculate_schedule_single_task(): def test_calculate_schedule_single_task():
""" """
@TEST_SCENARIO: Single task should be scheduled at start time. @TEST_SCENARIO: Single task should be scheduled at start time.
@@ -52,6 +63,10 @@ def test_calculate_schedule_single_task():
assert len(schedule) == 1 assert len(schedule) == 1
assert schedule[0] == datetime(2024, 1, 1, 1, 0) assert schedule[0] == datetime(2024, 1, 1, 1, 0)
# [/DEF:test_calculate_schedule_single_task:Function]
# [DEF:test_calculate_schedule_empty_list:Function]
# @RELATION: BINDS_TO -> test_throttled_scheduler
def test_calculate_schedule_empty_list(): def test_calculate_schedule_empty_list():
""" """
@TEST_SCENARIO: Empty dashboard list returns empty schedule. @TEST_SCENARIO: Empty dashboard list returns empty schedule.
@@ -65,6 +80,10 @@ def test_calculate_schedule_empty_list():
assert schedule == [] assert schedule == []
# [/DEF:test_calculate_schedule_empty_list:Function]
# [DEF:test_calculate_schedule_zero_window:Function]
# @RELATION: BINDS_TO -> test_throttled_scheduler
def test_calculate_schedule_zero_window(): def test_calculate_schedule_zero_window():
""" """
@TEST_SCENARIO: Window start == end. All tasks at start time. @TEST_SCENARIO: Window start == end. All tasks at start time.
@@ -80,6 +99,10 @@ def test_calculate_schedule_zero_window():
assert schedule[0] == datetime(2024, 1, 1, 1, 0) assert schedule[0] == datetime(2024, 1, 1, 1, 0)
assert schedule[1] == datetime(2024, 1, 1, 1, 0) assert schedule[1] == datetime(2024, 1, 1, 1, 0)
# [/DEF:test_calculate_schedule_zero_window:Function]
# [DEF:test_calculate_schedule_very_small_window:Function]
# @RELATION: BINDS_TO -> test_throttled_scheduler
def test_calculate_schedule_very_small_window(): def test_calculate_schedule_very_small_window():
""" """
@TEST_SCENARIO: Window smaller than number of tasks (in seconds). @TEST_SCENARIO: Window smaller than number of tasks (in seconds).
@@ -96,4 +119,4 @@ def test_calculate_schedule_very_small_window():
assert schedule[1] == datetime(2024, 1, 1, 1, 0, 0, 500000) # 0.5s assert schedule[1] == datetime(2024, 1, 1, 1, 0, 0, 500000) # 0.5s
assert schedule[2] == datetime(2024, 1, 1, 1, 0, 1) assert schedule[2] == datetime(2024, 1, 1, 1, 0, 1)
# [/DEF:test_throttled_scheduler:Module] # [/DEF:test_throttled_scheduler:Module]
# [/DEF:test_calculate_schedule_very_small_window:Function]
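Taken together, the scenarios above pin down the distribution rule: N tasks are spaced evenly across the window, a single task or zero-width window collapses everything onto the start time, and windows may cross midnight. A minimal sketch of that calculation under those assumptions; the free-function signature and datetime-based window arguments are illustrative, the real ThrottledSchedulerConfigurator presumably wraps this differently.

from datetime import datetime, timedelta
from typing import List, Sequence


def calculate_schedule(
    tasks: Sequence[str], window_start: datetime, window_end: datetime
) -> List[datetime]:
    """Spread tasks evenly across [window_start, window_end]."""
    if not tasks:
        return []
    if window_end < window_start:
        # Window crosses midnight: the end belongs to the next day.
        window_end += timedelta(days=1)
    if len(tasks) == 1 or window_end == window_start:
        return [window_start] * len(tasks)
    step = (window_end - window_start) / (len(tasks) - 1)
    return [window_start + i * step for i in range(len(tasks))]


# First scenario above: 3 tasks in a 2-hour window land 1 hour apart.
schedule = calculate_schedule(
    ["a", "b", "c"], datetime(2024, 1, 1, 1, 0), datetime(2024, 1, 1, 3, 0)
)
assert schedule == [
    datetime(2024, 1, 1, 1, 0),
    datetime(2024, 1, 1, 2, 0),
    datetime(2024, 1, 1, 3, 0),
]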

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.core.async_superset_client:Module] # [DEF:AsyncSupersetClientModule:Module]
# #
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: superset, async, client, httpx, dashboards, datasets # @SEMANTICS: superset, async, client, httpx, dashboards, datasets
@@ -8,8 +8,8 @@
# @POST: Provides non-blocking API access to Superset resources. # @POST: Provides non-blocking API access to Superset resources.
# @SIDE_EFFECT: Performs network I/O via httpx. # @SIDE_EFFECT: Performs network I/O via httpx.
# @DATA_CONTRACT: Input[Environment] -> Model[dashboard, chart, dataset] # @DATA_CONTRACT: Input[Environment] -> Model[dashboard, chart, dataset]
# @RELATION: [DEPENDS_ON] ->[backend.src.core.superset_client] # @RELATION: [DEPENDS_ON] ->[SupersetClientModule]
# @RELATION: [DEPENDS_ON] ->[backend.src.core.utils.async_network.AsyncAPIClient] # @RELATION: [DEPENDS_ON] ->[AsyncAPIClient]
# @INVARIANT: Async dashboard operations reuse shared auth cache and avoid sync requests in async routes. # @INVARIANT: Async dashboard operations reuse shared auth cache and avoid sync requests in async routes.
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
@@ -25,12 +25,12 @@ from .utils.async_network import AsyncAPIClient
# [/SECTION] # [/SECTION]
# [DEF:backend.src.core.async_superset_client.AsyncSupersetClient:Class] # [DEF:AsyncSupersetClient:Class]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Async sibling of SupersetClient for dashboard read paths. # @PURPOSE: Async sibling of SupersetClient for dashboard read paths.
# @RELATION: [INHERITS] ->[backend.src.core.superset_client.SupersetClient] # @RELATION: [INHERITS] ->[SupersetClient]
# @RELATION: [DEPENDS_ON] ->[backend.src.core.utils.async_network.AsyncAPIClient] # @RELATION: [DEPENDS_ON] ->[AsyncAPIClient]
# @RELATION: [CALLS] ->[backend.src.core.utils.async_network.AsyncAPIClient.request] # @RELATION: [CALLS] ->[AsyncAPIClient.request]
class AsyncSupersetClient(SupersetClient): class AsyncSupersetClient(SupersetClient):
# [DEF:AsyncSupersetClientInit:Function] # [DEF:AsyncSupersetClientInit:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
@@ -67,11 +67,12 @@ class AsyncSupersetClient(SupersetClient):
# [/DEF:AsyncSupersetClientClose:Function] # [/DEF:AsyncSupersetClientClose:Function]
# [DEF:backend.src.core.async_superset_client.AsyncSupersetClient.get_dashboards_page_async:Function] # [DEF:get_dashboards_page_async:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Fetch one dashboards page asynchronously. # @PURPOSE: Fetch one dashboards page asynchronously.
# @POST: Returns total count and page result list. # @POST: Returns total count and page result list.
# @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]] # @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
# @RELATION: [CALLS] -> [AsyncAPIClient.request]
async def get_dashboards_page_async( async def get_dashboards_page_async(
self, query: Optional[Dict] = None self, query: Optional[Dict] = None
) -> Tuple[int, List[Dict]]: ) -> Tuple[int, List[Dict]]:
@@ -687,4 +688,4 @@ class AsyncSupersetClient(SupersetClient):
# [/DEF:AsyncSupersetClient:Class] # [/DEF:AsyncSupersetClient:Class]
# [/DEF:backend.src.core.async_superset_client:Module] # [/DEF:AsyncSupersetClientModule:Module]
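The annotations describe get_dashboards_page_async as a single-page fetch returning (total count, page results) over httpx. A minimal standalone sketch of that contract; the URL, bearer-token handling, JSON query serialization, and field names are assumptions, and the real client reuses the shared auth cache per the module @INVARIANT rather than opening its own session.

import json
from typing import Dict, List, Optional, Tuple

import httpx


async def get_dashboards_page(
    base_url: str, token: str, query: Optional[Dict] = None
) -> Tuple[int, List[Dict]]:
    """Fetch one page of dashboards and return (total count, page results)."""
    params = {"q": json.dumps(query)} if query else None
    async with httpx.AsyncClient(base_url=base_url, timeout=30.0) as client:
        response = await client.get(
            "/api/v1/dashboard/",
            params=params,
            headers={"Authorization": f"Bearer {token}"},
        )
        response.raise_for_status()
        payload = response.json()
    return payload.get("count", 0), payload.get("result", [])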

View File

@@ -1,3 +1,3 @@
# [DEF:src.core.auth:Package] # [DEF:AuthPackage:Package]
# @PURPOSE: Authentication and authorization package root. # @PURPOSE: Authentication and authorization package root.
# [/DEF:src.core.auth:Package] # [/DEF:AuthPackage:Package]

View File

@@ -2,7 +2,7 @@
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Unit tests for authentication module # @PURPOSE: Unit tests for authentication module
# @LAYER: Domain # @LAYER: Domain
# @RELATION: VERIFIES -> src.core.auth # @RELATION: VERIFIES -> AuthPackage
import sys import sys
from pathlib import Path from pathlib import Path
@@ -14,6 +14,7 @@ import pytest
from sqlalchemy import create_engine from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import sessionmaker
from src.core.database import Base from src.core.database import Base
# Import all models to ensure they are registered with Base before create_all - must import both auth and mapping to ensure Base knows about all tables # Import all models to ensure they are registered with Base before create_all - must import both auth and mapping to ensure Base knows about all tables
from src.models import mapping, auth, task, report from src.models import mapping, auth, task, report
from src.models.auth import User, Role, Permission, ADGroupMapping from src.models.auth import User, Role, Permission, ADGroupMapping
@@ -24,7 +25,9 @@ from src.core.auth.security import verify_password, get_password_hash
# Create in-memory SQLite database for testing # Create in-memory SQLite database for testing
SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:" SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}) engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Create all tables # Create all tables
@@ -55,13 +58,16 @@ def auth_repo(db_session):
return AuthRepository(db_session) return AuthRepository(db_session)
# [DEF:test_create_user:Function]
# @PURPOSE: Verifies that a persisted user can be retrieved with intact credential hash.
# @RELATION: BINDS_TO -> test_auth
def test_create_user(auth_repo): def test_create_user(auth_repo):
"""Test user creation""" """Test user creation"""
user = User( user = User(
username="testuser", username="testuser",
email="test@example.com", email="test@example.com",
password_hash=get_password_hash("testpassword123"), password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL" auth_source="LOCAL",
) )
auth_repo.db.add(user) auth_repo.db.add(user)
@@ -74,13 +80,19 @@ def test_create_user(auth_repo):
assert verify_password("testpassword123", retrieved_user.password_hash) assert verify_password("testpassword123", retrieved_user.password_hash)
# [/DEF:test_create_user:Function]
# [DEF:test_authenticate_user:Function]
# @PURPOSE: Validates authentication outcomes for valid, wrong-password, and unknown-user cases.
# @RELATION: BINDS_TO -> test_auth
def test_authenticate_user(auth_service, auth_repo): def test_authenticate_user(auth_service, auth_repo):
"""Test user authentication with valid and invalid credentials""" """Test user authentication with valid and invalid credentials"""
user = User( user = User(
username="testuser", username="testuser",
email="test@example.com", email="test@example.com",
password_hash=get_password_hash("testpassword123"), password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL" auth_source="LOCAL",
) )
auth_repo.db.add(user) auth_repo.db.add(user)
@@ -100,13 +112,19 @@ def test_authenticate_user(auth_service, auth_repo):
assert invalid_user is None assert invalid_user is None
# [/DEF:test_authenticate_user:Function]
# [DEF:test_create_session:Function]
# @PURPOSE: Ensures session creation returns bearer token payload fields.
# @RELATION: BINDS_TO -> test_auth
def test_create_session(auth_service, auth_repo): def test_create_session(auth_service, auth_repo):
"""Test session token creation""" """Test session token creation"""
user = User( user = User(
username="testuser", username="testuser",
email="test@example.com", email="test@example.com",
password_hash=get_password_hash("testpassword123"), password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL" auth_source="LOCAL",
) )
auth_repo.db.add(user) auth_repo.db.add(user)
@@ -119,6 +137,12 @@ def test_create_session(auth_service, auth_repo):
assert len(session["access_token"]) > 0 assert len(session["access_token"]) > 0
# [/DEF:test_create_session:Function]
# [DEF:test_role_permission_association:Function]
# @PURPOSE: Confirms role-permission many-to-many assignments persist and reload correctly.
# @RELATION: BINDS_TO -> test_auth
def test_role_permission_association(auth_repo): def test_role_permission_association(auth_repo):
"""Test role and permission association""" """Test role and permission association"""
role = Role(name="Admin", description="System administrator") role = Role(name="Admin", description="System administrator")
@@ -139,6 +163,12 @@ def test_role_permission_association(auth_repo):
assert "admin:users:WRITE" in permissions assert "admin:users:WRITE" in permissions
# [/DEF:test_role_permission_association:Function]
# [DEF:test_user_role_association:Function]
# @PURPOSE: Confirms user-role assignment persists and is queryable from repository reads.
# @RELATION: BINDS_TO -> test_auth
def test_user_role_association(auth_repo): def test_user_role_association(auth_repo):
"""Test user and role association""" """Test user and role association"""
role = Role(name="Admin", description="System administrator") role = Role(name="Admin", description="System administrator")
@@ -146,7 +176,7 @@ def test_user_role_association(auth_repo):
username="adminuser", username="adminuser",
email="admin@example.com", email="admin@example.com",
password_hash=get_password_hash("adminpass123"), password_hash=get_password_hash("adminpass123"),
auth_source="LOCAL" auth_source="LOCAL",
) )
user.roles.append(role) user.roles.append(role)
@@ -161,6 +191,12 @@ def test_user_role_association(auth_repo):
assert retrieved_user.roles[0].name == "Admin" assert retrieved_user.roles[0].name == "Admin"
# [/DEF:test_user_role_association:Function]
# [DEF:test_ad_group_mapping:Function]
# @PURPOSE: Verifies AD group mapping rows persist and reference the expected role.
# @RELATION: BINDS_TO -> test_auth
def test_ad_group_mapping(auth_repo): def test_ad_group_mapping(auth_repo):
"""Test AD group mapping""" """Test AD group mapping"""
role = Role(name="ADFS_Admin", description="ADFS administrators") role = Role(name="ADFS_Admin", description="ADFS administrators")
@@ -173,18 +209,28 @@ def test_ad_group_mapping(auth_repo):
auth_repo.db.add(mapping) auth_repo.db.add(mapping)
auth_repo.db.commit() auth_repo.db.commit()
retrieved_mapping = auth_repo.db.query(ADGroupMapping).filter_by(ad_group="DOMAIN\\ADFS_Admins").first() retrieved_mapping = (
auth_repo.db.query(ADGroupMapping)
.filter_by(ad_group="DOMAIN\\ADFS_Admins")
.first()
)
assert retrieved_mapping is not None assert retrieved_mapping is not None
assert retrieved_mapping.role_id == role.id assert retrieved_mapping.role_id == role.id
# [/DEF:test_ad_group_mapping:Function]
# [DEF:test_authenticate_user_updates_last_login:Function]
# @PURPOSE: Verifies successful authentication updates last_login audit field.
# @RELATION: BINDS_TO -> test_auth
def test_authenticate_user_updates_last_login(auth_service, auth_repo): def test_authenticate_user_updates_last_login(auth_service, auth_repo):
"""@SIDE_EFFECT: authenticate_user updates last_login timestamp on success.""" """@SIDE_EFFECT: authenticate_user updates last_login timestamp on success."""
user = User( user = User(
username="loginuser", username="loginuser",
email="login@example.com", email="login@example.com",
password_hash=get_password_hash("mypassword"), password_hash=get_password_hash("mypassword"),
auth_source="LOCAL" auth_source="LOCAL",
) )
auth_repo.db.add(user) auth_repo.db.add(user)
auth_repo.db.commit() auth_repo.db.commit()
@@ -196,6 +242,12 @@ def test_authenticate_user_updates_last_login(auth_service, auth_repo):
assert authenticated.last_login is not None assert authenticated.last_login is not None
# [/DEF:test_authenticate_user_updates_last_login:Function]
# [DEF:test_authenticate_inactive_user:Function]
# @PURPOSE: Verifies inactive accounts are rejected during password authentication.
# @RELATION: BINDS_TO -> test_auth
def test_authenticate_inactive_user(auth_service, auth_repo): def test_authenticate_inactive_user(auth_service, auth_repo):
"""@PRE: User with is_active=False should not authenticate.""" """@PRE: User with is_active=False should not authenticate."""
user = User( user = User(
@@ -203,7 +255,7 @@ def test_authenticate_inactive_user(auth_service, auth_repo):
email="inactive@example.com", email="inactive@example.com",
password_hash=get_password_hash("testpass"), password_hash=get_password_hash("testpass"),
auth_source="LOCAL", auth_source="LOCAL",
is_active=False is_active=False,
) )
auth_repo.db.add(user) auth_repo.db.add(user)
auth_repo.db.commit() auth_repo.db.commit()
@@ -212,12 +264,24 @@ def test_authenticate_inactive_user(auth_service, auth_repo):
assert result is None assert result is None
# [/DEF:test_authenticate_inactive_user:Function]
# [DEF:test_verify_password_empty_hash:Function]
# @PURPOSE: Verifies password verification safely rejects empty or null password hashes.
# @RELATION: BINDS_TO -> test_auth
def test_verify_password_empty_hash(): def test_verify_password_empty_hash():
"""@PRE: verify_password with empty/None hash returns False.""" """@PRE: verify_password with empty/None hash returns False."""
assert verify_password("anypassword", "") is False assert verify_password("anypassword", "") is False
assert verify_password("anypassword", None) is False assert verify_password("anypassword", None) is False
# [/DEF:test_verify_password_empty_hash:Function]
# [DEF:test_provision_adfs_user_new:Function]
# @PURPOSE: Verifies JIT provisioning creates a new ADFS user and maps group-derived roles.
# @RELATION: BINDS_TO -> test_auth
def test_provision_adfs_user_new(auth_service, auth_repo): def test_provision_adfs_user_new(auth_service, auth_repo):
"""@POST: provision_adfs_user creates a new ADFS user with correct roles.""" """@POST: provision_adfs_user creates a new ADFS user with correct roles."""
# Set up a role and AD group mapping # Set up a role and AD group mapping
@@ -232,7 +296,7 @@ def test_provision_adfs_user_new(auth_service, auth_repo):
user_info = { user_info = {
"upn": "newadfsuser@domain.com", "upn": "newadfsuser@domain.com",
"email": "newadfsuser@domain.com", "email": "newadfsuser@domain.com",
"groups": ["DOMAIN\\Viewers"] "groups": ["DOMAIN\\Viewers"],
} }
user = auth_service.provision_adfs_user(user_info) user = auth_service.provision_adfs_user(user_info)
@@ -244,6 +308,12 @@ def test_provision_adfs_user_new(auth_service, auth_repo):
assert user.roles[0].name == "ADFS_Viewer" assert user.roles[0].name == "ADFS_Viewer"
# [/DEF:test_provision_adfs_user_new:Function]
# [DEF:test_provision_adfs_user_existing:Function]
# @PURPOSE: Verifies JIT provisioning reuses existing ADFS user and refreshes role assignments.
# @RELATION: BINDS_TO -> test_auth
def test_provision_adfs_user_existing(auth_service, auth_repo): def test_provision_adfs_user_existing(auth_service, auth_repo):
"""@POST: provision_adfs_user updates roles for existing user.""" """@POST: provision_adfs_user updates roles for existing user."""
# Create existing user # Create existing user
@@ -251,7 +321,7 @@ def test_provision_adfs_user_existing(auth_service, auth_repo):
username="existingadfs@domain.com", username="existingadfs@domain.com",
email="existingadfs@domain.com", email="existingadfs@domain.com",
auth_source="ADFS", auth_source="ADFS",
is_active=True is_active=True,
) )
auth_repo.db.add(existing) auth_repo.db.add(existing)
auth_repo.db.commit() auth_repo.db.commit()
@@ -259,7 +329,7 @@ def test_provision_adfs_user_existing(auth_service, auth_repo):
user_info = { user_info = {
"upn": "existingadfs@domain.com", "upn": "existingadfs@domain.com",
"email": "existingadfs@domain.com", "email": "existingadfs@domain.com",
"groups": [] "groups": [],
} }
user = auth_service.provision_adfs_user(user_info) user = auth_service.provision_adfs_user(user_info)
@@ -269,3 +339,4 @@ def test_provision_adfs_user_existing(auth_service, auth_repo):
# [/DEF:test_auth:Module] # [/DEF:test_auth:Module]
# [/DEF:test_provision_adfs_user_existing:Function]
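The hunks above wire an in-memory SQLite engine and session fixtures for the auth tests. A condensed sketch of that harness in isolation, using a simplified model so it runs standalone; the fixture shape is an assumption, only the engine/sessionmaker setup mirrors the diffed source.

import pytest
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    username = Column(String, unique=True, nullable=False)


# check_same_thread=False lets the single in-memory connection serve the test session.
engine = create_engine("sqlite:///:memory:", connect_args={"check_same_thread": False})
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)


@pytest.fixture
def db_session():
    session = TestingSessionLocal()
    try:
        yield session
    finally:
        session.rollback()
        session.close()


def test_create_user(db_session):
    db_session.add(User(username="testuser"))
    db_session.commit()
    assert db_session.query(User).filter_by(username="testuser").first() is not None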

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.core.auth.config:Module] # [DEF:AuthConfigModule:Module]
# #
# @SEMANTICS: auth, config, settings, jwt, adfs # @SEMANTICS: auth, config, settings, jwt, adfs
# @PURPOSE: Centralized configuration for authentication and authorization. # @PURPOSE: Centralized configuration for authentication and authorization.
@@ -16,6 +16,7 @@ from pydantic_settings import BaseSettings
# @PURPOSE: Holds authentication-related settings. # @PURPOSE: Holds authentication-related settings.
# @PRE: Environment variables may be provided via .env file. # @PRE: Environment variables may be provided via .env file.
# @POST: Returns a configuration object with validated settings. # @POST: Returns a configuration object with validated settings.
# @RELATION: INHERITS -> pydantic_settings.BaseSettings
class AuthConfig(BaseSettings): class AuthConfig(BaseSettings):
# JWT Settings # JWT Settings
SECRET_KEY: str = Field(default="super-secret-key-change-in-production", env="AUTH_SECRET_KEY") SECRET_KEY: str = Field(default="super-secret-key-change-in-production", env="AUTH_SECRET_KEY")
@@ -41,7 +42,8 @@ class AuthConfig(BaseSettings):
# [DEF:auth_config:Variable] # [DEF:auth_config:Variable]
# @PURPOSE: Singleton instance of AuthConfig. # @PURPOSE: Singleton instance of AuthConfig.
# @RELATION: DEPENDS_ON -> AuthConfig
auth_config = AuthConfig() auth_config = AuthConfig()
# [/DEF:auth_config:Variable] # [/DEF:auth_config:Variable]
# [/DEF:backend.src.core.auth.config:Module] # [/DEF:AuthConfigModule:Module]
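The config module keeps JWT and ADFS settings in a pydantic BaseSettings subclass with environment-variable overrides and a module-level singleton. A minimal sketch of that shape; SECRET_KEY and ADFS_CLIENT_ID appear in the diffed source, while the remaining field names and defaults are assumptions.

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class AuthConfig(BaseSettings):
    # Fields can be overridden via AUTH_* environment variables or a .env file.
    model_config = SettingsConfigDict(env_prefix="AUTH_", env_file=".env")

    SECRET_KEY: str = Field(default="super-secret-key-change-in-production")
    ALGORITHM: str = "HS256"                # assumed default
    ACCESS_TOKEN_EXPIRE_MINUTES: int = 30   # assumed default
    ADFS_CLIENT_ID: str = ""                # empty string means ADFS is not configured


auth_config = AuthConfig()  # module-level singleton, as in the annotated source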

View File

@@ -1,11 +1,11 @@
# [DEF:backend.src.core.auth.jwt:Module] # [DEF:AuthJwtModule:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: jwt, token, session, auth # @SEMANTICS: jwt, token, session, auth
# @PURPOSE: JWT token generation and validation logic. # @PURPOSE: JWT token generation and validation logic.
# @LAYER: Core # @LAYER: Core
# @RELATION: DEPENDS_ON -> jose # @RELATION: DEPENDS_ON -> jose
# @RELATION: USES -> backend.src.core.auth.config.auth_config # @RELATION: USES -> auth_config
# #
# @INVARIANT: Tokens must include expiration time and user identifier. # @INVARIANT: Tokens must include expiration time and user identifier.
@@ -21,6 +21,7 @@ from ..logger import belief_scope
# @PURPOSE: Generates a new JWT access token. # @PURPOSE: Generates a new JWT access token.
# @PRE: data dict contains 'sub' (user_id) and optional 'scopes' (roles). # @PRE: data dict contains 'sub' (user_id) and optional 'scopes' (roles).
# @POST: Returns a signed JWT string. # @POST: Returns a signed JWT string.
# @RELATION: DEPENDS_ON -> auth_config
# #
# @PARAM: data (dict) - Payload data for the token. # @PARAM: data (dict) - Payload data for the token.
# @PARAM: expires_delta (Optional[timedelta]) - Custom expiration time. # @PARAM: expires_delta (Optional[timedelta]) - Custom expiration time.
@@ -42,6 +43,7 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -
# @PURPOSE: Decodes and validates a JWT token. # @PURPOSE: Decodes and validates a JWT token.
# @PRE: token is a signed JWT string. # @PRE: token is a signed JWT string.
# @POST: Returns the decoded payload if valid. # @POST: Returns the decoded payload if valid.
# @RELATION: DEPENDS_ON -> auth_config
# #
# @PARAM: token (str) - The JWT to decode. # @PARAM: token (str) - The JWT to decode.
# @RETURN: dict - The decoded payload. # @RETURN: dict - The decoded payload.
@@ -52,4 +54,4 @@ def decode_token(token: str) -> dict:
return payload return payload
# [/DEF:decode_token:Function] # [/DEF:decode_token:Function]
# [/DEF:backend.src.core.auth.jwt:Module] # [/DEF:AuthJwtModule:Module]
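The module's contract is a create/decode pair over python-jose: tokens carry 'sub' plus optional 'scopes' and always get an expiry. A minimal sketch assuming HS256; in the real module the key, algorithm, and default lifetime come from auth_config rather than module constants.

from datetime import datetime, timedelta, timezone
from typing import Optional

from jose import jwt

SECRET_KEY = "change-me"  # placeholder; sourced from auth_config in the real module
ALGORITHM = "HS256"


def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Sign a JWT carrying the payload plus an exp claim."""
    to_encode = dict(data)
    to_encode["exp"] = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=30))
    return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)


def decode_token(token: str) -> dict:
    """Validate signature and expiry, returning the payload (raises JWTError otherwise)."""
    return jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])


token = create_access_token({"sub": "user-id", "scopes": ["Admin"]})
assert decode_token(token)["sub"] == "user-id"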

View File

@@ -1,10 +1,10 @@
# [DEF:backend.src.core.auth.logger:Module] # [DEF:AuthLoggerModule:Module]
# #
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: auth, logger, audit, security # @SEMANTICS: auth, logger, audit, security
# @PURPOSE: Audit logging for security-related events. # @PURPOSE: Audit logging for security-related events.
# @LAYER: Core # @LAYER: Core
# @RELATION: USES -> backend.src.core.logger.belief_scope # @RELATION: USES -> belief_scope
# #
# @INVARIANT: Must not log sensitive data like passwords or full tokens. # @INVARIANT: Must not log sensitive data like passwords or full tokens.
@@ -17,6 +17,7 @@ from datetime import datetime
# @PURPOSE: Logs a security-related event for audit trails. # @PURPOSE: Logs a security-related event for audit trails.
# @PRE: event_type and username are strings. # @PRE: event_type and username are strings.
# @POST: Security event is written to the application log. # @POST: Security event is written to the application log.
# @RELATION: USES -> logger
# @PARAM: event_type (str) - Type of event (e.g., LOGIN_SUCCESS, PERMISSION_DENIED). # @PARAM: event_type (str) - Type of event (e.g., LOGIN_SUCCESS, PERMISSION_DENIED).
# @PARAM: username (str) - The user involved in the event. # @PARAM: username (str) - The user involved in the event.
# @PARAM: details (dict) - Additional non-sensitive metadata. # @PARAM: details (dict) - Additional non-sensitive metadata.
@@ -29,4 +30,4 @@ def log_security_event(event_type: str, username: str, details: dict = None):
logger.info(msg) logger.info(msg)
# [/DEF:log_security_event:Function] # [/DEF:log_security_event:Function]
# [/DEF:backend.src.core.auth.logger:Module] # [/DEF:AuthLoggerModule:Module]
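log_security_event's contract is an audit line with event type, username, and non-sensitive metadata, never passwords or full tokens. A minimal sketch using the standard logging module; the logger name, message format, and key-filtering approach are assumptions.

import logging
from datetime import datetime, timezone

logger = logging.getLogger("auth.audit")


def log_security_event(event_type: str, username: str, details: dict = None) -> None:
    """Write a structured audit line, dropping obviously sensitive keys."""
    safe_details = {
        key: value
        for key, value in (details or {}).items()
        if key not in {"password", "token"}
    }
    logger.info(
        "SECURITY_EVENT type=%s user=%s at=%s details=%s",
        event_type,
        username,
        datetime.now(timezone.utc).isoformat(),
        safe_details,
    )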

View File

@@ -1,10 +1,10 @@
# [DEF:backend.src.core.auth.oauth:Module] # [DEF:AuthOauthModule:Module]
# #
# @SEMANTICS: auth, oauth, oidc, adfs # @SEMANTICS: auth, oauth, oidc, adfs
# @PURPOSE: ADFS OIDC configuration and client using Authlib. # @PURPOSE: ADFS OIDC configuration and client using Authlib.
# @LAYER: Core # @LAYER: Core
# @RELATION: DEPENDS_ON -> authlib # @RELATION: DEPENDS_ON -> authlib
# @RELATION: USES -> backend.src.core.auth.config.auth_config # @RELATION: USES -> auth_config
# #
# @INVARIANT: Must use secure OIDC flows. # @INVARIANT: Must use secure OIDC flows.
@@ -15,6 +15,7 @@ from .config import auth_config
# [DEF:oauth:Variable] # [DEF:oauth:Variable]
# @PURPOSE: Global Authlib OAuth registry. # @PURPOSE: Global Authlib OAuth registry.
# @RELATION: DEPENDS_ON -> OAuth
oauth = OAuth() oauth = OAuth()
# [/DEF:oauth:Variable] # [/DEF:oauth:Variable]
@@ -22,6 +23,8 @@ oauth = OAuth()
# @PURPOSE: Registers the ADFS OIDC client. # @PURPOSE: Registers the ADFS OIDC client.
# @PRE: ADFS configuration is provided in auth_config. # @PRE: ADFS configuration is provided in auth_config.
# @POST: ADFS client is registered in oauth registry. # @POST: ADFS client is registered in oauth registry.
# @RELATION: USES -> oauth
# @RELATION: USES -> auth_config
def register_adfs(): def register_adfs():
if auth_config.ADFS_CLIENT_ID: if auth_config.ADFS_CLIENT_ID:
oauth.register( oauth.register(
@@ -39,6 +42,7 @@ def register_adfs():
# @PURPOSE: Checks if ADFS is properly configured. # @PURPOSE: Checks if ADFS is properly configured.
# @PRE: None. # @PRE: None.
# @POST: Returns True if ADFS client is registered, False otherwise. # @POST: Returns True if ADFS client is registered, False otherwise.
# @RELATION: USES -> oauth
# @RETURN: bool - Configuration status. # @RETURN: bool - Configuration status.
def is_adfs_configured() -> bool: def is_adfs_configured() -> bool:
"""Check if ADFS OAuth client is registered.""" """Check if ADFS OAuth client is registered."""
@@ -48,4 +52,4 @@ def is_adfs_configured() -> bool:
# Initial registration # Initial registration
register_adfs() register_adfs()
# [/DEF:backend.src.core.auth.oauth:Module] # [/DEF:AuthOauthModule:Module]
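The OAuth module registers an ADFS OIDC client in a global Authlib registry only when a client id is configured, and is_adfs_configured reports whether that registration happened. A minimal sketch with Authlib's Starlette integration; the metadata URL, scope, and explicit parameters are assumptions (the real module reads them from auth_config).

from authlib.integrations.starlette_client import OAuth

oauth = OAuth()


def register_adfs(client_id: str, client_secret: str, metadata_url: str) -> None:
    """Register the ADFS OIDC client only when a client id is provided."""
    if not client_id:
        return
    oauth.register(
        name="adfs",
        client_id=client_id,
        client_secret=client_secret,
        server_metadata_url=metadata_url,  # ADFS .well-known/openid-configuration URL
        client_kwargs={"scope": "openid email profile"},
    )


def is_adfs_configured() -> bool:
    """True once the 'adfs' client is present in the registry."""
    return oauth.create_client("adfs") is not None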

View File

@@ -1,4 +1,4 @@
# [DEF:AuthRepository:Module] # [DEF:AuthRepositoryModule:Module]
# @TIER: CRITICAL # @TIER: CRITICAL
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: auth, repository, database, user, role, permission # @SEMANTICS: auth, repository, database, user, role, permission
@@ -12,6 +12,9 @@
# @RELATION: DEPENDS_ON ->[belief_scope:Function] # @RELATION: DEPENDS_ON ->[belief_scope:Function]
# @INVARIANT: All database read/write operations must execute via the injected SQLAlchemy session boundary. # @INVARIANT: All database read/write operations must execute via the injected SQLAlchemy session boundary.
# @DATA_CONTRACT: Session -> [User | Role | Permission | UserDashboardPreference] # @DATA_CONTRACT: Session -> [User | Role | Permission | UserDashboardPreference]
# @PRE: Database connection is active.
# @POST: Provides valid access to identity data.
# @SIDE_EFFECT: None at module level.
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
from typing import List, Optional from typing import List, Optional
@@ -23,6 +26,10 @@ from ..logger import belief_scope, logger
# [DEF:AuthRepository:Class] # [DEF:AuthRepository:Class]
# @PURPOSE: Provides low-level CRUD operations for identity and authorization records. # @PURPOSE: Provides low-level CRUD operations for identity and authorization records.
# @PRE: Database session is bound.
# @POST: Entity instances returned safely.
# @SIDE_EFFECT: Performs database reads.
# @RELATION: DEPENDS_ON -> sqlalchemy.orm.Session
class AuthRepository: class AuthRepository:
# @PURPOSE: Initialize repository with database session. # @PURPOSE: Initialize repository with database session.
def __init__(self, db: Session): def __init__(self, db: Session):
@@ -32,6 +39,7 @@ class AuthRepository:
# @PURPOSE: Retrieve user by UUID. # @PURPOSE: Retrieve user by UUID.
# @PRE: user_id is a valid UUID string. # @PRE: user_id is a valid UUID string.
# @POST: Returns User object if found, else None. # @POST: Returns User object if found, else None.
# @RELATION: DEPENDS_ON -> User
def get_user_by_id(self, user_id: str) -> Optional[User]: def get_user_by_id(self, user_id: str) -> Optional[User]:
with belief_scope("AuthRepository.get_user_by_id"): with belief_scope("AuthRepository.get_user_by_id"):
logger.reason(f"Fetching user by id: {user_id}") logger.reason(f"Fetching user by id: {user_id}")
@@ -44,6 +52,7 @@ class AuthRepository:
# @PURPOSE: Retrieve user by username. # @PURPOSE: Retrieve user by username.
# @PRE: username is a non-empty string. # @PRE: username is a non-empty string.
# @POST: Returns User object if found, else None. # @POST: Returns User object if found, else None.
# @RELATION: DEPENDS_ON -> User
def get_user_by_username(self, username: str) -> Optional[User]: def get_user_by_username(self, username: str) -> Optional[User]:
with belief_scope("AuthRepository.get_user_by_username"): with belief_scope("AuthRepository.get_user_by_username"):
logger.reason(f"Fetching user by username: {username}") logger.reason(f"Fetching user by username: {username}")
@@ -54,6 +63,8 @@ class AuthRepository:
# [DEF:get_role_by_id:Function] # [DEF:get_role_by_id:Function]
# @PURPOSE: Retrieve role by UUID with permissions preloaded. # @PURPOSE: Retrieve role by UUID with permissions preloaded.
# @RELATION: DEPENDS_ON -> Role
# @RELATION: DEPENDS_ON -> Permission
def get_role_by_id(self, role_id: str) -> Optional[Role]: def get_role_by_id(self, role_id: str) -> Optional[Role]:
with belief_scope("AuthRepository.get_role_by_id"): with belief_scope("AuthRepository.get_role_by_id"):
return self.db.query(Role).options(selectinload(Role.permissions)).filter(Role.id == role_id).first() return self.db.query(Role).options(selectinload(Role.permissions)).filter(Role.id == role_id).first()
@@ -61,6 +72,7 @@ class AuthRepository:
# [DEF:get_role_by_name:Function] # [DEF:get_role_by_name:Function]
# @PURPOSE: Retrieve role by unique name. # @PURPOSE: Retrieve role by unique name.
# @RELATION: DEPENDS_ON -> Role
def get_role_by_name(self, name: str) -> Optional[Role]: def get_role_by_name(self, name: str) -> Optional[Role]:
with belief_scope("AuthRepository.get_role_by_name"): with belief_scope("AuthRepository.get_role_by_name"):
return self.db.query(Role).filter(Role.name == name).first() return self.db.query(Role).filter(Role.name == name).first()
@@ -68,6 +80,7 @@ class AuthRepository:
# [DEF:get_permission_by_id:Function] # [DEF:get_permission_by_id:Function]
# @PURPOSE: Retrieve permission by UUID. # @PURPOSE: Retrieve permission by UUID.
# @RELATION: DEPENDS_ON -> Permission
def get_permission_by_id(self, permission_id: str) -> Optional[Permission]: def get_permission_by_id(self, permission_id: str) -> Optional[Permission]:
with belief_scope("AuthRepository.get_permission_by_id"): with belief_scope("AuthRepository.get_permission_by_id"):
return self.db.query(Permission).filter(Permission.id == permission_id).first() return self.db.query(Permission).filter(Permission.id == permission_id).first()
@@ -75,6 +88,7 @@ class AuthRepository:
# [DEF:get_permission_by_resource_action:Function] # [DEF:get_permission_by_resource_action:Function]
# @PURPOSE: Retrieve permission by resource and action tuple. # @PURPOSE: Retrieve permission by resource and action tuple.
# @RELATION: DEPENDS_ON -> Permission
def get_permission_by_resource_action(self, resource: str, action: str) -> Optional[Permission]: def get_permission_by_resource_action(self, resource: str, action: str) -> Optional[Permission]:
with belief_scope("AuthRepository.get_permission_by_resource_action"): with belief_scope("AuthRepository.get_permission_by_resource_action"):
return self.db.query(Permission).filter( return self.db.query(Permission).filter(
@@ -85,6 +99,7 @@ class AuthRepository:
# [DEF:list_permissions:Function] # [DEF:list_permissions:Function]
# @PURPOSE: List all system permissions. # @PURPOSE: List all system permissions.
# @RELATION: DEPENDS_ON -> Permission
def list_permissions(self) -> List[Permission]: def list_permissions(self) -> List[Permission]:
with belief_scope("AuthRepository.list_permissions"): with belief_scope("AuthRepository.list_permissions"):
return self.db.query(Permission).all() return self.db.query(Permission).all()
@@ -92,6 +107,7 @@ class AuthRepository:
# [DEF:get_user_dashboard_preference:Function] # [DEF:get_user_dashboard_preference:Function]
# @PURPOSE: Retrieve dashboard filters/preferences for a user. # @PURPOSE: Retrieve dashboard filters/preferences for a user.
# @RELATION: DEPENDS_ON -> UserDashboardPreference
def get_user_dashboard_preference(self, user_id: str) -> Optional[UserDashboardPreference]: def get_user_dashboard_preference(self, user_id: str) -> Optional[UserDashboardPreference]:
with belief_scope("AuthRepository.get_user_dashboard_preference"): with belief_scope("AuthRepository.get_user_dashboard_preference"):
return self.db.query(UserDashboardPreference).filter( return self.db.query(UserDashboardPreference).filter(
@@ -103,6 +119,8 @@ class AuthRepository:
# @PURPOSE: Retrieve roles that match a list of AD group names. # @PURPOSE: Retrieve roles that match a list of AD group names.
# @PRE: groups is a list of strings representing AD group identifiers. # @PRE: groups is a list of strings representing AD group identifiers.
# @POST: Returns a list of Role objects mapped to the provided AD groups. # @POST: Returns a list of Role objects mapped to the provided AD groups.
# @RELATION: DEPENDS_ON -> Role
# @RELATION: DEPENDS_ON -> ADGroupMapping
def get_roles_by_ad_groups(self, groups: List[str]) -> List[Role]: def get_roles_by_ad_groups(self, groups: List[str]) -> List[Role]:
with belief_scope("AuthRepository.get_roles_by_ad_groups"): with belief_scope("AuthRepository.get_roles_by_ad_groups"):
logger.reason(f"Fetching roles for AD groups: {groups}") logger.reason(f"Fetching roles for AD groups: {groups}")
@@ -115,4 +133,4 @@ class AuthRepository:
# [/DEF:AuthRepository:Class] # [/DEF:AuthRepository:Class]
# [/DEF:AuthRepository:Module] # [/DEF:AuthRepositoryModule:Module]
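The repository's read paths all go through the injected session: simple filters for users and permissions, selectinload to preload role permissions, and a join from ADGroupMapping to resolve roles for AD groups. A condensed sketch of those three shapes, assuming the project's models from src.models.auth and omitting the belief_scope/logging wrappers.

from typing import List, Optional

from sqlalchemy.orm import Session, selectinload

from src.models.auth import ADGroupMapping, Role, User


class AuthRepository:
    """Read helpers over the injected SQLAlchemy session."""

    def __init__(self, db: Session) -> None:
        self.db = db

    def get_user_by_username(self, username: str) -> Optional[User]:
        return self.db.query(User).filter(User.username == username).first()

    def get_role_by_id(self, role_id: str) -> Optional[Role]:
        # selectinload avoids N+1 queries when callers iterate role.permissions.
        return (
            self.db.query(Role)
            .options(selectinload(Role.permissions))
            .filter(Role.id == role_id)
            .first()
        )

    def get_roles_by_ad_groups(self, groups: List[str]) -> List[Role]:
        if not groups:
            return []
        return (
            self.db.query(Role)
            .join(ADGroupMapping, ADGroupMapping.role_id == Role.id)
            .filter(ADGroupMapping.ad_group.in_(groups))
            .all()
        )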

View File

@@ -1,9 +1,9 @@
# [DEF:backend.src.core.auth.security:Module] # [DEF:AuthSecurityModule:Module]
# #
# @SEMANTICS: security, password, hashing, bcrypt # @SEMANTICS: security, password, hashing, bcrypt
# @PURPOSE: Utility for password hashing and verification using Passlib. # @PURPOSE: Utility for password hashing and verification using Passlib.
# @LAYER: Core # @LAYER: Core
# @RELATION: DEPENDS_ON -> passlib # @RELATION: DEPENDS_ON -> bcrypt
# #
# @INVARIANT: Uses bcrypt for hashing with standard work factor. # @INVARIANT: Uses bcrypt for hashing with standard work factor.
@@ -15,6 +15,7 @@ import bcrypt
# @PURPOSE: Verifies a plain password against a hashed password. # @PURPOSE: Verifies a plain password against a hashed password.
# @PRE: plain_password is a string, hashed_password is a bcrypt hash. # @PRE: plain_password is a string, hashed_password is a bcrypt hash.
# @POST: Returns True if password matches, False otherwise. # @POST: Returns True if password matches, False otherwise.
# @RELATION: DEPENDS_ON -> bcrypt
# #
# @PARAM: plain_password (str) - The unhashed password. # @PARAM: plain_password (str) - The unhashed password.
# @PARAM: hashed_password (str) - The stored hash. # @PARAM: hashed_password (str) - The stored hash.
@@ -35,6 +36,7 @@ def verify_password(plain_password: str, hashed_password: str) -> bool:
# @PURPOSE: Generates a bcrypt hash for a plain password. # @PURPOSE: Generates a bcrypt hash for a plain password.
# @PRE: password is a string. # @PRE: password is a string.
# @POST: Returns a secure bcrypt hash string. # @POST: Returns a secure bcrypt hash string.
# @RELATION: DEPENDS_ON -> bcrypt
# #
# @PARAM: password (str) - The password to hash. # @PARAM: password (str) - The password to hash.
# @RETURN: str - The generated hash. # @RETURN: str - The generated hash.
@@ -42,4 +44,4 @@ def get_password_hash(password: str) -> str:
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8") return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
# [/DEF:get_password_hash:Function] # [/DEF:get_password_hash:Function]
# [/DEF:backend.src.core.auth.security:Module] # [/DEF:AuthSecurityModule:Module]
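The security module pairs get_password_hash with verify_password on top of the bcrypt package, and the tests elsewhere in this commit require that empty or None hashes verify as False rather than raising. A minimal sketch of both functions under that contract; the defensive ValueError handling is an assumption.

import bcrypt


def get_password_hash(password: str) -> str:
    """Hash with a per-password salt at bcrypt's default work factor."""
    return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")


def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Return False for empty/None or malformed hashes instead of raising."""
    if not hashed_password:
        return False
    try:
        return bcrypt.checkpw(
            plain_password.encode("utf-8"), hashed_password.encode("utf-8")
        )
    except ValueError:
        # A non-bcrypt string is treated as a failed match.
        return False


assert verify_password("secret", get_password_hash("secret"))
assert verify_password("anything", "") is False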

View File

@@ -45,7 +45,9 @@ class ConfigManager:
def __init__(self, config_path: str = "config.json"): def __init__(self, config_path: str = "config.json"):
with belief_scope("ConfigManager.__init__"): with belief_scope("ConfigManager.__init__"):
if not isinstance(config_path, str) or not config_path: if not isinstance(config_path, str) or not config_path:
logger.explore("Invalid config_path provided", extra={"path": config_path}) logger.explore(
"Invalid config_path provided", extra={"path": config_path}
)
raise ValueError("config_path must be a non-empty string") raise ValueError("config_path must be a non-empty string")
logger.reason(f"Initializing ConfigManager with legacy path: {config_path}") logger.reason(f"Initializing ConfigManager with legacy path: {config_path}")
@@ -57,10 +59,14 @@ class ConfigManager:
configure_logger(self.config.settings.logging) configure_logger(self.config.settings.logging)
if not isinstance(self.config, AppConfig): if not isinstance(self.config, AppConfig):
logger.explore("Config loading resulted in invalid type", extra={"type": type(self.config)}) logger.explore(
"Config loading resulted in invalid type",
extra={"type": type(self.config)},
)
raise TypeError("self.config must be an instance of AppConfig") raise TypeError("self.config must be an instance of AppConfig")
logger.reflect("ConfigManager initialization complete") logger.reflect("ConfigManager initialization complete")
# [/DEF:__init__:Function] # [/DEF:__init__:Function]
# [DEF:_default_config:Function] # [DEF:_default_config:Function]
@@ -69,6 +75,7 @@ class ConfigManager:
with belief_scope("ConfigManager._default_config"): with belief_scope("ConfigManager._default_config"):
logger.reason("Building default AppConfig fallback") logger.reason("Building default AppConfig fallback")
return AppConfig(environments=[], settings=GlobalSettings()) return AppConfig(environments=[], settings=GlobalSettings())
# [/DEF:_default_config:Function] # [/DEF:_default_config:Function]
# [DEF:_sync_raw_payload_from_config:Function] # [DEF:_sync_raw_payload_from_config:Function]
@@ -83,14 +90,19 @@ class ConfigManager:
logger.reason( logger.reason(
"Synchronized raw payload from typed config", "Synchronized raw payload from typed config",
extra={ extra={
"environments_count": len(merged_payload.get("environments", []) or []), "environments_count": len(
merged_payload.get("environments", []) or []
),
"has_settings": "settings" in merged_payload, "has_settings": "settings" in merged_payload,
"extra_sections": sorted( "extra_sections": sorted(
key for key in merged_payload.keys() if key not in {"environments", "settings"} key
for key in merged_payload.keys()
if key not in {"environments", "settings"}
), ),
}, },
) )
return merged_payload return merged_payload
# [/DEF:_sync_raw_payload_from_config:Function] # [/DEF:_sync_raw_payload_from_config:Function]
# [DEF:_load_from_legacy_file:Function] # [DEF:_load_from_legacy_file:Function]
@@ -104,14 +116,19 @@ class ConfigManager:
) )
return {} return {}
logger.reason("Loading legacy config file", extra={"path": str(self.config_path)}) logger.reason(
"Loading legacy config file", extra={"path": str(self.config_path)}
)
with self.config_path.open("r", encoding="utf-8") as fh: with self.config_path.open("r", encoding="utf-8") as fh:
payload = json.load(fh) payload = json.load(fh)
if not isinstance(payload, dict): if not isinstance(payload, dict):
logger.explore( logger.explore(
"Legacy config payload is not a JSON object", "Legacy config payload is not a JSON object",
extra={"path": str(self.config_path), "type": type(payload).__name__}, extra={
"path": str(self.config_path),
"type": type(payload).__name__,
},
) )
raise ValueError("Legacy config payload must be a JSON object") raise ValueError("Legacy config payload must be a JSON object")
@@ -120,15 +137,23 @@ class ConfigManager:
extra={"path": str(self.config_path), "keys": sorted(payload.keys())}, extra={"path": str(self.config_path), "keys": sorted(payload.keys())},
) )
return payload return payload
# [/DEF:_load_from_legacy_file:Function] # [/DEF:_load_from_legacy_file:Function]
# [DEF:_get_record:Function] # [DEF:_get_record:Function]
# @PURPOSE: Resolve global configuration record from DB. # @PURPOSE: Resolve global configuration record from DB.
def _get_record(self, session: Session) -> Optional[AppConfigRecord]: def _get_record(self, session: Session) -> Optional[AppConfigRecord]:
with belief_scope("ConfigManager._get_record"): with belief_scope("ConfigManager._get_record"):
record = session.query(AppConfigRecord).filter(AppConfigRecord.id == "global").first() record = (
logger.reason("Resolved app config record", extra={"exists": record is not None}) session.query(AppConfigRecord)
.filter(AppConfigRecord.id == "global")
.first()
)
logger.reason(
"Resolved app config record", extra={"exists": record is not None}
)
return record return record
# [/DEF:_get_record:Function] # [/DEF:_get_record:Function]
# [DEF:_load_config:Function] # [DEF:_load_config:Function]
@@ -139,7 +164,10 @@ class ConfigManager:
try: try:
record = self._get_record(session) record = self._get_record(session)
if record and isinstance(record.payload, dict): if record and isinstance(record.payload, dict):
logger.reason("Loading configuration from database", extra={"record_id": record.id}) logger.reason(
"Loading configuration from database",
extra={"record_id": record.id},
)
self.raw_payload = dict(record.payload) self.raw_payload = dict(record.payload)
config = AppConfig.model_validate( config = AppConfig.model_validate(
{ {
@@ -182,7 +210,9 @@ class ConfigManager:
self._save_config_to_db(config, session=session) self._save_config_to_db(config, session=session)
return config return config
logger.reason("No persisted config found; falling back to default configuration") logger.reason(
"No persisted config found; falling back to default configuration"
)
config = self._default_config() config = self._default_config()
self.raw_payload = config.model_dump() self.raw_payload = config.model_dump()
self._save_config_to_db(config, session=session) self._save_config_to_db(config, session=session)
@@ -203,6 +233,7 @@ class ConfigManager:
raise raise
finally: finally:
session.close() session.close()
# [/DEF:_load_config:Function] # [/DEF:_load_config:Function]
# [DEF:_sync_environment_records:Function] # [DEF:_sync_environment_records:Function]
@@ -210,29 +241,32 @@ class ConfigManager:
def _sync_environment_records(self, session: Session, config: AppConfig) -> None: def _sync_environment_records(self, session: Session, config: AppConfig) -> None:
with belief_scope("ConfigManager._sync_environment_records"): with belief_scope("ConfigManager._sync_environment_records"):
configured_envs = list(config.environments or []) configured_envs = list(config.environments or [])
configured_ids = {
str(environment.id or "").strip()
for environment in configured_envs
if str(environment.id or "").strip()
}
persisted_records = session.query(EnvironmentRecord).all() persisted_records = session.query(EnvironmentRecord).all()
persisted_by_id = {str(record.id or "").strip(): record for record in persisted_records} persisted_by_id = {
str(record.id or "").strip(): record for record in persisted_records
}
for environment in configured_envs: for environment in configured_envs:
normalized_id = str(environment.id or "").strip() normalized_id = str(environment.id or "").strip()
if not normalized_id: if not normalized_id:
continue continue
display_name = str(environment.name or normalized_id).strip() or normalized_id display_name = (
str(environment.name or normalized_id).strip() or normalized_id
)
normalized_url = str(environment.url or "").strip() normalized_url = str(environment.url or "").strip()
credentials_id = str(environment.username or "").strip() or normalized_id credentials_id = (
str(environment.username or "").strip() or normalized_id
)
record = persisted_by_id.get(normalized_id) record = persisted_by_id.get(normalized_id)
if record is None: if record is None:
logger.reason( logger.reason(
"Creating relational environment record from typed config", "Creating relational environment record from typed config",
extra={"environment_id": normalized_id, "environment_name": display_name}, extra={
"environment_id": normalized_id,
"environment_name": display_name,
},
) )
session.add( session.add(
EnvironmentRecord( EnvironmentRecord(
@@ -248,20 +282,13 @@ class ConfigManager:
record.url = normalized_url record.url = normalized_url
record.credentials_id = credentials_id record.credentials_id = credentials_id
for record in persisted_records:
normalized_id = str(record.id or "").strip()
if normalized_id and normalized_id not in configured_ids:
logger.reason(
"Removing stale relational environment record absent from typed config",
extra={"environment_id": normalized_id},
)
session.delete(record)
# [/DEF:_sync_environment_records:Function] # [/DEF:_sync_environment_records:Function]
# [DEF:_save_config_to_db:Function] # [DEF:_save_config_to_db:Function]
# @PURPOSE: Persist provided AppConfig into the global DB configuration record. # @PURPOSE: Persist provided AppConfig into the global DB configuration record.
def _save_config_to_db(self, config: AppConfig, session: Optional[Session] = None) -> None: def _save_config_to_db(
self, config: AppConfig, session: Optional[Session] = None
) -> None:
with belief_scope("ConfigManager._save_config_to_db"): with belief_scope("ConfigManager._save_config_to_db"):
owns_session = session is None owns_session = session is None
db = session or SessionLocal() db = session or SessionLocal()
@@ -274,7 +301,10 @@ class ConfigManager:
record = AppConfigRecord(id="global", payload=payload) record = AppConfigRecord(id="global", payload=payload)
db.add(record) db.add(record)
else: else:
logger.reason("Updating existing global app config record", extra={"record_id": record.id}) logger.reason(
"Updating existing global app config record",
extra={"record_id": record.id},
)
record.payload = payload record.payload = payload
self._sync_environment_records(db, config) self._sync_environment_records(db, config)
@@ -283,7 +313,9 @@ class ConfigManager:
logger.reason( logger.reason(
"Configuration persisted to database", "Configuration persisted to database",
extra={ extra={
"environments_count": len(payload.get("environments", []) or []), "environments_count": len(
payload.get("environments", []) or []
),
"payload_keys": sorted(payload.keys()), "payload_keys": sorted(payload.keys()),
}, },
) )
@@ -294,6 +326,7 @@ class ConfigManager:
finally: finally:
if owns_session: if owns_session:
db.close() db.close()
# [/DEF:_save_config_to_db:Function] # [/DEF:_save_config_to_db:Function]
# [DEF:save:Function] # [DEF:save:Function]
@@ -302,6 +335,7 @@ class ConfigManager:
with belief_scope("ConfigManager.save"): with belief_scope("ConfigManager.save"):
logger.reason("Persisting current in-memory configuration") logger.reason("Persisting current in-memory configuration")
self._save_config_to_db(self.config) self._save_config_to_db(self.config)
# [/DEF:save:Function] # [/DEF:save:Function]
# [DEF:get_config:Function] # [DEF:get_config:Function]
@@ -309,6 +343,7 @@ class ConfigManager:
def get_config(self) -> AppConfig: def get_config(self) -> AppConfig:
with belief_scope("ConfigManager.get_config"): with belief_scope("ConfigManager.get_config"):
return self.config return self.config
# [/DEF:get_config:Function] # [/DEF:get_config:Function]
# [DEF:get_payload:Function] # [DEF:get_payload:Function]
@@ -316,6 +351,7 @@ class ConfigManager:
def get_payload(self) -> dict[str, Any]: def get_payload(self) -> dict[str, Any]:
with belief_scope("ConfigManager.get_payload"): with belief_scope("ConfigManager.get_payload"):
return self._sync_raw_payload_from_config() return self._sync_raw_payload_from_config()
# [/DEF:get_payload:Function] # [/DEF:get_payload:Function]
# [DEF:save_config:Function] # [DEF:save_config:Function]
@@ -345,8 +381,12 @@ class ConfigManager:
self._save_config_to_db(typed_config) self._save_config_to_db(typed_config)
return self.config return self.config
logger.explore("Unsupported config type supplied to save_config", extra={"type": type(config).__name__}) logger.explore(
"Unsupported config type supplied to save_config",
extra={"type": type(config).__name__},
)
raise TypeError("config must be AppConfig or dict") raise TypeError("config must be AppConfig or dict")
# [/DEF:save_config:Function] # [/DEF:save_config:Function]
# [DEF:update_global_settings:Function] # [DEF:update_global_settings:Function]
@@ -357,6 +397,7 @@ class ConfigManager:
self.config.settings = settings self.config.settings = settings
self.save() self.save()
return self.config return self.config
# [/DEF:update_global_settings:Function] # [/DEF:update_global_settings:Function]
# [DEF:validate_path:Function] # [DEF:validate_path:Function]
@@ -381,8 +422,11 @@ class ConfigManager:
logger.reason("Path validation succeeded", extra={"path": str(target)}) logger.reason("Path validation succeeded", extra={"path": str(target)})
return True, "OK" return True, "OK"
except Exception as exc: except Exception as exc:
logger.explore("Path validation failed", extra={"path": path, "error": str(exc)}) logger.explore(
"Path validation failed", extra={"path": path, "error": str(exc)}
)
return False, str(exc) return False, str(exc)
# [/DEF:validate_path:Function] # [/DEF:validate_path:Function]
# [DEF:get_environments:Function] # [DEF:get_environments:Function]
@@ -390,6 +434,7 @@ class ConfigManager:
def get_environments(self) -> List[Environment]: def get_environments(self) -> List[Environment]:
with belief_scope("ConfigManager.get_environments"): with belief_scope("ConfigManager.get_environments"):
return list(self.config.environments) return list(self.config.environments)
# [/DEF:get_environments:Function] # [/DEF:get_environments:Function]
# [DEF:has_environments:Function] # [DEF:has_environments:Function]
@@ -397,6 +442,7 @@ class ConfigManager:
def has_environments(self) -> bool: def has_environments(self) -> bool:
with belief_scope("ConfigManager.has_environments"): with belief_scope("ConfigManager.has_environments"):
return len(self.config.environments) > 0 return len(self.config.environments) > 0
# [/DEF:has_environments:Function] # [/DEF:has_environments:Function]
# [DEF:get_environment:Function] # [DEF:get_environment:Function]
@@ -411,13 +457,21 @@ class ConfigManager:
if env.id == normalized or env.name == normalized: if env.id == normalized or env.name == normalized:
return env return env
return None return None
# [/DEF:get_environment:Function] # [/DEF:get_environment:Function]
# [DEF:add_environment:Function] # [DEF:add_environment:Function]
# @PURPOSE: Upsert environment by id into configuration and persist. # @PURPOSE: Upsert environment by id into configuration and persist.
def add_environment(self, env: Environment) -> AppConfig: def add_environment(self, env: Environment) -> AppConfig:
with belief_scope("ConfigManager.add_environment", f"env_id={env.id}"): with belief_scope("ConfigManager.add_environment", f"env_id={env.id}"):
existing_index = next((i for i, item in enumerate(self.config.environments) if item.id == env.id), None) existing_index = next(
(
i
for i, item in enumerate(self.config.environments)
if item.id == env.id
),
None,
)
if env.is_default: if env.is_default:
for item in self.config.environments: for item in self.config.environments:
item.is_default = False item.is_default = False
@@ -426,14 +480,20 @@ class ConfigManager:
logger.reason("Appending new environment", extra={"env_id": env.id}) logger.reason("Appending new environment", extra={"env_id": env.id})
self.config.environments.append(env) self.config.environments.append(env)
else: else:
logger.reason("Replacing existing environment during add", extra={"env_id": env.id}) logger.reason(
"Replacing existing environment during add",
extra={"env_id": env.id},
)
self.config.environments[existing_index] = env self.config.environments[existing_index] = env
if len(self.config.environments) == 1 and not any(item.is_default for item in self.config.environments): if len(self.config.environments) == 1 and not any(
item.is_default for item in self.config.environments
):
self.config.environments[0].is_default = True self.config.environments[0].is_default = True
self.save() self.save()
return self.config return self.config
# [/DEF:add_environment:Function] # [/DEF:add_environment:Function]
# [DEF:update_environment:Function] # [DEF:update_environment:Function]
@@ -461,8 +521,11 @@ class ConfigManager:
self.save() self.save()
return True return True
logger.explore("Environment update skipped; env not found", extra={"env_id": env_id}) logger.explore(
"Environment update skipped; env not found", extra={"env_id": env_id}
)
return False return False
# [/DEF:update_environment:Function] # [/DEF:update_environment:Function]
# [DEF:delete_environment:Function] # [DEF:delete_environment:Function]
@@ -471,22 +534,35 @@ class ConfigManager:
with belief_scope("ConfigManager.delete_environment", f"env_id={env_id}"): with belief_scope("ConfigManager.delete_environment", f"env_id={env_id}"):
before = len(self.config.environments) before = len(self.config.environments)
removed = [env for env in self.config.environments if env.id == env_id] removed = [env for env in self.config.environments if env.id == env_id]
self.config.environments = [env for env in self.config.environments if env.id != env_id] self.config.environments = [
env for env in self.config.environments if env.id != env_id
]
if len(self.config.environments) == before: if len(self.config.environments) == before:
logger.explore("Environment delete skipped; env not found", extra={"env_id": env_id}) logger.explore(
"Environment delete skipped; env not found",
extra={"env_id": env_id},
)
return False return False
if removed and removed[0].is_default and self.config.environments: if removed and removed[0].is_default and self.config.environments:
self.config.environments[0].is_default = True self.config.environments[0].is_default = True
if self.config.settings.default_environment_id == env_id: if self.config.settings.default_environment_id == env_id:
replacement = next((env.id for env in self.config.environments if env.is_default), None) replacement = next(
(env.id for env in self.config.environments if env.is_default), None
)
self.config.settings.default_environment_id = replacement self.config.settings.default_environment_id = replacement
logger.reason("Environment deleted", extra={"env_id": env_id, "remaining": len(self.config.environments)}) logger.reason(
"Environment deleted",
extra={"env_id": env_id, "remaining": len(self.config.environments)},
)
self.save() self.save()
return True return True
# [/DEF:delete_environment:Function] # [/DEF:delete_environment:Function]
# [/DEF:ConfigManager:Class] # [/DEF:ConfigManager:Class]
# [/DEF:ConfigManager:Module] # [/DEF:ConfigManager:Module]
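The add_environment hunk above implements an upsert keyed on environment id with default-flag bookkeeping: a new default clears previous defaults, and a lone environment becomes the default. A standalone sketch of just that rule using dataclasses instead of the project's Environment/AppConfig models, so it can run without the rest of ConfigManager.

from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Environment:
    id: str
    name: str
    is_default: bool = False


@dataclass
class AppConfig:
    environments: List[Environment] = field(default_factory=list)


def add_environment(config: AppConfig, env: Environment) -> AppConfig:
    """Upsert by id; new defaults clear old ones; a lone environment becomes default."""
    existing_index: Optional[int] = next(
        (i for i, item in enumerate(config.environments) if item.id == env.id), None
    )
    if env.is_default:
        for item in config.environments:
            item.is_default = False
    if existing_index is None:
        config.environments.append(env)
    else:
        config.environments[existing_index] = env
    if len(config.environments) == 1 and not any(
        item.is_default for item in config.environments
    ):
        config.environments[0].is_default = True
    return config


cfg = add_environment(AppConfig(), Environment(id="ss-dev", name="Dev"))
assert cfg.environments[0].is_default is True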

View File

@@ -44,6 +44,7 @@ def reset_logger_state():
# [DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function] # [DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level. # @PURPOSE: Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG. # @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Entry, Action, and Exit tags at DEBUG level. # @POST: Logs are verified to contain Entry, Action, and Exit tags at DEBUG level.
@@ -76,6 +77,7 @@ def test_belief_scope_logs_entry_action_exit_at_debug(caplog):
# [DEF:test_belief_scope_error_handling:Function] # [DEF:test_belief_scope_error_handling:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that belief_scope logs Coherence:Failed on exception. # @PURPOSE: Test that belief_scope logs Coherence:Failed on exception.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG. # @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Coherence:Failed tag. # @POST: Logs are verified to contain Coherence:Failed tag.
@@ -108,6 +110,7 @@ def test_belief_scope_error_handling(caplog):
# [DEF:test_belief_scope_success_coherence:Function] # [DEF:test_belief_scope_success_coherence:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that belief_scope logs Coherence:OK on success. # @PURPOSE: Test that belief_scope logs Coherence:OK on success.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG. # @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Coherence:OK tag. # @POST: Logs are verified to contain Coherence:OK tag.
@@ -135,6 +138,7 @@ def test_belief_scope_success_coherence(caplog):
# [DEF:test_belief_scope_not_visible_at_info:Function] # [DEF:test_belief_scope_not_visible_at_info:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level. # @PURPOSE: Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level.
# @PRE: belief_scope is available. caplog fixture is used. # @PRE: belief_scope is available. caplog fixture is used.
# @POST: Entry/Exit/Coherence logs are not captured at INFO level. # @POST: Entry/Exit/Coherence logs are not captured at INFO level.
@@ -157,6 +161,7 @@ def test_belief_scope_not_visible_at_info(caplog):
# [DEF:test_task_log_level_default:Function] # [DEF:test_task_log_level_default:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that default task log level is INFO. # @PURPOSE: Test that default task log level is INFO.
# @PRE: None. # @PRE: None.
# @POST: Default level is INFO. # @POST: Default level is INFO.
@@ -168,6 +173,7 @@ def test_task_log_level_default():
# [DEF:test_should_log_task_level:Function] # [DEF:test_should_log_task_level:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that should_log_task_level correctly filters log levels. # @PURPOSE: Test that should_log_task_level correctly filters log levels.
# @PRE: None. # @PRE: None.
# @POST: Filtering works correctly for all level combinations. # @POST: Filtering works correctly for all level combinations.
@@ -182,6 +188,7 @@ def test_should_log_task_level():
# [DEF:test_configure_logger_task_log_level:Function] # [DEF:test_configure_logger_task_log_level:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that configure_logger updates task_log_level. # @PURPOSE: Test that configure_logger updates task_log_level.
# @PRE: LoggingConfig is available. # @PRE: LoggingConfig is available.
# @POST: task_log_level is updated correctly. # @POST: task_log_level is updated correctly.
@@ -200,6 +207,7 @@ def test_configure_logger_task_log_level():
# [DEF:test_enable_belief_state_flag:Function] # [DEF:test_enable_belief_state_flag:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test that enable_belief_state flag controls belief_scope logging. # @PURPOSE: Test that enable_belief_state flag controls belief_scope logging.
# @PRE: LoggingConfig is available. caplog fixture is used. # @PRE: LoggingConfig is available. caplog fixture is used.
# @POST: belief_scope logs are controlled by the flag. # @POST: belief_scope logs are controlled by the flag.
@@ -229,6 +237,7 @@ def test_enable_belief_state_flag(caplog):
# [DEF:test_belief_scope_missing_anchor:Function] # [DEF:test_belief_scope_missing_anchor:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test @PRE condition: anchor_id must be provided # @PURPOSE: Test @PRE condition: anchor_id must be provided
def test_belief_scope_missing_anchor(): def test_belief_scope_missing_anchor():
"""Test that belief_scope enforces anchor_id to be provided.""" """Test that belief_scope enforces anchor_id to be provided."""
@@ -241,6 +250,7 @@ def test_belief_scope_missing_anchor():
# [/DEF:test_belief_scope_missing_anchor:Function] # [/DEF:test_belief_scope_missing_anchor:Function]
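The BINDS_TO relations added above all tie these tests to the belief_scope contract. Purely as a hedged illustration of that style of test: the sketch below assumes the import path and the Entry/Exit tag wording, neither of which is taken from this diff; the real tests in this file remain authoritative.

import logging

from src.core.logger import belief_scope  # assumed import path for this sketch

def test_belief_scope_entry_and_exit_visible_at_debug(caplog):
    """Sketch: Entry/Exit markers should appear only when DEBUG capture is active."""
    with caplog.at_level(logging.DEBUG):
        with belief_scope("example.anchor"):
            pass
    messages = " ".join(record.getMessage() for record in caplog.records)
    assert "Entry" in messages  # tag wording assumed; the real tests define the exact format
    assert "Exit" in messages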
# [DEF:test_configure_logger_post_conditions:Function] # [DEF:test_configure_logger_post_conditions:Function]
# @RELATION: BINDS_TO -> test_logger
# @PURPOSE: Test @POST condition: Logger level, handlers, belief state flag, and task log level are updated. # @PURPOSE: Test @POST condition: Logger level, handlers, belief state flag, and task log level are updated.
def test_configure_logger_post_conditions(tmp_path): def test_configure_logger_post_conditions(tmp_path):
"""Test that configure_logger satisfies all @POST conditions.""" """Test that configure_logger satisfies all @POST conditions."""

View File

@@ -19,13 +19,17 @@ from ..logger import logger, belief_scope
# [DEF:MigrationArchiveParser:Class] # [DEF:MigrationArchiveParser:Class]
# @PURPOSE: Extract normalized dashboards/charts/datasets metadata from ZIP archives. # @PURPOSE: Extract normalized dashboards/charts/datasets metadata from ZIP archives.
# @RELATION: CONTAINS -> [extract_objects_from_zip, _collect_yaml_objects, _normalize_object_payload]
class MigrationArchiveParser: class MigrationArchiveParser:
# [DEF:extract_objects_from_zip:Function] # [DEF:extract_objects_from_zip:Function]
# @PURPOSE: Extract object catalogs from Superset archive. # @PURPOSE: Extract object catalogs from Superset archive.
# @RELATION: DEPENDS_ON -> _collect_yaml_objects
# @PRE: zip_path points to a valid readable ZIP. # @PRE: zip_path points to a valid readable ZIP.
# @POST: Returns object lists grouped by resource type. # @POST: Returns object lists grouped by resource type.
# @RETURN: Dict[str, List[Dict[str, Any]]] # @RETURN: Dict[str, List[Dict[str, Any]]]
def extract_objects_from_zip(self, zip_path: str) -> Dict[str, List[Dict[str, Any]]]: def extract_objects_from_zip(
self, zip_path: str
) -> Dict[str, List[Dict[str, Any]]]:
with belief_scope("MigrationArchiveParser.extract_objects_from_zip"): with belief_scope("MigrationArchiveParser.extract_objects_from_zip"):
result: Dict[str, List[Dict[str, Any]]] = { result: Dict[str, List[Dict[str, Any]]] = {
"dashboards": [], "dashboards": [],
@@ -37,20 +41,28 @@ class MigrationArchiveParser:
with zipfile.ZipFile(zip_path, "r") as zip_file: with zipfile.ZipFile(zip_path, "r") as zip_file:
zip_file.extractall(temp_dir) zip_file.extractall(temp_dir)
result["dashboards"] = self._collect_yaml_objects(temp_dir, "dashboards") result["dashboards"] = self._collect_yaml_objects(
temp_dir, "dashboards"
)
result["charts"] = self._collect_yaml_objects(temp_dir, "charts") result["charts"] = self._collect_yaml_objects(temp_dir, "charts")
result["datasets"] = self._collect_yaml_objects(temp_dir, "datasets") result["datasets"] = self._collect_yaml_objects(temp_dir, "datasets")
return result return result
# [/DEF:extract_objects_from_zip:Function] # [/DEF:extract_objects_from_zip:Function]
# [DEF:_collect_yaml_objects:Function] # [DEF:_collect_yaml_objects:Function]
# @PURPOSE: Read and normalize YAML manifests for one object type. # @PURPOSE: Read and normalize YAML manifests for one object type.
# @RELATION: DEPENDS_ON -> _normalize_object_payload
# @PRE: object_type is one of dashboards/charts/datasets. # @PRE: object_type is one of dashboards/charts/datasets.
# @POST: Returns only valid normalized objects. # @POST: Returns only valid normalized objects.
def _collect_yaml_objects(self, root_dir: Path, object_type: str) -> List[Dict[str, Any]]: def _collect_yaml_objects(
self, root_dir: Path, object_type: str
) -> List[Dict[str, Any]]:
with belief_scope("MigrationArchiveParser._collect_yaml_objects"): with belief_scope("MigrationArchiveParser._collect_yaml_objects"):
files = list(root_dir.glob(f"**/{object_type}/**/*.yaml")) + list(root_dir.glob(f"**/{object_type}/*.yaml")) files = list(root_dir.glob(f"**/{object_type}/**/*.yaml")) + list(
root_dir.glob(f"**/{object_type}/*.yaml")
)
objects: List[Dict[str, Any]] = [] objects: List[Dict[str, Any]] = []
for file_path in set(files): for file_path in set(files):
try: try:
@@ -66,13 +78,16 @@ class MigrationArchiveParser:
exc, exc,
) )
return objects return objects
# [/DEF:_collect_yaml_objects:Function] # [/DEF:_collect_yaml_objects:Function]
# [DEF:_normalize_object_payload:Function] # [DEF:_normalize_object_payload:Function]
# @PURPOSE: Convert raw YAML payload to stable diff signature shape. # @PURPOSE: Convert raw YAML payload to stable diff signature shape.
# @PRE: payload is parsed YAML mapping. # @PRE: payload is parsed YAML mapping.
# @POST: Returns normalized descriptor with `uuid`, `title`, and `signature`. # @POST: Returns normalized descriptor with `uuid`, `title`, and `signature`.
def _normalize_object_payload(self, payload: Dict[str, Any], object_type: str) -> Optional[Dict[str, Any]]: def _normalize_object_payload(
self, payload: Dict[str, Any], object_type: str
) -> Optional[Dict[str, Any]]:
with belief_scope("MigrationArchiveParser._normalize_object_payload"): with belief_scope("MigrationArchiveParser._normalize_object_payload"):
if not isinstance(payload, dict): if not isinstance(payload, dict):
return None return None
@@ -111,7 +126,8 @@ class MigrationArchiveParser:
"uuid": str(uuid), "uuid": str(uuid),
"title": title or f"Chart {uuid}", "title": title or f"Chart {uuid}",
"signature": json.dumps(signature, sort_keys=True, default=str), "signature": json.dumps(signature, sort_keys=True, default=str),
"dataset_uuid": payload.get("datasource_uuid") or payload.get("dataset_uuid"), "dataset_uuid": payload.get("datasource_uuid")
or payload.get("dataset_uuid"),
} }
if object_type == "datasets": if object_type == "datasets":
@@ -132,6 +148,7 @@ class MigrationArchiveParser:
} }
return None return None
# [/DEF:_normalize_object_payload:Function] # [/DEF:_normalize_object_payload:Function]
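To make the parser's output shape concrete: each YAML payload is reduced to a descriptor carrying uuid, title and a deterministic signature string (json.dumps with sort_keys=True, default=str), which is what the dry-run diff later compares. The chart example below is a sketch with made-up data; the exact set of fields folded into the signature is illustrative rather than copied from the implementation.

import json

# Hypothetical chart payload as it might appear in an exported YAML manifest.
chart_payload = {
    "uuid": "3f2c9a6e-aaaa-bbbb-cccc-111122223333",
    "slice_name": "Daily Orders",
    "viz_type": "echarts_timeseries",
    "params": "{\"metrics\": [\"count\"]}",
    "datasource_uuid": "9ab24d10-dddd-eeee-ffff-444455556666",
}

signature = json.dumps(
    {
        "title": chart_payload.get("slice_name"),
        "viz_type": chart_payload.get("viz_type"),
        "params": chart_payload.get("params"),
    },
    sort_keys=True,  # stable key order keeps the signature comparable across runs
    default=str,
)

normalized = {
    "uuid": str(chart_payload["uuid"]),
    "title": chart_payload.get("slice_name") or f"Chart {chart_payload['uuid']}",
    "signature": signature,
    "dataset_uuid": chart_payload.get("datasource_uuid") or chart_payload.get("dataset_uuid"),
}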

View File

@@ -27,6 +27,7 @@ from ..utils.fileio import create_temp_file
# [DEF:MigrationDryRunService:Class] # [DEF:MigrationDryRunService:Class]
# @PURPOSE: Build deterministic diff/risk payload for migration pre-flight. # @PURPOSE: Build deterministic diff/risk payload for migration pre-flight.
# @RELATION: CONTAINS -> [__init__, run, _load_db_mapping, _accumulate_objects, _index_by_uuid, _build_object_diff, _build_target_signatures, _build_risks]
class MigrationDryRunService: class MigrationDryRunService:
# [DEF:__init__:Function] # [DEF:__init__:Function]
# @PURPOSE: Wire parser dependency for archive object extraction. # @PURPOSE: Wire parser dependency for archive object extraction.
@@ -34,10 +35,12 @@ class MigrationDryRunService:
# @POST: Service is ready to calculate dry-run payload. # @POST: Service is ready to calculate dry-run payload.
def __init__(self, parser: MigrationArchiveParser | None = None): def __init__(self, parser: MigrationArchiveParser | None = None):
self.parser = parser or MigrationArchiveParser() self.parser = parser or MigrationArchiveParser()
# [/DEF:__init__:Function] # [/DEF:__init__:Function]
# [DEF:run:Function] # [DEF:run:Function]
# @PURPOSE: Execute full dry-run computation for selected dashboards. # @PURPOSE: Execute full dry-run computation for selected dashboards.
# @RELATION: DEPENDS_ON -> [_load_db_mapping, _accumulate_objects, _build_target_signatures, _build_object_diff, _build_risks]
# @PRE: source/target clients are authenticated and selection validated by caller. # @PRE: source/target clients are authenticated and selection validated by caller.
# @POST: Returns JSON-serializable pre-flight payload with summary, diff and risk. # @POST: Returns JSON-serializable pre-flight payload with summary, diff and risk.
# @SIDE_EFFECT: Reads source export archives and target metadata via network. # @SIDE_EFFECT: Reads source export archives and target metadata via network.
@@ -49,9 +52,15 @@ class MigrationDryRunService:
db: Session, db: Session,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
with belief_scope("MigrationDryRunService.run"): with belief_scope("MigrationDryRunService.run"):
logger.explore("[MigrationDryRunService.run][EXPLORE] starting dry-run pipeline") logger.explore(
"[MigrationDryRunService.run][EXPLORE] starting dry-run pipeline"
)
engine = MigrationEngine() engine = MigrationEngine()
db_mapping = self._load_db_mapping(db, selection) if selection.replace_db_config else {} db_mapping = (
self._load_db_mapping(db, selection)
if selection.replace_db_config
else {}
)
transformed = {"dashboards": {}, "charts": {}, "datasets": {}} transformed = {"dashboards": {}, "charts": {}, "datasets": {}}
dashboards_preview = source_client.get_dashboards_summary() dashboards_preview = source_client.get_dashboards_summary()
@@ -63,7 +72,9 @@ class MigrationDryRunService:
for dashboard_id in selection.selected_ids: for dashboard_id in selection.selected_ids:
exported_content, _ = source_client.export_dashboard(int(dashboard_id)) exported_content, _ = source_client.export_dashboard(int(dashboard_id))
with create_temp_file(content=exported_content, suffix=".zip") as source_zip: with create_temp_file(
content=exported_content, suffix=".zip"
) as source_zip:
with create_temp_file(suffix=".zip") as transformed_zip: with create_temp_file(suffix=".zip") as transformed_zip:
success = engine.transform_zip( success = engine.transform_zip(
str(source_zip), str(source_zip),
@@ -74,23 +85,46 @@ class MigrationDryRunService:
fix_cross_filters=selection.fix_cross_filters, fix_cross_filters=selection.fix_cross_filters,
) )
if not success: if not success:
raise ValueError(f"Failed to transform export archive for dashboard {dashboard_id}") raise ValueError(
extracted = self.parser.extract_objects_from_zip(str(transformed_zip)) f"Failed to transform export archive for dashboard {dashboard_id}"
)
extracted = self.parser.extract_objects_from_zip(
str(transformed_zip)
)
self._accumulate_objects(transformed, extracted) self._accumulate_objects(transformed, extracted)
source_objects = {key: list(value.values()) for key, value in transformed.items()} source_objects = {
key: list(value.values()) for key, value in transformed.items()
}
target_objects = self._build_target_signatures(target_client) target_objects = self._build_target_signatures(target_client)
diff = { diff = {
"dashboards": self._build_object_diff(source_objects["dashboards"], target_objects["dashboards"]), "dashboards": self._build_object_diff(
"charts": self._build_object_diff(source_objects["charts"], target_objects["charts"]), source_objects["dashboards"], target_objects["dashboards"]
"datasets": self._build_object_diff(source_objects["datasets"], target_objects["datasets"]), ),
"charts": self._build_object_diff(
source_objects["charts"], target_objects["charts"]
),
"datasets": self._build_object_diff(
source_objects["datasets"], target_objects["datasets"]
),
} }
risk = self._build_risks(source_objects, target_objects, diff, target_client) risk = self._build_risks(
source_objects, target_objects, diff, target_client
)
summary = { summary = {
"dashboards": {action: len(diff["dashboards"][action]) for action in ("create", "update", "delete")}, "dashboards": {
"charts": {action: len(diff["charts"][action]) for action in ("create", "update", "delete")}, action: len(diff["dashboards"][action])
"datasets": {action: len(diff["datasets"][action]) for action in ("create", "update", "delete")}, for action in ("create", "update", "delete")
},
"charts": {
action: len(diff["charts"][action])
for action in ("create", "update", "delete")
},
"datasets": {
action: len(diff["datasets"][action])
for action in ("create", "update", "delete")
},
"selected_dashboards": len(selection.selected_ids), "selected_dashboards": len(selection.selected_ids),
} }
selected_titles = [ selected_titles = [
@@ -99,7 +133,9 @@ class MigrationDryRunService:
if dash_id in selected_preview if dash_id in selected_preview
] ]
logger.reason("[MigrationDryRunService.run][REASON] dry-run payload assembled") logger.reason(
"[MigrationDryRunService.run][REASON] dry-run payload assembled"
)
return { return {
"generated_at": datetime.now(timezone.utc).isoformat(), "generated_at": datetime.now(timezone.utc).isoformat(),
"selection": selection.model_dump(), "selection": selection.model_dump(),
@@ -108,42 +144,61 @@ class MigrationDryRunService:
"summary": summary, "summary": summary,
"risk": score_risks(risk), "risk": score_risks(risk),
} }
# [/DEF:run:Function] # [/DEF:run:Function]
# [DEF:_load_db_mapping:Function] # [DEF:_load_db_mapping:Function]
# @PURPOSE: Resolve UUID mapping for optional DB config replacement. # @PURPOSE: Resolve UUID mapping for optional DB config replacement.
def _load_db_mapping(self, db: Session, selection: DashboardSelection) -> Dict[str, str]: def _load_db_mapping(
rows = db.query(DatabaseMapping).filter( self, db: Session, selection: DashboardSelection
) -> Dict[str, str]:
rows = (
db.query(DatabaseMapping)
.filter(
DatabaseMapping.source_env_id == selection.source_env_id, DatabaseMapping.source_env_id == selection.source_env_id,
DatabaseMapping.target_env_id == selection.target_env_id, DatabaseMapping.target_env_id == selection.target_env_id,
).all() )
.all()
)
return {row.source_db_uuid: row.target_db_uuid for row in rows} return {row.source_db_uuid: row.target_db_uuid for row in rows}
# [/DEF:_load_db_mapping:Function] # [/DEF:_load_db_mapping:Function]
# [DEF:_accumulate_objects:Function] # [DEF:_accumulate_objects:Function]
# @PURPOSE: Merge extracted resources by UUID to avoid duplicates. # @PURPOSE: Merge extracted resources by UUID to avoid duplicates.
def _accumulate_objects(self, target: Dict[str, Dict[str, Dict[str, Any]]], source: Dict[str, List[Dict[str, Any]]]) -> None: def _accumulate_objects(
self,
target: Dict[str, Dict[str, Dict[str, Any]]],
source: Dict[str, List[Dict[str, Any]]],
) -> None:
for object_type in ("dashboards", "charts", "datasets"): for object_type in ("dashboards", "charts", "datasets"):
for item in source.get(object_type, []): for item in source.get(object_type, []):
uuid = item.get("uuid") uuid = item.get("uuid")
if uuid: if uuid:
target[object_type][str(uuid)] = item target[object_type][str(uuid)] = item
# [/DEF:_accumulate_objects:Function] # [/DEF:_accumulate_objects:Function]
# [DEF:_index_by_uuid:Function] # [DEF:_index_by_uuid:Function]
# @PURPOSE: Build UUID-index map for normalized resources. # @PURPOSE: Build UUID-index map for normalized resources.
def _index_by_uuid(self, objects: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: def _index_by_uuid(
self, objects: List[Dict[str, Any]]
) -> Dict[str, Dict[str, Any]]:
indexed: Dict[str, Dict[str, Any]] = {} indexed: Dict[str, Dict[str, Any]] = {}
for obj in objects: for obj in objects:
uuid = obj.get("uuid") uuid = obj.get("uuid")
if uuid: if uuid:
indexed[str(uuid)] = obj indexed[str(uuid)] = obj
return indexed return indexed
# [/DEF:_index_by_uuid:Function] # [/DEF:_index_by_uuid:Function]
# [DEF:_build_object_diff:Function] # [DEF:_build_object_diff:Function]
# @PURPOSE: Compute create/update/delete buckets by UUID+signature. # @PURPOSE: Compute create/update/delete buckets by UUID+signature.
def _build_object_diff(self, source_objects: List[Dict[str, Any]], target_objects: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]: # @RELATION: DEPENDS_ON -> _index_by_uuid
def _build_object_diff(
self, source_objects: List[Dict[str, Any]], target_objects: List[Dict[str, Any]]
) -> Dict[str, List[Dict[str, Any]]]:
target_index = self._index_by_uuid(target_objects) target_index = self._index_by_uuid(target_objects)
created: List[Dict[str, Any]] = [] created: List[Dict[str, Any]] = []
updated: List[Dict[str, Any]] = [] updated: List[Dict[str, Any]] = []
@@ -155,67 +210,128 @@ class MigrationDryRunService:
created.append({"uuid": source_uuid, "title": source_obj.get("title")}) created.append({"uuid": source_uuid, "title": source_obj.get("title")})
continue continue
if source_obj.get("signature") != target_obj.get("signature"): if source_obj.get("signature") != target_obj.get("signature"):
updated.append({ updated.append(
{
"uuid": source_uuid, "uuid": source_uuid,
"title": source_obj.get("title"), "title": source_obj.get("title"),
"target_title": target_obj.get("title"), "target_title": target_obj.get("title"),
}) }
)
return {"create": created, "update": updated, "delete": deleted} return {"create": created, "update": updated, "delete": deleted}
# [/DEF:_build_object_diff:Function] # [/DEF:_build_object_diff:Function]
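A toy walkthrough of the bucketing above: objects present only in the source go to create, and objects present on both sides whose signatures differ go to update. The data is invented for illustration.

source = [
    {"uuid": "a", "title": "Sales", "signature": "s1"},
    {"uuid": "b", "title": "Orders", "signature": "s2"},
]
target = [
    {"uuid": "b", "title": "Orders (old)", "signature": "s9"},
]

target_index = {obj["uuid"]: obj for obj in target}
created, updated = [], []
for obj in source:
    existing = target_index.get(obj["uuid"])
    if existing is None:
        created.append({"uuid": obj["uuid"], "title": obj["title"]})
    elif obj["signature"] != existing["signature"]:
        updated.append(
            {"uuid": obj["uuid"], "title": obj["title"], "target_title": existing["title"]}
        )

# created -> [{"uuid": "a", "title": "Sales"}]
# updated -> [{"uuid": "b", "title": "Orders", "target_title": "Orders (old)"}]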
# [DEF:_build_target_signatures:Function] # [DEF:_build_target_signatures:Function]
# @PURPOSE: Pull target metadata and normalize it into comparable signatures. # @PURPOSE: Pull target metadata and normalize it into comparable signatures.
def _build_target_signatures(self, client: SupersetClient) -> Dict[str, List[Dict[str, Any]]]: def _build_target_signatures(
_, dashboards = client.get_dashboards(query={ self, client: SupersetClient
"columns": ["uuid", "dashboard_title", "slug", "position_json", "json_metadata", "description", "owners"], ) -> Dict[str, List[Dict[str, Any]]]:
}) _, dashboards = client.get_dashboards(
_, datasets = client.get_datasets(query={ query={
"columns": ["uuid", "table_name", "schema", "database_uuid", "sql", "columns", "metrics"], "columns": [
}) "uuid",
_, charts = client.get_charts(query={ "dashboard_title",
"columns": ["uuid", "slice_name", "viz_type", "params", "query_context", "datasource_uuid", "dataset_uuid"], "slug",
}) "position_json",
"json_metadata",
"description",
"owners",
],
}
)
_, datasets = client.get_datasets(
query={
"columns": [
"uuid",
"table_name",
"schema",
"database_uuid",
"sql",
"columns",
"metrics",
],
}
)
_, charts = client.get_charts(
query={
"columns": [
"uuid",
"slice_name",
"viz_type",
"params",
"query_context",
"datasource_uuid",
"dataset_uuid",
],
}
)
return { return {
"dashboards": [{ "dashboards": [
{
"uuid": str(item.get("uuid")), "uuid": str(item.get("uuid")),
"title": item.get("dashboard_title"), "title": item.get("dashboard_title"),
"owners": item.get("owners") or [], "owners": item.get("owners") or [],
"signature": json.dumps({ "signature": json.dumps(
{
"title": item.get("dashboard_title"), "title": item.get("dashboard_title"),
"slug": item.get("slug"), "slug": item.get("slug"),
"position_json": item.get("position_json"), "position_json": item.get("position_json"),
"json_metadata": item.get("json_metadata"), "json_metadata": item.get("json_metadata"),
"description": item.get("description"), "description": item.get("description"),
"owners": item.get("owners"), "owners": item.get("owners"),
}, sort_keys=True, default=str), },
} for item in dashboards if item.get("uuid")], sort_keys=True,
"datasets": [{ default=str,
),
}
for item in dashboards
if item.get("uuid")
],
"datasets": [
{
"uuid": str(item.get("uuid")), "uuid": str(item.get("uuid")),
"title": item.get("table_name"), "title": item.get("table_name"),
"database_uuid": item.get("database_uuid"), "database_uuid": item.get("database_uuid"),
"signature": json.dumps({ "signature": json.dumps(
{
"title": item.get("table_name"), "title": item.get("table_name"),
"schema": item.get("schema"), "schema": item.get("schema"),
"database_uuid": item.get("database_uuid"), "database_uuid": item.get("database_uuid"),
"sql": item.get("sql"), "sql": item.get("sql"),
"columns": item.get("columns"), "columns": item.get("columns"),
"metrics": item.get("metrics"), "metrics": item.get("metrics"),
}, sort_keys=True, default=str), },
} for item in datasets if item.get("uuid")], sort_keys=True,
"charts": [{ default=str,
),
}
for item in datasets
if item.get("uuid")
],
"charts": [
{
"uuid": str(item.get("uuid")), "uuid": str(item.get("uuid")),
"title": item.get("slice_name") or item.get("name"), "title": item.get("slice_name") or item.get("name"),
"dataset_uuid": item.get("datasource_uuid") or item.get("dataset_uuid"), "dataset_uuid": item.get("datasource_uuid")
"signature": json.dumps({ or item.get("dataset_uuid"),
"signature": json.dumps(
{
"title": item.get("slice_name") or item.get("name"), "title": item.get("slice_name") or item.get("name"),
"viz_type": item.get("viz_type"), "viz_type": item.get("viz_type"),
"params": item.get("params"), "params": item.get("params"),
"query_context": item.get("query_context"), "query_context": item.get("query_context"),
"datasource_uuid": item.get("datasource_uuid"), "datasource_uuid": item.get("datasource_uuid"),
"dataset_uuid": item.get("dataset_uuid"), "dataset_uuid": item.get("dataset_uuid"),
}, sort_keys=True, default=str), },
} for item in charts if item.get("uuid")], sort_keys=True,
default=str,
),
} }
for item in charts
if item.get("uuid")
],
}
# [/DEF:_build_target_signatures:Function] # [/DEF:_build_target_signatures:Function]
# [DEF:_build_risks:Function] # [DEF:_build_risks:Function]
@@ -228,6 +344,7 @@ class MigrationDryRunService:
target_client: SupersetClient, target_client: SupersetClient,
) -> List[Dict[str, Any]]: ) -> List[Dict[str, Any]]:
return build_risks(source_objects, target_objects, diff, target_client) return build_risks(source_objects, target_objects, diff, target_client)
# [/DEF:_build_risks:Function] # [/DEF:_build_risks:Function]

View File

@@ -3,8 +3,9 @@
# @SEMANTICS: migration, dry_run, risk, scoring, preflight # @SEMANTICS: migration, dry_run, risk, scoring, preflight
# @PURPOSE: Compute deterministic migration risk items and aggregate score for dry-run reporting. # @PURPOSE: Compute deterministic migration risk items and aggregate score for dry-run reporting.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[backend.src.core.superset_client.SupersetClient] # @RELATION: DEPENDS_ON -> [backend.src.core.superset_client.SupersetClient]
# @RELATION: [DISPATCHES] ->[backend.src.core.migration.dry_run_orchestrator.MigrationDryRunService.run] # @RELATION: DISPATCHED_BY -> [backend.src.core.migration.dry_run_orchestrator.MigrationDryRunService.run]
# @RELATION: CONTAINS -> [index_by_uuid, extract_owner_identifiers, build_risks, score_risks]
# @INVARIANT: Risk scoring must remain bounded to [0,100] and preserve severity-to-weight mapping. # @INVARIANT: Risk scoring must remain bounded to [0,100] and preserve severity-to-weight mapping.
# @TEST_CONTRACT: [source_objects,target_objects,diff,target_client] -> [List[RiskItem]] # @TEST_CONTRACT: [source_objects,target_objects,diff,target_client] -> [List[RiskItem]]
# @TEST_SCENARIO: [overwrite_update_objects] -> [medium overwrite_existing risk is emitted for each update diff item] # @TEST_SCENARIO: [overwrite_update_objects] -> [medium overwrite_existing risk is emitted for each update diff item]
@@ -41,6 +42,8 @@ def index_by_uuid(objects: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
indexed[str(uuid)] = obj indexed[str(uuid)] = obj
logger.reflect("UUID index built", extra={"indexed_count": len(indexed)}) logger.reflect("UUID index built", extra={"indexed_count": len(indexed)})
return indexed return indexed
# [/DEF:index_by_uuid:Function] # [/DEF:index_by_uuid:Function]
@@ -66,13 +69,18 @@ def extract_owner_identifiers(owners: Any) -> List[str]:
elif owner is not None: elif owner is not None:
ids.append(str(owner)) ids.append(str(owner))
normalized_ids = sorted(set(ids)) normalized_ids = sorted(set(ids))
logger.reflect("Owner identifiers normalized", extra={"owner_count": len(normalized_ids)}) logger.reflect(
"Owner identifiers normalized", extra={"owner_count": len(normalized_ids)}
)
return normalized_ids return normalized_ids
# [/DEF:extract_owner_identifiers:Function] # [/DEF:extract_owner_identifiers:Function]
# [DEF:build_risks:Function] # [DEF:build_risks:Function]
# @PURPOSE: Build risk list from computed diffs and target catalog state. # @PURPOSE: Build risk list from computed diffs and target catalog state.
# @RELATION: DEPENDS_ON -> [index_by_uuid, extract_owner_identifiers]
# @PRE: source_objects/target_objects/diff contain dashboards/charts/datasets keys with expected list structures. # @PRE: source_objects/target_objects/diff contain dashboards/charts/datasets keys with expected list structures.
# @PRE: target_client is authenticated/usable for database list retrieval. # @PRE: target_client is authenticated/usable for database list retrieval.
# @POST: Returns list of deterministic risk items derived from overwrite, missing datasource, reference, and owner mismatch checks. # @POST: Returns list of deterministic risk items derived from overwrite, missing datasource, reference, and owner mismatch checks.
@@ -94,39 +102,47 @@ def build_risks(
risks: List[Dict[str, Any]] = [] risks: List[Dict[str, Any]] = []
for object_type in ("dashboards", "charts", "datasets"): for object_type in ("dashboards", "charts", "datasets"):
for item in diff[object_type]["update"]: for item in diff[object_type]["update"]:
risks.append({ risks.append(
{
"code": "overwrite_existing", "code": "overwrite_existing",
"severity": "medium", "severity": "medium",
"object_type": object_type[:-1], "object_type": object_type[:-1],
"object_uuid": item["uuid"], "object_uuid": item["uuid"],
"message": f"Object will be updated in target: {item.get('title') or item['uuid']}", "message": f"Object will be updated in target: {item.get('title') or item['uuid']}",
}) }
)
target_dataset_uuids = set(index_by_uuid(target_objects["datasets"]).keys()) target_dataset_uuids = set(index_by_uuid(target_objects["datasets"]).keys())
_, target_databases = target_client.get_databases(query={"columns": ["uuid"]}) _, target_databases = target_client.get_databases(query={"columns": ["uuid"]})
target_database_uuids = {str(item.get("uuid")) for item in target_databases if item.get("uuid")} target_database_uuids = {
str(item.get("uuid")) for item in target_databases if item.get("uuid")
}
for dataset in source_objects["datasets"]: for dataset in source_objects["datasets"]:
db_uuid = dataset.get("database_uuid") db_uuid = dataset.get("database_uuid")
if db_uuid and str(db_uuid) not in target_database_uuids: if db_uuid and str(db_uuid) not in target_database_uuids:
risks.append({ risks.append(
{
"code": "missing_datasource", "code": "missing_datasource",
"severity": "high", "severity": "high",
"object_type": "dataset", "object_type": "dataset",
"object_uuid": dataset.get("uuid"), "object_uuid": dataset.get("uuid"),
"message": f"Target datasource is missing for dataset {dataset.get('title') or dataset.get('uuid')}", "message": f"Target datasource is missing for dataset {dataset.get('title') or dataset.get('uuid')}",
}) }
)
for chart in source_objects["charts"]: for chart in source_objects["charts"]:
ds_uuid = chart.get("dataset_uuid") ds_uuid = chart.get("dataset_uuid")
if ds_uuid and str(ds_uuid) not in target_dataset_uuids: if ds_uuid and str(ds_uuid) not in target_dataset_uuids:
risks.append({ risks.append(
{
"code": "breaking_reference", "code": "breaking_reference",
"severity": "high", "severity": "high",
"object_type": "chart", "object_type": "chart",
"object_uuid": chart.get("uuid"), "object_uuid": chart.get("uuid"),
"message": f"Chart references dataset not found on target: {ds_uuid}", "message": f"Chart references dataset not found on target: {ds_uuid}",
}) }
)
source_dash = index_by_uuid(source_objects["dashboards"]) source_dash = index_by_uuid(source_objects["dashboards"])
target_dash = index_by_uuid(target_objects["dashboards"]) target_dash = index_by_uuid(target_objects["dashboards"])
@@ -138,15 +154,19 @@ def build_risks(
source_owners = extract_owner_identifiers(source_obj.get("owners")) source_owners = extract_owner_identifiers(source_obj.get("owners"))
target_owners = extract_owner_identifiers(target_obj.get("owners")) target_owners = extract_owner_identifiers(target_obj.get("owners"))
if source_owners and target_owners and source_owners != target_owners: if source_owners and target_owners and source_owners != target_owners:
risks.append({ risks.append(
{
"code": "owner_mismatch", "code": "owner_mismatch",
"severity": "low", "severity": "low",
"object_type": "dashboard", "object_type": "dashboard",
"object_uuid": item["uuid"], "object_uuid": item["uuid"],
"message": f"Owner mismatch for dashboard {item.get('title') or item['uuid']}", "message": f"Owner mismatch for dashboard {item.get('title') or item['uuid']}",
}) }
)
logger.reflect("Risk list assembled", extra={"risk_count": len(risks)}) logger.reflect("Risk list assembled", extra={"risk_count": len(risks)})
return risks return risks
# [/DEF:build_risks:Function] # [/DEF:build_risks:Function]
@@ -160,11 +180,15 @@ def score_risks(risk_items: List[Dict[str, Any]]) -> Dict[str, Any]:
with belief_scope("risk_assessor.score_risks"): with belief_scope("risk_assessor.score_risks"):
logger.reason("Scoring risk items", extra={"risk_items_count": len(risk_items)}) logger.reason("Scoring risk items", extra={"risk_items_count": len(risk_items)})
weights = {"high": 25, "medium": 10, "low": 5} weights = {"high": 25, "medium": 10, "low": 5}
score = min(100, sum(weights.get(item.get("severity", "low"), 5) for item in risk_items)) score = min(
100, sum(weights.get(item.get("severity", "low"), 5) for item in risk_items)
)
level = "low" if score < 25 else "medium" if score < 60 else "high" level = "low" if score < 25 else "medium" if score < 60 else "high"
result = {"score": score, "level": level, "items": risk_items} result = {"score": score, "level": level, "items": risk_items}
logger.reflect("Risk score computed", extra={"score": score, "level": level}) logger.reflect("Risk score computed", extra={"score": score, "level": level})
return result return result
# [/DEF:score_risks:Function] # [/DEF:score_risks:Function]
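A quick worked example of the scoring rule above, using the weights and thresholds exactly as shown (high=25, medium=10, low=5, capped at 100; level boundaries at 25 and 60):

weights = {"high": 25, "medium": 10, "low": 5}
risk_items = [
    {"severity": "high"},    # 25
    {"severity": "high"},    # 25
    {"severity": "medium"},  # 10
    {"severity": "low"},     # 5
]
score = min(100, sum(weights.get(item.get("severity", "low"), 5) for item in risk_items))
level = "low" if score < 25 else "medium" if score < 60 else "high"
# score == 65, level == "high"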

View File

@@ -25,10 +25,11 @@ from src.core.mapping_service import IdMappingService
from src.models.mapping import ResourceType from src.models.mapping import ResourceType
# [/SECTION] # [/SECTION]
# [DEF:MigrationEngine:Class] # [DEF:MigrationEngine:Class]
# @PURPOSE: Engine for transforming Superset export ZIPs. # @PURPOSE: Engine for transforming Superset export ZIPs.
# @RELATION: CONTAINS -> [__init__, transform_zip, _transform_yaml, _extract_chart_uuids_from_archive, _patch_dashboard_metadata]
class MigrationEngine: class MigrationEngine:
# [DEF:__init__:Function] # [DEF:__init__:Function]
# @PURPOSE: Initializes migration orchestration dependencies for ZIP/YAML metadata transformations. # @PURPOSE: Initializes migration orchestration dependencies for ZIP/YAML metadata transformations.
# @PRE: mapping_service is None or implements batch remote ID lookup for ResourceType.CHART. # @PRE: mapping_service is None or implements batch remote ID lookup for ResourceType.CHART.
@@ -41,10 +42,12 @@ class MigrationEngine:
logger.reason("Initializing MigrationEngine") logger.reason("Initializing MigrationEngine")
self.mapping_service = mapping_service self.mapping_service = mapping_service
logger.reflect("MigrationEngine initialized") logger.reflect("MigrationEngine initialized")
# [/DEF:__init__:Function] # [/DEF:__init__:Function]
# [DEF:transform_zip:Function] # [DEF:transform_zip:Function]
# @PURPOSE: Extracts ZIP, replaces database UUIDs in YAMLs, patches cross-filters, and re-packages. # @PURPOSE: Extracts ZIP, replaces database UUIDs in YAMLs, patches cross-filters, and re-packages.
# @RELATION: DEPENDS_ON -> [_transform_yaml, _extract_chart_uuids_from_archive, _patch_dashboard_metadata]
# @PARAM: zip_path (str) - Path to the source ZIP file. # @PARAM: zip_path (str) - Path to the source ZIP file.
# @PARAM: output_path (str) - Path where the transformed ZIP will be saved. # @PARAM: output_path (str) - Path where the transformed ZIP will be saved.
# @PARAM: db_mapping (Dict[str, str]) - Mapping of source UUID to target UUID. # @PARAM: db_mapping (Dict[str, str]) - Mapping of source UUID to target UUID.
@@ -56,7 +59,15 @@ class MigrationEngine:
# @SIDE_EFFECT: Reads/writes filesystem archives, creates temporary directory, emits structured logs. # @SIDE_EFFECT: Reads/writes filesystem archives, creates temporary directory, emits structured logs.
# @DATA_CONTRACT: Input[(str zip_path, str output_path, Dict[str,str] db_mapping, bool strip_databases, Optional[str] target_env_id, bool fix_cross_filters)] -> Output[bool] # @DATA_CONTRACT: Input[(str zip_path, str output_path, Dict[str,str] db_mapping, bool strip_databases, Optional[str] target_env_id, bool fix_cross_filters)] -> Output[bool]
# @RETURN: bool - True if successful. # @RETURN: bool - True if successful.
def transform_zip(self, zip_path: str, output_path: str, db_mapping: Dict[str, str], strip_databases: bool = True, target_env_id: Optional[str] = None, fix_cross_filters: bool = False) -> bool: def transform_zip(
self,
zip_path: str,
output_path: str,
db_mapping: Dict[str, str],
strip_databases: bool = True,
target_env_id: Optional[str] = None,
fix_cross_filters: bool = False,
) -> bool:
""" """
Transform a Superset export ZIP by replacing database UUIDs and optionally fixing cross-filters. Transform a Superset export ZIP by replacing database UUIDs and optionally fixing cross-filters.
""" """
@@ -69,36 +80,52 @@ class MigrationEngine:
try: try:
# 1. Extract # 1. Extract
logger.reason(f"Extracting source archive to {temp_dir}") logger.reason(f"Extracting source archive to {temp_dir}")
with zipfile.ZipFile(zip_path, 'r') as zf: with zipfile.ZipFile(zip_path, "r") as zf:
zf.extractall(temp_dir) zf.extractall(temp_dir)
# 2. Transform YAMLs (Databases) # 2. Transform YAMLs (Databases)
dataset_files = list(temp_dir.glob("**/datasets/**/*.yaml")) + list(temp_dir.glob("**/datasets/*.yaml")) dataset_files = list(temp_dir.glob("**/datasets/**/*.yaml")) + list(
temp_dir.glob("**/datasets/*.yaml")
)
dataset_files = list(set(dataset_files)) dataset_files = list(set(dataset_files))
logger.reason(f"Transforming {len(dataset_files)} dataset YAML files") logger.reason(
f"Transforming {len(dataset_files)} dataset YAML files"
)
for ds_file in dataset_files: for ds_file in dataset_files:
self._transform_yaml(ds_file, db_mapping) self._transform_yaml(ds_file, db_mapping)
# 2.5 Patch Cross-Filters (Dashboards) # 2.5 Patch Cross-Filters (Dashboards)
if fix_cross_filters: if fix_cross_filters:
if self.mapping_service and target_env_id: if self.mapping_service and target_env_id:
dash_files = list(temp_dir.glob("**/dashboards/**/*.yaml")) + list(temp_dir.glob("**/dashboards/*.yaml")) dash_files = list(
temp_dir.glob("**/dashboards/**/*.yaml")
) + list(temp_dir.glob("**/dashboards/*.yaml"))
dash_files = list(set(dash_files)) dash_files = list(set(dash_files))
logger.reason(f"Patching cross-filters for {len(dash_files)} dashboards") logger.reason(
f"Patching cross-filters for {len(dash_files)} dashboards"
)
# Gather all source UUID-to-ID mappings from the archive first # Gather all source UUID-to-ID mappings from the archive first
source_id_to_uuid_map = self._extract_chart_uuids_from_archive(temp_dir) source_id_to_uuid_map = (
self._extract_chart_uuids_from_archive(temp_dir)
)
for dash_file in dash_files: for dash_file in dash_files:
self._patch_dashboard_metadata(dash_file, target_env_id, source_id_to_uuid_map) self._patch_dashboard_metadata(
dash_file, target_env_id, source_id_to_uuid_map
)
else: else:
logger.explore("Cross-filter patching requested but mapping service or target_env_id is missing") logger.explore(
"Cross-filter patching requested but mapping service or target_env_id is missing"
)
# 3. Re-package # 3. Re-package
logger.reason(f"Re-packaging transformed archive (strip_databases={strip_databases})") logger.reason(
with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zf: f"Re-packaging transformed archive (strip_databases={strip_databases})"
)
with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zf:
for root, dirs, files in os.walk(temp_dir): for root, dirs, files in os.walk(temp_dir):
rel_root = Path(root).relative_to(temp_dir) rel_root = Path(root).relative_to(temp_dir)
@@ -115,6 +142,7 @@ class MigrationEngine:
except Exception as e: except Exception as e:
logger.explore(f"Error transforming ZIP: {e}") logger.explore(f"Error transforming ZIP: {e}")
return False return False
# [/DEF:transform_zip:Function] # [/DEF:transform_zip:Function]
# [DEF:_transform_yaml:Function] # [DEF:_transform_yaml:Function]
@@ -131,19 +159,20 @@ class MigrationEngine:
logger.explore(f"YAML file not found: {file_path}") logger.explore(f"YAML file not found: {file_path}")
raise FileNotFoundError(str(file_path)) raise FileNotFoundError(str(file_path))
with open(file_path, 'r') as f: with open(file_path, "r") as f:
data = yaml.safe_load(f) data = yaml.safe_load(f)
if not data: if not data:
return return
source_uuid = data.get('database_uuid') source_uuid = data.get("database_uuid")
if source_uuid in db_mapping: if source_uuid in db_mapping:
logger.reason(f"Replacing database UUID in {file_path.name}") logger.reason(f"Replacing database UUID in {file_path.name}")
data['database_uuid'] = db_mapping[source_uuid] data["database_uuid"] = db_mapping[source_uuid]
with open(file_path, 'w') as f: with open(file_path, "w") as f:
yaml.dump(data, f) yaml.dump(data, f)
logger.reflect(f"Database UUID patched in {file_path.name}") logger.reflect(f"Database UUID patched in {file_path.name}")
# [/DEF:_transform_yaml:Function] # [/DEF:_transform_yaml:Function]
# [DEF:_extract_chart_uuids_from_archive:Function] # [DEF:_extract_chart_uuids_from_archive:Function]
@@ -161,16 +190,19 @@ class MigrationEngine:
# or manifesting the export metadata structure where source IDs are stored. # or manifesting the export metadata structure where source IDs are stored.
# For simplicity in US1 MVP, we assume it's read from chart files if present. # For simplicity in US1 MVP, we assume it's read from chart files if present.
mapping = {} mapping = {}
chart_files = list(temp_dir.glob("**/charts/**/*.yaml")) + list(temp_dir.glob("**/charts/*.yaml")) chart_files = list(temp_dir.glob("**/charts/**/*.yaml")) + list(
temp_dir.glob("**/charts/*.yaml")
)
for cf in set(chart_files): for cf in set(chart_files):
try: try:
with open(cf, 'r') as f: with open(cf, "r") as f:
cdata = yaml.safe_load(f) cdata = yaml.safe_load(f)
if cdata and 'id' in cdata and 'uuid' in cdata: if cdata and "id" in cdata and "uuid" in cdata:
mapping[cdata['id']] = cdata['uuid'] mapping[cdata["id"]] = cdata["uuid"]
except Exception: except Exception:
pass pass
return mapping return mapping
# [/DEF:_extract_chart_uuids_from_archive:Function] # [/DEF:_extract_chart_uuids_from_archive:Function]
# [DEF:_patch_dashboard_metadata:Function] # [DEF:_patch_dashboard_metadata:Function]
@@ -182,29 +214,37 @@ class MigrationEngine:
# @PARAM: file_path (Path) # @PARAM: file_path (Path)
# @PARAM: target_env_id (str) # @PARAM: target_env_id (str)
# @PARAM: source_map (Dict[int, str]) # @PARAM: source_map (Dict[int, str])
def _patch_dashboard_metadata(self, file_path: Path, target_env_id: str, source_map: Dict[int, str]): def _patch_dashboard_metadata(
self, file_path: Path, target_env_id: str, source_map: Dict[int, str]
):
with belief_scope("MigrationEngine._patch_dashboard_metadata"): with belief_scope("MigrationEngine._patch_dashboard_metadata"):
try: try:
if not file_path.exists(): if not file_path.exists():
return return
with open(file_path, 'r') as f: with open(file_path, "r") as f:
data = yaml.safe_load(f) data = yaml.safe_load(f)
if not data or 'json_metadata' not in data: if not data or "json_metadata" not in data:
return return
metadata_str = data['json_metadata'] metadata_str = data["json_metadata"]
if not metadata_str: if not metadata_str:
return return
# Fetch target UUIDs for everything we know: # Fetch target UUIDs for everything we know:
uuids_needed = list(source_map.values()) uuids_needed = list(source_map.values())
logger.reason(f"Resolving {len(uuids_needed)} remote IDs for dashboard metadata patching") logger.reason(
target_ids = self.mapping_service.get_remote_ids_batch(target_env_id, ResourceType.CHART, uuids_needed) f"Resolving {len(uuids_needed)} remote IDs for dashboard metadata patching"
)
target_ids = self.mapping_service.get_remote_ids_batch(
target_env_id, ResourceType.CHART, uuids_needed
)
if not target_ids: if not target_ids:
logger.reflect("No remote target IDs found in mapping database for this dashboard.") logger.reflect(
"No remote target IDs found in mapping database for this dashboard."
)
return return
# Map Source Int -> Target Int # Map Source Int -> Target Int
@@ -217,31 +257,46 @@ class MigrationEngine:
missing_targets.append(s_id) missing_targets.append(s_id)
if missing_targets: if missing_targets:
logger.explore(f"Missing target IDs for source IDs: {missing_targets}. Cross-filters might break.") logger.explore(
f"Missing target IDs for source IDs: {missing_targets}. Cross-filters might break."
)
if not source_to_target: if not source_to_target:
logger.reflect("No source IDs matched remotely. Skipping patch.") logger.reflect("No source IDs matched remotely. Skipping patch.")
return return
logger.reason(f"Patching {len(source_to_target)} ID references in json_metadata") logger.reason(
f"Patching {len(source_to_target)} ID references in json_metadata"
)
new_metadata_str = metadata_str new_metadata_str = metadata_str
for s_id, t_id in source_to_target.items(): for s_id, t_id in source_to_target.items():
new_metadata_str = re.sub(r'("datasetId"\s*:\s*)' + str(s_id) + r'(\b)', r'\g<1>' + str(t_id) + r'\g<2>', new_metadata_str) new_metadata_str = re.sub(
new_metadata_str = re.sub(r'("chartId"\s*:\s*)' + str(s_id) + r'(\b)', r'\g<1>' + str(t_id) + r'\g<2>', new_metadata_str) r'("datasetId"\s*:\s*)' + str(s_id) + r"(\b)",
r"\g<1>" + str(t_id) + r"\g<2>",
new_metadata_str,
)
new_metadata_str = re.sub(
r'("chartId"\s*:\s*)' + str(s_id) + r"(\b)",
r"\g<1>" + str(t_id) + r"\g<2>",
new_metadata_str,
)
# Re-parse to validate valid JSON # Re-parse to validate valid JSON
data['json_metadata'] = json.dumps(json.loads(new_metadata_str)) data["json_metadata"] = json.dumps(json.loads(new_metadata_str))
with open(file_path, 'w') as f: with open(file_path, "w") as f:
yaml.dump(data, f) yaml.dump(data, f)
logger.reflect(f"Dashboard metadata patched and saved: {file_path.name}") logger.reflect(
f"Dashboard metadata patched and saved: {file_path.name}"
)
except Exception as e: except Exception as e:
logger.explore(f"Metadata patch failed for {file_path.name}: {e}") logger.explore(f"Metadata patch failed for {file_path.name}: {e}")
# [/DEF:_patch_dashboard_metadata:Function] # [/DEF:_patch_dashboard_metadata:Function]
# [/DEF:MigrationEngine:Class] # [/DEF:MigrationEngine:Class]
# [/DEF:backend.src.core.migration_engine:Module] # [/DEF:backend.src.core.migration_engine:Module]
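To see the json_metadata patching in action, here is a small standalone run of the same substitution pattern. The metadata fragment and the ID mapping are invented; the real method resolves target IDs through the mapping service as shown above.

import json
import re

metadata_str = '{"chartId": 10, "datasetId": 42, "other": {"id": 10}}'  # hypothetical fragment
source_to_target = {10: 110, 42: 142}  # source chart/dataset IDs -> target IDs

patched = metadata_str
for s_id, t_id in source_to_target.items():
    patched = re.sub(
        r'("datasetId"\s*:\s*)' + str(s_id) + r"(\b)",
        r"\g<1>" + str(t_id) + r"\g<2>",
        patched,
    )
    patched = re.sub(
        r'("chartId"\s*:\s*)' + str(s_id) + r"(\b)",
        r"\g<1>" + str(t_id) + r"\g<2>",
        patched,
    )

# Only the keyed references are rewritten; the unrelated "id": 10 is left alone.
patched = json.dumps(json.loads(patched))
# patched == '{"chartId": 110, "datasetId": 142, "other": {"id": 10}}'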

View File

@@ -694,7 +694,7 @@ class SupersetClient:
# @PRE: Client is authenticated and chart_id exists. # @PRE: Client is authenticated and chart_id exists.
# @POST: Returns chart payload from Superset API. # @POST: Returns chart payload from Superset API.
# @DATA_CONTRACT: Input[chart_id: int] -> Output[Dict] # @DATA_CONTRACT: Input[chart_id: int] -> Output[Dict]
# @RELATION: [CALLS] ->[APIClient.request] # @RELATION: [CALLS] ->[request]
def get_chart(self, chart_id: int) -> Dict: def get_chart(self, chart_id: int) -> Dict:
with belief_scope("SupersetClient.get_chart", f"id={chart_id}"): with belief_scope("SupersetClient.get_chart", f"id={chart_id}"):
response = self.network.request(method="GET", endpoint=f"/chart/{chart_id}") response = self.network.request(method="GET", endpoint=f"/chart/{chart_id}")
@@ -996,7 +996,7 @@ class SupersetClient:
# @PRE: Client is authenticated. # @PRE: Client is authenticated.
# @POST: Returns total count and charts list. # @POST: Returns total count and charts list.
# @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]] # @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
# @RELATION: [CALLS] ->[SupersetClient._fetch_all_pages] # @RELATION: [CALLS] ->[_fetch_all_pages]
def get_charts(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]: def get_charts(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
with belief_scope("get_charts"): with belief_scope("get_charts"):
validated_query = self._validate_query_params(query or {}) validated_query = self._validate_query_params(query or {})

View File

@@ -1,4 +1,5 @@
# [DEF:backend.src.core.task_manager.__tests__.test_context:Module] # [DEF:TestContext:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, task-context, background-tasks, sub-context # @SEMANTICS: tests, task-context, background-tasks, sub-context
# @PURPOSE: Verify TaskContext preserves optional background task scheduler across sub-context creation. # @PURPOSE: Verify TaskContext preserves optional background task scheduler across sub-context creation.
@@ -9,6 +10,7 @@ from src.core.task_manager.context import TaskContext
# [DEF:test_task_context_preserves_background_tasks_across_sub_context:Function] # [DEF:test_task_context_preserves_background_tasks_across_sub_context:Function]
# @RELATION: BINDS_TO -> TestContext
# @PURPOSE: Plugins must be able to access background_tasks from both root and sub-context loggers. # @PURPOSE: Plugins must be able to access background_tasks from both root and sub-context loggers.
# @PRE: TaskContext is initialized with a BackgroundTasks-like object. # @PRE: TaskContext is initialized with a BackgroundTasks-like object.
# @POST: background_tasks remains available on root and derived sub-contexts. # @POST: background_tasks remains available on root and derived sub-contexts.
@@ -26,4 +28,4 @@ def test_task_context_preserves_background_tasks_across_sub_context():
assert context.background_tasks is background_tasks assert context.background_tasks is background_tasks
assert sub_context.background_tasks is background_tasks assert sub_context.background_tasks is background_tasks
# [/DEF:test_task_context_preserves_background_tasks_across_sub_context:Function] # [/DEF:test_task_context_preserves_background_tasks_across_sub_context:Function]
# [/DEF:backend.src.core.task_manager.__tests__.test_context:Module] # [/DEF:TestContext:Module]

View File

@@ -17,12 +17,18 @@ def task_logger(mock_add_log):
return TaskLogger(task_id="test_123", add_log_fn=mock_add_log, source="test_plugin") return TaskLogger(task_id="test_123", add_log_fn=mock_add_log, source="test_plugin")
# @TEST_CONTRACT: TaskLoggerModel -> Invariants # @TEST_CONTRACT: TaskLoggerModel -> Invariants
# [DEF:test_task_logger_initialization:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
def test_task_logger_initialization(task_logger): def test_task_logger_initialization(task_logger):
"""Verify TaskLogger is bound to specific task_id and source.""" """Verify TaskLogger is bound to specific task_id and source."""
assert task_logger._task_id == "test_123" assert task_logger._task_id == "test_123"
assert task_logger._default_source == "test_plugin" assert task_logger._default_source == "test_plugin"
# @TEST_CONTRACT: invariants -> "All specific log methods (info, error) delegate to _log" # @TEST_CONTRACT: invariants -> "All specific log methods (info, error) delegate to _log"
# [/DEF:test_task_logger_initialization:Function]
# [DEF:test_log_methods_delegation:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
def test_log_methods_delegation(task_logger, mock_add_log): def test_log_methods_delegation(task_logger, mock_add_log):
"""Verify info, error, warning, debug delegate to internal _log.""" """Verify info, error, warning, debug delegate to internal _log."""
task_logger.info("info message", metadata={"k": "v"}) task_logger.info("info message", metadata={"k": "v"})
@@ -62,6 +68,10 @@ def test_log_methods_delegation(task_logger, mock_add_log):
) )
# @TEST_CONTRACT: invariants -> "with_source creates a new logger with the same task_id" # @TEST_CONTRACT: invariants -> "with_source creates a new logger with the same task_id"
# [/DEF:test_log_methods_delegation:Function]
# [DEF:test_with_source:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
def test_with_source(task_logger): def test_with_source(task_logger):
"""Verify with_source returns a new instance with updated default source.""" """Verify with_source returns a new instance with updated default source."""
new_logger = task_logger.with_source("new_source") new_logger = task_logger.with_source("new_source")
@@ -71,18 +81,30 @@ def test_with_source(task_logger):
assert new_logger is not task_logger assert new_logger is not task_logger
# @TEST_EDGE: missing_task_id -> raises TypeError # @TEST_EDGE: missing_task_id -> raises TypeError
# [/DEF:test_with_source:Function]
# [DEF:test_missing_task_id:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
def test_missing_task_id(): def test_missing_task_id():
with pytest.raises(TypeError): with pytest.raises(TypeError):
TaskLogger(add_log_fn=lambda x: x) TaskLogger(add_log_fn=lambda x: x)
# @TEST_EDGE: invalid_add_log_fn -> raises TypeError # @TEST_EDGE: invalid_add_log_fn -> raises TypeError
# (Python doesn't strictly enforce this at init, but let's verify it fails on call if not callable) # (Python doesn't strictly enforce this at init, but let's verify it fails on call if not callable)
# [/DEF:test_missing_task_id:Function]
# [DEF:test_invalid_add_log_fn:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
def test_invalid_add_log_fn(): def test_invalid_add_log_fn():
logger = TaskLogger(task_id="msg", add_log_fn=None) logger = TaskLogger(task_id="msg", add_log_fn=None)
with pytest.raises(TypeError): with pytest.raises(TypeError):
logger.info("test") logger.info("test")
# @TEST_INVARIANT: consistent_delegation # @TEST_INVARIANT: consistent_delegation
# [/DEF:test_invalid_add_log_fn:Function]
# [DEF:test_progress_log:Function]
# @RELATION: BINDS_TO -> __tests__/test_task_logger
def test_progress_log(task_logger, mock_add_log): def test_progress_log(task_logger, mock_add_log):
"""Verify progress method correctly formats metadata.""" """Verify progress method correctly formats metadata."""
task_logger.progress("Step 1", 45.5) task_logger.progress("Step 1", 45.5)
@@ -100,3 +122,4 @@ def test_progress_log(task_logger, mock_add_log):
task_logger.progress("Step low", -10) task_logger.progress("Step low", -10)
assert mock_add_log.call_args[1]["metadata"]["progress"] == 0 assert mock_add_log.call_args[1]["metadata"]["progress"] == 0
# [/DEF:test_progress_log:Function]

View File

@@ -309,7 +309,7 @@ class APIClient:
except (requests.exceptions.RequestException, KeyError) as e: except (requests.exceptions.RequestException, KeyError) as e:
SupersetAuthCache.invalidate(self._auth_cache_key) SupersetAuthCache.invalidate(self._auth_cache_key)
raise NetworkError(f"Network or parsing error during authentication: {e}") from e raise NetworkError(f"Network or parsing error during authentication: {e}") from e
# [/DEF:authenticate:Function] # [/DEF:APIClient.authenticate:Function]
@property @property
# [DEF:headers:Function] # [DEF:headers:Function]

View File

@@ -34,6 +34,8 @@ class PreviewCompilationPayload:
preview_fingerprint: str preview_fingerprint: str
template_params: Dict[str, Any] template_params: Dict[str, Any]
effective_filters: List[Dict[str, Any]] effective_filters: List[Dict[str, Any]]
# [/DEF:PreviewCompilationPayload:Class] # [/DEF:PreviewCompilationPayload:Class]
@@ -47,6 +49,8 @@ class SqlLabLaunchPayload:
preview_id: str preview_id: str
compiled_sql: str compiled_sql: str
template_params: Dict[str, Any] template_params: Dict[str, Any]
# [/DEF:SqlLabLaunchPayload:Class] # [/DEF:SqlLabLaunchPayload:Class]
@@ -61,11 +65,25 @@ class SupersetCompilationAdapter:
# [DEF:SupersetCompilationAdapter.__init__:Function] # [DEF:SupersetCompilationAdapter.__init__:Function]
# @COMPLEXITY: 2 # @COMPLEXITY: 2
# @PURPOSE: Bind adapter to one Superset environment and client instance. # @PURPOSE: Bind adapter to one Superset environment and client instance.
def __init__(self, environment: Environment, client: Optional[SupersetClient] = None) -> None: def __init__(
self, environment: Environment, client: Optional[SupersetClient] = None
) -> None:
self.environment = environment self.environment = environment
self.client = client or SupersetClient(environment) self.client = client or SupersetClient(environment)
# [/DEF:SupersetCompilationAdapter.__init__:Function] # [/DEF:SupersetCompilationAdapter.__init__:Function]
# [DEF:SupersetCompilationAdapter._supports_client_method:Function]
# @COMPLEXITY: 2
# @PURPOSE: Detect explicitly implemented client capabilities without treating loose mocks as real methods.
def _supports_client_method(self, method_name: str) -> bool:
client_dict = getattr(self.client, "__dict__", {})
if method_name in client_dict:
return callable(client_dict[method_name])
return callable(getattr(type(self.client), method_name, None))
# [/DEF:SupersetCompilationAdapter._supports_client_method:Function]
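The helper above exists so that a bare MagicMock does not look like a client that really implements compile_preview. Below is a hedged sketch of the distinction, assuming standard unittest.mock behaviour where explicitly assigned attributes land in the instance __dict__ while auto-generated children do not; RealClient and supports are illustrative stand-ins.

from unittest.mock import MagicMock

class RealClient:
    def compile_preview(self, payload):  # explicitly implemented on the class
        return {"sql": "SELECT 1"}

def supports(client, method_name: str) -> bool:
    # Same shape as _supports_client_method above: instance __dict__ first, then the class.
    client_dict = getattr(client, "__dict__", {})
    if method_name in client_dict:
        return callable(client_dict[method_name])
    return callable(getattr(type(client), method_name, None))

loose_mock = MagicMock()  # would auto-create compile_preview on attribute access
configured_mock = MagicMock()
configured_mock.compile_preview = lambda payload: {"sql": "SELECT 1"}  # explicit assignment

assert supports(RealClient(), "compile_preview") is True
assert supports(loose_mock, "compile_preview") is False
assert supports(configured_mock, "compile_preview") is True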
# [DEF:SupersetCompilationAdapter.compile_preview:Function] # [DEF:SupersetCompilationAdapter.compile_preview:Function]
# @COMPLEXITY: 4 # @COMPLEXITY: 4
# @PURPOSE: Request Superset-side compiled SQL preview for the current effective inputs. # @PURPOSE: Request Superset-side compiled SQL preview for the current effective inputs.
@@ -79,7 +97,10 @@ class SupersetCompilationAdapter:
if payload.dataset_id <= 0: if payload.dataset_id <= 0:
logger.explore( logger.explore(
"Preview compilation rejected because dataset identifier is invalid", "Preview compilation rejected because dataset identifier is invalid",
extra={"dataset_id": payload.dataset_id, "session_id": payload.session_id}, extra={
"dataset_id": payload.dataset_id,
"session_id": payload.session_id,
},
) )
raise ValueError("dataset_id must be a positive integer") raise ValueError("dataset_id must be a positive integer")
@@ -155,6 +176,7 @@ class SupersetCompilationAdapter:
}, },
) )
return preview return preview
# [/DEF:SupersetCompilationAdapter.compile_preview:Function] # [/DEF:SupersetCompilationAdapter.compile_preview:Function]
# [DEF:SupersetCompilationAdapter.mark_preview_stale:Function] # [DEF:SupersetCompilationAdapter.mark_preview_stale:Function]
@@ -165,6 +187,7 @@ class SupersetCompilationAdapter:
def mark_preview_stale(self, preview: CompiledPreview) -> CompiledPreview: def mark_preview_stale(self, preview: CompiledPreview) -> CompiledPreview:
preview.preview_status = PreviewStatus.STALE preview.preview_status = PreviewStatus.STALE
return preview return preview
# [/DEF:SupersetCompilationAdapter.mark_preview_stale:Function] # [/DEF:SupersetCompilationAdapter.mark_preview_stale:Function]
# [DEF:SupersetCompilationAdapter.create_sql_lab_session:Function] # [DEF:SupersetCompilationAdapter.create_sql_lab_session:Function]
@@ -181,7 +204,10 @@ class SupersetCompilationAdapter:
if not compiled_sql: if not compiled_sql:
logger.explore( logger.explore(
"SQL Lab launch rejected because compiled SQL is empty", "SQL Lab launch rejected because compiled SQL is empty",
extra={"session_id": payload.session_id, "preview_id": payload.preview_id}, extra={
"session_id": payload.session_id,
"preview_id": payload.preview_id,
},
) )
raise ValueError("compiled_sql must be non-empty") raise ValueError("compiled_sql must be non-empty")
@@ -204,9 +230,14 @@ class SupersetCompilationAdapter:
if not sql_lab_session_ref: if not sql_lab_session_ref:
logger.explore( logger.explore(
"Superset SQL Lab launch response did not include a stable session reference", "Superset SQL Lab launch response did not include a stable session reference",
extra={"session_id": payload.session_id, "preview_id": payload.preview_id}, extra={
"session_id": payload.session_id,
"preview_id": payload.preview_id,
},
)
raise RuntimeError(
"Superset SQL Lab launch response did not include a session reference"
) )
raise RuntimeError("Superset SQL Lab launch response did not include a session reference")
logger.reflect( logger.reflect(
"Canonical SQL Lab session created successfully", "Canonical SQL Lab session created successfully",
@@ -217,6 +248,7 @@ class SupersetCompilationAdapter:
}, },
) )
return sql_lab_session_ref return sql_lab_session_ref
# [/DEF:SupersetCompilationAdapter.create_sql_lab_session:Function] # [/DEF:SupersetCompilationAdapter.create_sql_lab_session:Function]
# [DEF:SupersetCompilationAdapter._request_superset_preview:Function] # [DEF:SupersetCompilationAdapter._request_superset_preview:Function]
@@ -227,7 +259,48 @@ class SupersetCompilationAdapter:
# @POST: returns one normalized upstream compilation response including the chosen strategy metadata. # @POST: returns one normalized upstream compilation response including the chosen strategy metadata.
# @SIDE_EFFECT: issues one or more Superset preview requests through the client fallback chain. # @SIDE_EFFECT: issues one or more Superset preview requests through the client fallback chain.
# @DATA_CONTRACT: Input[PreviewCompilationPayload] -> Output[Dict[str,Any]] # @DATA_CONTRACT: Input[PreviewCompilationPayload] -> Output[Dict[str,Any]]
def _request_superset_preview(self, payload: PreviewCompilationPayload) -> Dict[str, Any]: def _request_superset_preview(
self, payload: PreviewCompilationPayload
) -> Dict[str, Any]:
direct_compile_preview = getattr(self.client, "compile_preview", None)
if self._supports_client_method("compile_preview") and callable(
direct_compile_preview
):
try:
logger.reason(
"Attempting preview compilation via direct client capability",
extra={
"dataset_id": payload.dataset_id,
"session_id": payload.session_id,
},
)
response = direct_compile_preview(payload)
except TypeError:
response = direct_compile_preview(
payload.dataset_id,
template_params=payload.template_params,
effective_filters=payload.effective_filters,
)
except Exception as exc:
logger.explore(
"Direct client preview capability failed; falling back to dataset preview strategies",
extra={
"dataset_id": payload.dataset_id,
"session_id": payload.session_id,
"error": str(exc),
},
)
else:
normalized = self._normalize_preview_response(response)
if normalized is not None:
return normalized
direct_compile_dataset_preview = getattr(
self.client, "compile_dataset_preview", None
)
if self._supports_client_method("compile_dataset_preview") and callable(
direct_compile_dataset_preview
):
try: try:
logger.reason( logger.reason(
"Attempting deterministic Superset preview compilation through supported endpoint strategies", "Attempting deterministic Superset preview compilation through supported endpoint strategies",
@@ -238,7 +311,7 @@ class SupersetCompilationAdapter:
"template_param_count": len(payload.template_params), "template_param_count": len(payload.template_params),
}, },
) )
response = self.client.compile_dataset_preview( response = direct_compile_dataset_preview(
dataset_id=payload.dataset_id, dataset_id=payload.dataset_id,
template_params=payload.template_params, template_params=payload.template_params,
effective_filters=payload.effective_filters, effective_filters=payload.effective_filters,
@@ -256,8 +329,72 @@ class SupersetCompilationAdapter:
normalized = self._normalize_preview_response(response) normalized = self._normalize_preview_response(response)
if normalized is None: if normalized is None:
raise RuntimeError("Superset preview compilation response could not be normalized") raise RuntimeError(
"Superset preview compilation response could not be normalized"
)
return normalized return normalized
try:
logger.reason(
"Attempting deterministic Superset preview compilation through supported endpoint strategies",
extra={
"dataset_id": payload.dataset_id,
"session_id": payload.session_id,
"filter_count": len(payload.effective_filters),
"template_param_count": len(payload.template_params),
},
)
if self._supports_client_method("compile_dataset_preview"):
response = self.client.compile_dataset_preview(
dataset_id=payload.dataset_id,
template_params=payload.template_params,
effective_filters=payload.effective_filters,
)
normalized = self._normalize_preview_response(response)
if normalized is None:
raise RuntimeError(
"Superset preview compilation response could not be normalized"
)
return normalized
errors: List[str] = []
for endpoint in (
f"/dataset/{payload.dataset_id}/preview",
f"/dataset/{payload.dataset_id}/sql",
):
try:
response = self.client.network.request(
method="POST",
endpoint=endpoint,
data=self._dump_json(
{
"template_params": payload.template_params,
"effective_filters": payload.effective_filters,
}
),
headers={"Content-Type": "application/json"},
)
normalized = self._normalize_preview_response(response)
if normalized is not None:
return normalized
errors.append(f"{endpoint}:unrecognized_response")
except Exception as exc:
errors.append(f"{endpoint}:{exc}")
raise RuntimeError(
"; ".join(errors) or "Superset preview compilation failed"
)
except Exception as exc:
logger.explore(
"Superset preview compilation failed across supported endpoint strategies",
extra={
"dataset_id": payload.dataset_id,
"session_id": payload.session_id,
"error": str(exc),
},
)
raise RuntimeError(str(exc)) from exc
# [/DEF:SupersetCompilationAdapter._request_superset_preview:Function] # [/DEF:SupersetCompilationAdapter._request_superset_preview:Function]
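# A minimal sketch of the endpoint fallback used above once no direct client
# capability is available: try each candidate endpoint, record why it failed, and
# raise one combined error only after every strategy is exhausted. The request
# callable and endpoint paths here are illustrative stand-ins.
from typing import Any, Callable, Dict, List


def try_preview_endpoints(
    request: Callable[[str], Dict[str, Any]], endpoints: List[str]
) -> Dict[str, Any]:
    errors: List[str] = []
    for endpoint in endpoints:
        try:
            response = request(endpoint)
            if response:  # stand-in for _normalize_preview_response(...) is not None
                return response
            errors.append(f"{endpoint}:unrecognized_response")
        except Exception as exc:  # collect and continue to the next strategy
            errors.append(f"{endpoint}:{exc}")
    raise RuntimeError("; ".join(errors) or "Superset preview compilation failed")


print(
    try_preview_endpoints(
        lambda ep: {"sql": "SELECT 1"} if ep.endswith("/sql") else {},
        ["/dataset/7/preview", "/dataset/7/sql"],
    )
)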
# [DEF:SupersetCompilationAdapter._request_sql_lab_session:Function] # [DEF:SupersetCompilationAdapter._request_sql_lab_session:Function]
@@ -270,10 +407,20 @@ class SupersetCompilationAdapter:
# @DATA_CONTRACT: Input[SqlLabLaunchPayload] -> Output[Dict[str,Any]] # @DATA_CONTRACT: Input[SqlLabLaunchPayload] -> Output[Dict[str,Any]]
def _request_sql_lab_session(self, payload: SqlLabLaunchPayload) -> Dict[str, Any]: def _request_sql_lab_session(self, payload: SqlLabLaunchPayload) -> Dict[str, Any]:
dataset_raw = self.client.get_dataset(payload.dataset_id) dataset_raw = self.client.get_dataset(payload.dataset_id)
dataset_record = dataset_raw.get("result", dataset_raw) if isinstance(dataset_raw, dict) else {} dataset_record = (
database_id = dataset_record.get("database", {}).get("id") if isinstance(dataset_record.get("database"), dict) else dataset_record.get("database_id") dataset_raw.get("result", dataset_raw)
if isinstance(dataset_raw, dict)
else {}
)
database_id = (
dataset_record.get("database", {}).get("id")
if isinstance(dataset_record.get("database"), dict)
else dataset_record.get("database_id")
)
if database_id is None: if database_id is None:
raise RuntimeError("Superset dataset does not expose a database identifier for SQL Lab launch") raise RuntimeError(
"Superset dataset does not expose a database identifier for SQL Lab launch"
)
request_payload = { request_payload = {
"database_id": database_id, "database_id": database_id,
@@ -305,7 +452,10 @@ class SupersetCompilationAdapter:
extra={"target": candidate["target"], "error": str(exc)}, extra={"target": candidate["target"], "error": str(exc)},
) )
raise RuntimeError("; ".join(errors) or "No Superset SQL Lab surface accepted the request") raise RuntimeError(
"; ".join(errors) or "No Superset SQL Lab surface accepted the request"
)
# [/DEF:SupersetCompilationAdapter._request_sql_lab_session:Function] # [/DEF:SupersetCompilationAdapter._request_sql_lab_session:Function]
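# A minimal sketch of the database-id resolution above: Superset dataset payloads
# may nest the database as {"database": {"id": ...}} or expose a flat
# "database_id"; both shapes should resolve, and None signals a launch error.
from typing import Any, Dict, Optional


def resolve_database_id(dataset_record: Dict[str, Any]) -> Optional[int]:
    database = dataset_record.get("database")
    if isinstance(database, dict):
        return database.get("id")
    return dataset_record.get("database_id")


print(resolve_database_id({"database": {"id": 3}}))  # 3
print(resolve_database_id({"database_id": 5}))       # 5
print(resolve_database_id({}))                       # None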
# [DEF:SupersetCompilationAdapter._normalize_preview_response:Function] # [DEF:SupersetCompilationAdapter._normalize_preview_response:Function]
@@ -339,6 +489,7 @@ class SupersetCompilationAdapter:
"raw_response": response, "raw_response": response,
} }
return None return None
# [/DEF:SupersetCompilationAdapter._normalize_preview_response:Function] # [/DEF:SupersetCompilationAdapter._normalize_preview_response:Function]
# [DEF:SupersetCompilationAdapter._dump_json:Function] # [DEF:SupersetCompilationAdapter._dump_json:Function]
@@ -348,7 +499,10 @@ class SupersetCompilationAdapter:
import json import json
return json.dumps(payload, sort_keys=True, default=str) return json.dumps(payload, sort_keys=True, default=str)
# [/DEF:SupersetCompilationAdapter._dump_json:Function] # [/DEF:SupersetCompilationAdapter._dump_json:Function]
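# A quick illustration of the deterministic dump used by _dump_json above:
# sort_keys=True gives a stable body for identical inputs, and default=str keeps
# non-JSON values such as datetimes from raising.
import json
from datetime import datetime

payload = {"b": datetime(2026, 1, 1), "a": 1}
print(json.dumps(payload, sort_keys=True, default=str))
# {"a": 1, "b": "2026-01-01 00:00:00"}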
# [/DEF:SupersetCompilationAdapter:Class] # [/DEF:SupersetCompilationAdapter:Class]
# [/DEF:SupersetCompilationAdapter:Module] # [/DEF:SupersetCompilationAdapter:Module]
View File
@@ -15,6 +15,7 @@
# @RELATION: CALLS ->[init_db] # @RELATION: CALLS ->[init_db]
from pathlib import Path from pathlib import Path
from typing import Optional
from fastapi import Depends, HTTPException, status from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer from fastapi.security import OAuth2PasswordBearer
from jose import JWTError from jose import JWTError
@@ -25,10 +26,16 @@ from .core.scheduler import SchedulerService
from .services.resource_service import ResourceService from .services.resource_service import ResourceService
from .services.mapping_service import MappingService from .services.mapping_service import MappingService
from .services.clean_release.repositories import ( from .services.clean_release.repositories import (
CandidateRepository, ArtifactRepository, ManifestRepository, CandidateRepository,
PolicyRepository, ComplianceRepository, ReportRepository, ArtifactRepository,
ApprovalRepository, PublicationRepository, AuditRepository, ManifestRepository,
CleanReleaseAuditLog PolicyRepository,
ComplianceRepository,
ReportRepository,
ApprovalRepository,
PublicationRepository,
AuditRepository,
CleanReleaseAuditLog,
) )
from .services.clean_release.repository import CleanReleaseRepository from .services.clean_release.repository import CleanReleaseRepository
from .services.clean_release.facade import CleanReleaseFacade from .services.clean_release.facade import CleanReleaseFacade
@@ -39,14 +46,17 @@ from .core.auth.jwt import decode_token
from .core.auth.repository import AuthRepository from .core.auth.repository import AuthRepository
from .models.auth import User from .models.auth import User
# Initialize singletons # Initialize singletons lazily to avoid import-time DB side effects during test collection.
# Use absolute path relative to this file to ensure plugins are found regardless of CWD # Use absolute path relative to this file to ensure plugins are found regardless of CWD
project_root = Path(__file__).parent.parent.parent project_root = Path(__file__).parent.parent.parent
config_path = project_root / "config.json" config_path = project_root / "config.json"
# Initialize database before services that use persisted configuration. config_manager: Optional[ConfigManager] = None
init_db() plugin_loader: Optional[PluginLoader] = None
config_manager = ConfigManager(config_path=str(config_path)) task_manager: Optional[TaskManager] = None
scheduler_service: Optional[SchedulerService] = None
resource_service: Optional[ResourceService] = None
# [DEF:get_config_manager:Function] # [DEF:get_config_manager:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
@@ -56,29 +66,23 @@ config_manager = ConfigManager(config_path=str(config_path))
# @RETURN: ConfigManager - The shared config manager instance. # @RETURN: ConfigManager - The shared config manager instance.
def get_config_manager() -> ConfigManager: def get_config_manager() -> ConfigManager:
"""Dependency injector for ConfigManager.""" """Dependency injector for ConfigManager."""
global config_manager
if config_manager is None:
init_db()
config_manager = ConfigManager(config_path=str(config_path))
return config_manager return config_manager
# [/DEF:get_config_manager:Function] # [/DEF:get_config_manager:Function]
plugin_dir = Path(__file__).parent / "plugins" plugin_dir = Path(__file__).parent / "plugins"
plugin_loader = PluginLoader(plugin_dir=str(plugin_dir))
logger.info(f"PluginLoader initialized with directory: {plugin_dir}")
logger.info(f"Available plugins: {[config.name for config in plugin_loader.get_all_plugin_configs()]}")
task_manager = TaskManager(plugin_loader)
logger.info("TaskManager initialized")
scheduler_service = SchedulerService(task_manager, config_manager)
logger.info("SchedulerService initialized")
resource_service = ResourceService()
logger.info("ResourceService initialized")
# Clean Release Redesign Singletons # Clean Release Redesign Singletons
# Note: These use get_db() which is a generator, so we need a way to provide a session. # Note: These use get_db() which is a generator, so we need a way to provide a session.
# For singletons in dependencies.py, we might need a different approach or # For singletons in dependencies.py, we might need a different approach or
# initialize them inside the dependency functions. # initialize them inside the dependency functions.
# [DEF:get_plugin_loader:Function] # [DEF:get_plugin_loader:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Dependency injector for PluginLoader. # @PURPOSE: Dependency injector for PluginLoader.
@@ -87,9 +91,19 @@ logger.info("ResourceService initialized")
# @RETURN: PluginLoader - The shared plugin loader instance. # @RETURN: PluginLoader - The shared plugin loader instance.
def get_plugin_loader() -> PluginLoader: def get_plugin_loader() -> PluginLoader:
"""Dependency injector for PluginLoader.""" """Dependency injector for PluginLoader."""
global plugin_loader
if plugin_loader is None:
plugin_loader = PluginLoader(plugin_dir=str(plugin_dir))
logger.info(f"PluginLoader initialized with directory: {plugin_dir}")
logger.info(
f"Available plugins: {[config.name for config in plugin_loader.get_all_plugin_configs()]}"
)
return plugin_loader return plugin_loader
# [/DEF:get_plugin_loader:Function] # [/DEF:get_plugin_loader:Function]
# [DEF:get_task_manager:Function] # [DEF:get_task_manager:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Dependency injector for TaskManager. # @PURPOSE: Dependency injector for TaskManager.
@@ -98,9 +112,16 @@ def get_plugin_loader() -> PluginLoader:
# @RETURN: TaskManager - The shared task manager instance. # @RETURN: TaskManager - The shared task manager instance.
def get_task_manager() -> TaskManager: def get_task_manager() -> TaskManager:
"""Dependency injector for TaskManager.""" """Dependency injector for TaskManager."""
global task_manager
if task_manager is None:
task_manager = TaskManager(get_plugin_loader())
logger.info("TaskManager initialized")
return task_manager return task_manager
# [/DEF:get_task_manager:Function] # [/DEF:get_task_manager:Function]
# [DEF:get_scheduler_service:Function] # [DEF:get_scheduler_service:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Dependency injector for SchedulerService. # @PURPOSE: Dependency injector for SchedulerService.
@@ -109,9 +130,16 @@ def get_task_manager() -> TaskManager:
# @RETURN: SchedulerService - The shared scheduler service instance. # @RETURN: SchedulerService - The shared scheduler service instance.
def get_scheduler_service() -> SchedulerService: def get_scheduler_service() -> SchedulerService:
"""Dependency injector for SchedulerService.""" """Dependency injector for SchedulerService."""
global scheduler_service
if scheduler_service is None:
scheduler_service = SchedulerService(get_task_manager(), get_config_manager())
logger.info("SchedulerService initialized")
return scheduler_service return scheduler_service
# [/DEF:get_scheduler_service:Function] # [/DEF:get_scheduler_service:Function]
# [DEF:get_resource_service:Function] # [DEF:get_resource_service:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Dependency injector for ResourceService. # @PURPOSE: Dependency injector for ResourceService.
@@ -120,9 +148,16 @@ def get_scheduler_service() -> SchedulerService:
# @RETURN: ResourceService - The shared resource service instance. # @RETURN: ResourceService - The shared resource service instance.
def get_resource_service() -> ResourceService: def get_resource_service() -> ResourceService:
"""Dependency injector for ResourceService.""" """Dependency injector for ResourceService."""
global resource_service
if resource_service is None:
resource_service = ResourceService()
logger.info("ResourceService initialized")
return resource_service return resource_service
# [/DEF:get_resource_service:Function] # [/DEF:get_resource_service:Function]
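# A minimal sketch of the lazy-singleton dependency pattern introduced above, using
# a hypothetical ExpensiveService: nothing is constructed at import time, the first
# call builds the instance, and later calls reuse it.
from typing import Optional


class ExpensiveService:
    def __init__(self) -> None:
        print("ExpensiveService constructed")  # the side effect we want to defer


_service: Optional[ExpensiveService] = None


def get_service() -> ExpensiveService:
    """FastAPI-style dependency: construct on first use, then reuse."""
    global _service
    if _service is None:
        _service = ExpensiveService()
    return _service


assert get_service() is get_service()  # importing alone would have built nothing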
# [DEF:get_mapping_service:Function] # [DEF:get_mapping_service:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Dependency injector for MappingService. # @PURPOSE: Dependency injector for MappingService.
@@ -131,12 +166,15 @@ def get_resource_service() -> ResourceService:
# @RETURN: MappingService - A new mapping service instance. # @RETURN: MappingService - A new mapping service instance.
def get_mapping_service() -> MappingService: def get_mapping_service() -> MappingService:
"""Dependency injector for MappingService.""" """Dependency injector for MappingService."""
return MappingService(config_manager) return MappingService(get_config_manager())
# [/DEF:get_mapping_service:Function] # [/DEF:get_mapping_service:Function]
_clean_release_repository = CleanReleaseRepository() _clean_release_repository = CleanReleaseRepository()
# [DEF:get_clean_release_repository:Function] # [DEF:get_clean_release_repository:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Legacy compatibility shim for CleanReleaseRepository. # @PURPOSE: Legacy compatibility shim for CleanReleaseRepository.
@@ -144,6 +182,8 @@ _clean_release_repository = CleanReleaseRepository()
def get_clean_release_repository() -> CleanReleaseRepository: def get_clean_release_repository() -> CleanReleaseRepository:
"""Legacy compatibility shim for CleanReleaseRepository.""" """Legacy compatibility shim for CleanReleaseRepository."""
return _clean_release_repository return _clean_release_repository
# [/DEF:get_clean_release_repository:Function] # [/DEF:get_clean_release_repository:Function]
@@ -172,17 +212,22 @@ def get_clean_release_facade(db = Depends(get_db)) -> CleanReleaseFacade:
approval_repo=approval_repo, approval_repo=approval_repo,
publication_repo=publication_repo, publication_repo=publication_repo,
audit_repo=audit_repo, audit_repo=audit_repo,
config_manager=config_manager config_manager=get_config_manager(),
) )
# [/DEF:get_clean_release_facade:Function] # [/DEF:get_clean_release_facade:Function]
# [DEF:oauth2_scheme:Variable] # [DEF:oauth2_scheme:Variable]
# @RELATION: DEPENDS_ON -> OAuth2PasswordBearer
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: OAuth2 password bearer scheme for token extraction. # @PURPOSE: OAuth2 password bearer scheme for token extraction.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login") oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
# [/DEF:oauth2_scheme:Variable] # [/DEF:oauth2_scheme:Variable]
# [DEF:get_current_user:Function] # [DEF:get_current_user:Function]
# @RELATION: CALLS -> AuthRepository
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Dependency for retrieving currently authenticated user from a JWT. # @PURPOSE: Dependency for retrieving currently authenticated user from a JWT.
# @PRE: JWT token provided in Authorization header. # @PRE: JWT token provided in Authorization header.
@@ -199,9 +244,10 @@ def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_
) )
try: try:
payload = decode_token(token) payload = decode_token(token)
username: str = payload.get("sub") username_value = payload.get("sub")
if username is None: if not isinstance(username_value, str) or not username_value:
raise credentials_exception raise credentials_exception
username = username_value
except JWTError: except JWTError:
raise credentials_exception raise credentials_exception
@@ -210,9 +256,13 @@ def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_
if user is None: if user is None:
raise credentials_exception raise credentials_exception
return user return user
# [/DEF:get_current_user:Function] # [/DEF:get_current_user:Function]
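# A minimal sketch of the claim narrowing above: a decoded JWT payload is an
# untyped dict, so the subject must be checked as a non-empty string before use
# rather than annotated as str and trusted.
from typing import Any, Dict, Optional


def extract_username(payload: Dict[str, Any]) -> Optional[str]:
    username_value = payload.get("sub")
    if not isinstance(username_value, str) or not username_value:
        return None  # the dependency raises its 401 credentials_exception here
    return username_value


print(extract_username({"sub": "alice"}))  # alice
print(extract_username({"sub": None}))     # None
print(extract_username({}))                # None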
# [DEF:has_permission:Function] # [DEF:has_permission:Function]
# @RELATION: CALLS -> AuthRepository
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Dependency for checking if the current user has a specific permission. # @PURPOSE: Dependency for checking if the current user has a specific permission.
# @PRE: User is authenticated. # @PRE: User is authenticated.
@@ -234,13 +284,21 @@ def has_permission(resource: str, action: str):
return current_user return current_user
from .core.auth.logger import log_security_event from .core.auth.logger import log_security_event
log_security_event("PERMISSION_DENIED", current_user.username, {"resource": resource, "action": action})
log_security_event(
"PERMISSION_DENIED",
str(getattr(current_user, "username", "unknown")),
{"resource": resource, "action": action},
)
raise HTTPException( raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, status_code=status.HTTP_403_FORBIDDEN,
detail=f"Permission denied for {resource}:{action}" detail=f"Permission denied for {resource}:{action}",
) )
return permission_checker return permission_checker
# [/DEF:has_permission:Function] # [/DEF:has_permission:Function]
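# A hedged usage sketch of the permission factory above. The has_permission stub
# below stands in for the real factory (which resolves the current user and raises
# HTTP 403); the route path, resource, and action names are illustrative.
from fastapi import APIRouter, Depends


def has_permission(resource: str, action: str):
    async def permission_checker():
        # The real checker returns the current user or raises HTTPException(403).
        return {"resource": resource, "action": action}

    return permission_checker


router = APIRouter(prefix="/api/config", tags=["config"])


@router.get("/", dependencies=[Depends(has_permission("config", "read"))])
async def read_config():
    return {"status": "ok"}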
# [/DEF:AppDependencies:Module] # [/DEF:AppDependencies:Module]
View File
@@ -36,17 +36,27 @@ def valid_candidate_data():
"source_snapshot_ref": "v1.0.0-snapshot" "source_snapshot_ref": "v1.0.0-snapshot"
} }
# [DEF:test_release_candidate_valid:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify that a valid release candidate can be instantiated.
def test_release_candidate_valid(valid_candidate_data): def test_release_candidate_valid(valid_candidate_data):
rc = ReleaseCandidate(**valid_candidate_data) rc = ReleaseCandidate(**valid_candidate_data)
assert rc.candidate_id == "RC-001" assert rc.candidate_id == "RC-001"
assert rc.status == ReleaseCandidateStatus.DRAFT assert rc.status == ReleaseCandidateStatus.DRAFT
# [/DEF:test_release_candidate_valid:Function]
# [DEF:test_release_candidate_empty_id:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify that a release candidate with an empty ID is rejected.
def test_release_candidate_empty_id(valid_candidate_data): def test_release_candidate_empty_id(valid_candidate_data):
valid_candidate_data["candidate_id"] = " " valid_candidate_data["candidate_id"] = " "
with pytest.raises(ValueError, match="candidate_id must be non-empty"): with pytest.raises(ValueError, match="candidate_id must be non-empty"):
ReleaseCandidate(**valid_candidate_data) ReleaseCandidate(**valid_candidate_data)
# @TEST_FIXTURE: valid_enterprise_policy # @TEST_FIXTURE: valid_enterprise_policy
# [/DEF:test_release_candidate_empty_id:Function]
@pytest.fixture @pytest.fixture
def valid_policy_data(): def valid_policy_data():
return { return {
@@ -61,17 +71,30 @@ def valid_policy_data():
} }
# @TEST_INVARIANT: policy_purity # @TEST_INVARIANT: policy_purity
# [DEF:test_enterprise_policy_valid:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify that a valid enterprise policy is accepted.
def test_enterprise_policy_valid(valid_policy_data): def test_enterprise_policy_valid(valid_policy_data):
policy = CleanProfilePolicy(**valid_policy_data) policy = CleanProfilePolicy(**valid_policy_data)
assert policy.external_source_forbidden is True assert policy.external_source_forbidden is True
# @TEST_EDGE: enterprise_policy_missing_prohibited # @TEST_EDGE: enterprise_policy_missing_prohibited
# [/DEF:test_enterprise_policy_valid:Function]
# [DEF:test_enterprise_policy_missing_prohibited:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify that an enterprise policy without prohibited categories is rejected.
def test_enterprise_policy_missing_prohibited(valid_policy_data): def test_enterprise_policy_missing_prohibited(valid_policy_data):
valid_policy_data["prohibited_artifact_categories"] = [] valid_policy_data["prohibited_artifact_categories"] = []
with pytest.raises(ValueError, match="enterprise-clean policy requires prohibited_artifact_categories"): with pytest.raises(ValueError, match="enterprise-clean policy requires prohibited_artifact_categories"):
CleanProfilePolicy(**valid_policy_data) CleanProfilePolicy(**valid_policy_data)
# @TEST_EDGE: enterprise_policy_external_allowed # @TEST_EDGE: enterprise_policy_external_allowed
# [/DEF:test_enterprise_policy_missing_prohibited:Function]
# [DEF:test_enterprise_policy_external_allowed:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify that an enterprise policy allowing external sources is rejected.
def test_enterprise_policy_external_allowed(valid_policy_data): def test_enterprise_policy_external_allowed(valid_policy_data):
valid_policy_data["external_source_forbidden"] = False valid_policy_data["external_source_forbidden"] = False
with pytest.raises(ValueError, match="enterprise-clean policy requires external_source_forbidden=true"): with pytest.raises(ValueError, match="enterprise-clean policy requires external_source_forbidden=true"):
@@ -79,6 +102,11 @@ def test_enterprise_policy_external_allowed(valid_policy_data):
# @TEST_INVARIANT: manifest_consistency # @TEST_INVARIANT: manifest_consistency
# @TEST_EDGE: manifest_count_mismatch # @TEST_EDGE: manifest_count_mismatch
# [/DEF:test_enterprise_policy_external_allowed:Function]
# [DEF:test_manifest_count_mismatch:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify that a manifest with count mismatches is rejected.
def test_manifest_count_mismatch(): def test_manifest_count_mismatch():
summary = ManifestSummary(included_count=1, excluded_count=0, prohibited_detected_count=0) summary = ManifestSummary(included_count=1, excluded_count=0, prohibited_detected_count=0)
item = ManifestItem(path="p", category="c", classification=ClassificationType.ALLOWED, reason="r") item = ManifestItem(path="p", category="c", classification=ClassificationType.ALLOWED, reason="r")
@@ -101,6 +129,11 @@ def test_manifest_count_mismatch():
# @TEST_INVARIANT: run_integrity # @TEST_INVARIANT: run_integrity
# @TEST_EDGE: compliant_run_stage_fail # @TEST_EDGE: compliant_run_stage_fail
# [/DEF:test_manifest_count_mismatch:Function]
# [DEF:test_compliant_run_validation:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify compliant run validation logic and mandatory stage checks.
def test_compliant_run_validation(): def test_compliant_run_validation():
base_run = { base_run = {
"check_run_id": "run1", "check_run_id": "run1",
@@ -130,6 +163,11 @@ def test_compliant_run_validation():
with pytest.raises(ValueError, match="compliant run requires all mandatory stages"): with pytest.raises(ValueError, match="compliant run requires all mandatory stages"):
ComplianceCheckRun(**base_run) ComplianceCheckRun(**base_run)
# [/DEF:test_compliant_run_validation:Function]
# [DEF:test_report_validation:Function]
# @RELATION: BINDS_TO -> __tests__/test_clean_release
# @PURPOSE: Verify compliance report validation based on status and violation counts.
def test_report_validation(): def test_report_validation():
# Valid blocked report # Valid blocked report
ComplianceReport( ComplianceReport(
@@ -147,3 +185,4 @@ def test_report_validation():
operator_summary="Blocked", structured_payload_ref="ref", operator_summary="Blocked", structured_payload_ref="ref",
violations_count=2, blocking_violations_count=0 violations_count=2, blocking_violations_count=0
) )
# [/DEF:test_report_validation:Function]
View File
@@ -15,6 +15,7 @@ from src.core.logger import belief_scope
# [DEF:test_environment_model:Function] # [DEF:test_environment_model:Function]
# @RELATION: BINDS_TO -> test_models
# @PURPOSE: Tests that Environment model correctly stores values. # @PURPOSE: Tests that Environment model correctly stores values.
# @PRE: Environment class is available. # @PRE: Environment class is available.
# @POST: Values are verified. # @POST: Values are verified.
View File
@@ -1,8 +1,8 @@
# [DEF:test_report_models:Module] # [DEF:test_report_models:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Unit tests for report Pydantic models and their validators # @PURPOSE: Unit tests for report Pydantic models and their validators
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.models.report
import sys import sys
from pathlib import Path from pathlib import Path
View File
@@ -1,9 +1,9 @@
# [DEF:backend.src.models.assistant:Module] # [DEF:AssistantModels:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: assistant, audit, confirmation, chat # @SEMANTICS: assistant, audit, confirmation, chat
# @PURPOSE: SQLAlchemy models for assistant audit trail and confirmation tokens. # @PURPOSE: SQLAlchemy models for assistant audit trail and confirmation tokens.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: DEPENDS_ON -> backend.src.models.mapping # @RELATION: DEPENDS_ON -> MappingModels
# @INVARIANT: Assistant records preserve immutable ids and creation timestamps. # @INVARIANT: Assistant records preserve immutable ids and creation timestamps.
from datetime import datetime from datetime import datetime
@@ -16,6 +16,7 @@ from .mapping import Base
# [DEF:AssistantAuditRecord:Class] # [DEF:AssistantAuditRecord:Class]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Store audit decisions and outcomes produced by assistant command handling. # @PURPOSE: Store audit decisions and outcomes produced by assistant command handling.
# @RELATION: INHERITS -> MappingModels
# @PRE: user_id must identify the actor for every record. # @PRE: user_id must identify the actor for every record.
# @POST: Audit payload remains available for compliance and debugging. # @POST: Audit payload remains available for compliance and debugging.
class AssistantAuditRecord(Base): class AssistantAuditRecord(Base):
@@ -29,12 +30,15 @@ class AssistantAuditRecord(Base):
message = Column(Text, nullable=True) message = Column(Text, nullable=True)
payload = Column(JSON, nullable=True) payload = Column(JSON, nullable=True)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
# [/DEF:AssistantAuditRecord:Class] # [/DEF:AssistantAuditRecord:Class]
# [DEF:AssistantMessageRecord:Class] # [DEF:AssistantMessageRecord:Class]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Persist chat history entries for assistant conversations. # @PURPOSE: Persist chat history entries for assistant conversations.
# @RELATION: INHERITS -> MappingModels
# @PRE: user_id, conversation_id, role and text must be present. # @PRE: user_id, conversation_id, role and text must be present.
# @POST: Message row can be queried in chronological order. # @POST: Message row can be queried in chronological order.
class AssistantMessageRecord(Base): class AssistantMessageRecord(Base):
@@ -50,12 +54,15 @@ class AssistantMessageRecord(Base):
confirmation_id = Column(String, nullable=True) confirmation_id = Column(String, nullable=True)
payload = Column(JSON, nullable=True) payload = Column(JSON, nullable=True)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
# [/DEF:AssistantMessageRecord:Class] # [/DEF:AssistantMessageRecord:Class]
# [DEF:AssistantConfirmationRecord:Class] # [DEF:AssistantConfirmationRecord:Class]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Persist risky operation confirmation tokens with lifecycle state. # @PURPOSE: Persist risky operation confirmation tokens with lifecycle state.
# @RELATION: INHERITS -> MappingModels
# @PRE: intent/dispatch and expiry timestamp must be provided. # @PRE: intent/dispatch and expiry timestamp must be provided.
# @POST: State transitions can be tracked and audited. # @POST: State transitions can be tracked and audited.
class AssistantConfirmationRecord(Base): class AssistantConfirmationRecord(Base):
@@ -70,5 +77,7 @@ class AssistantConfirmationRecord(Base):
expires_at = Column(DateTime, nullable=False) expires_at = Column(DateTime, nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
consumed_at = Column(DateTime, nullable=True) consumed_at = Column(DateTime, nullable=True)
# [/DEF:AssistantConfirmationRecord:Class] # [/DEF:AssistantConfirmationRecord:Class]
# [/DEF:backend.src.models.assistant:Module] # [/DEF:AssistantModels:Module]
View File
@@ -5,7 +5,7 @@
# @SEMANTICS: auth, models, user, role, permission, sqlalchemy # @SEMANTICS: auth, models, user, role, permission, sqlalchemy
# @PURPOSE: SQLAlchemy models for multi-user authentication and authorization. # @PURPOSE: SQLAlchemy models for multi-user authentication and authorization.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: INHERITS_FROM -> [Base] # @RELATION: INHERITS_FROM -> [MappingModels:Base]
# #
# @INVARIANT: Usernames and emails must be unique. # @INVARIANT: Usernames and emails must be unique.
@@ -20,12 +20,16 @@ from .mapping import Base
# [DEF:generate_uuid:Function] # [DEF:generate_uuid:Function]
# @PURPOSE: Generates a unique UUID string. # @PURPOSE: Generates a unique UUID string.
# @POST: Returns a string representation of a new UUID. # @POST: Returns a string representation of a new UUID.
# @RELATION: DEPENDS_ON -> uuid
def generate_uuid(): def generate_uuid():
return str(uuid.uuid4()) return str(uuid.uuid4())
# [/DEF:generate_uuid:Function] # [/DEF:generate_uuid:Function]
# [DEF:user_roles:Table] # [DEF:user_roles:Table]
# @PURPOSE: Association table for many-to-many relationship between Users and Roles. # @PURPOSE: Association table for many-to-many relationship between Users and Roles.
# @RELATION: DEPENDS_ON -> Base.metadata
# @RELATION: DEPENDS_ON -> User
# @RELATION: DEPENDS_ON -> Role
user_roles = Table( user_roles = Table(
"user_roles", "user_roles",
Base.metadata, Base.metadata,
@@ -36,6 +40,9 @@ user_roles = Table(
# [DEF:role_permissions:Table] # [DEF:role_permissions:Table]
# @PURPOSE: Association table for many-to-many relationship between Roles and Permissions. # @PURPOSE: Association table for many-to-many relationship between Roles and Permissions.
# @RELATION: DEPENDS_ON -> Base.metadata
# @RELATION: DEPENDS_ON -> Role
# @RELATION: DEPENDS_ON -> Permission
role_permissions = Table( role_permissions = Table(
"role_permissions", "role_permissions",
Base.metadata, Base.metadata,
View File
@@ -1,8 +1,9 @@
# [DEF:backend.src.models.clean_release:Module] # [DEF:CleanReleaseModels:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @SEMANTICS: clean-release, models, lifecycle, compliance, evidence, immutability # @SEMANTICS: clean-release, models, lifecycle, compliance, evidence, immutability
# @PURPOSE: Define canonical clean release domain entities and lifecycle guards. # @PURPOSE: Define canonical clean release domain entities and lifecycle guards.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: DEPENDS_ON -> MappingModels
# @PRE: Base mapping model and release enums are available. # @PRE: Base mapping model and release enums are available.
# @POST: Provides SQLAlchemy and dataclass definitions for governance domain. # @POST: Provides SQLAlchemy and dataclass definitions for governance domain.
# @SIDE_EFFECT: None (schema definition). # @SIDE_EFFECT: None (schema definition).
@@ -695,4 +696,4 @@ class CleanReleaseAuditLog(Base):
details_json = Column(JSON, default=dict) details_json = Column(JSON, default=dict)
# [/DEF:CleanReleaseAuditLog:Class] # [/DEF:CleanReleaseAuditLog:Class]
# [/DEF:backend.src.models.clean_release:Module] # [/DEF:CleanReleaseModels:Module]
View File
@@ -1,11 +1,11 @@
# [DEF:backend.src.models.config:Module] # [DEF:ConfigModels:Module]
# #
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @SEMANTICS: database, config, settings, sqlalchemy, notification # @SEMANTICS: database, config, settings, sqlalchemy, notification
# @PURPOSE: Defines SQLAlchemy persistence models for application and notification configuration records. # @PURPOSE: Defines SQLAlchemy persistence models for application and notification configuration records.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[sqlalchemy]
# @RELATION: [DEPENDS_ON] ->[backend.src.models.mapping:Base] # @RELATION: [DEPENDS_ON] -> [MappingModels:Base]
# @INVARIANT: Configuration payload and notification credentials must remain persisted as non-null JSON documents. # @INVARIANT: Configuration payload and notification credentials must remain persisted as non-null JSON documents.
from sqlalchemy import Column, String, DateTime, JSON, Boolean from sqlalchemy import Column, String, DateTime, JSON, Boolean
@@ -50,4 +50,4 @@ class NotificationConfig(Base):
import uuid import uuid
# [/DEF:backend.src.models.config:Module] # [/DEF:ConfigModels:Module]
View File
@@ -1,4 +1,4 @@
# [DEF:backend.src.models.connection:Module] # [DEF:ConnectionModels:Module]
# #
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @SEMANTICS: database, connection, configuration, sqlalchemy, sqlite # @SEMANTICS: database, connection, configuration, sqlalchemy, sqlite
@@ -33,4 +33,4 @@ class ConnectionConfig(Base):
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now()) updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
# [/DEF:ConnectionConfig:Class] # [/DEF:ConnectionConfig:Class]
# [/DEF:backend.src.models.connection:Module] # [/DEF:ConnectionModels:Module]
View File
@@ -1,9 +1,9 @@
# [DEF:backend.src.models.dashboard:Module] # [DEF:DashboardModels:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: dashboard, model, metadata, migration # @SEMANTICS: dashboard, model, metadata, migration
# @PURPOSE: Defines data models for dashboard metadata and selection. # @PURPOSE: Defines data models for dashboard metadata and selection.
# @LAYER: Model # @LAYER: Model
# @RELATION: USED_BY -> backend.src.api.routes.migration # @RELATION: USED_BY -> MigrationApi
from pydantic import BaseModel from pydantic import BaseModel
from typing import List from typing import List
@@ -29,4 +29,4 @@ class DashboardSelection(BaseModel):
fix_cross_filters: bool = True fix_cross_filters: bool = True
# [/DEF:DashboardSelection:Class] # [/DEF:DashboardSelection:Class]
# [/DEF:backend.src.models.dashboard:Module] # [/DEF:DashboardModels:Module]
View File
@@ -1,9 +1,9 @@
# [DEF:backend.src.models.llm:Module] # [DEF:LlmModels:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: llm, models, sqlalchemy, persistence # @SEMANTICS: llm, models, sqlalchemy, persistence
# @PURPOSE: SQLAlchemy models for LLM provider configuration and validation results. # @PURPOSE: SQLAlchemy models for LLM provider configuration and validation results.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: INHERITS_FROM -> backend.src.models.mapping.Base # @RELATION: INHERITS_FROM -> MappingModels:Base
from sqlalchemy import Column, String, Boolean, DateTime, JSON, Text, Time, ForeignKey from sqlalchemy import Column, String, Boolean, DateTime, JSON, Text, Time, ForeignKey
from datetime import datetime from datetime import datetime
@@ -65,4 +65,4 @@ class ValidationRecord(Base):
raw_response = Column(Text, nullable=True) raw_response = Column(Text, nullable=True)
# [/DEF:ValidationRecord:Class] # [/DEF:ValidationRecord:Class]
# [/DEF:backend.src.models.llm:Module] # [/DEF:LlmModels:Module]
View File
@@ -5,7 +5,8 @@
# @SEMANTICS: database, mapping, environment, migration, sqlalchemy, sqlite # @SEMANTICS: database, mapping, environment, migration, sqlalchemy, sqlite
# @PURPOSE: Defines the database schema for environment metadata and database mappings using SQLAlchemy. # @PURPOSE: Defines the database schema for environment metadata and database mappings using SQLAlchemy.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: DEPENDS_ON -> [sqlalchemy] # @RELATION: DEPENDS_ON -> sqlalchemy
# #
# @INVARIANT: All primary keys are UUID strings. # @INVARIANT: All primary keys are UUID strings.
# @CONSTRAINT: source_env_id and target_env_id must be valid environment IDs. # @CONSTRAINT: source_env_id and target_env_id must be valid environment IDs.
@@ -44,6 +45,7 @@ class MigrationStatus(enum.Enum):
# [DEF:Environment:Class] # [DEF:Environment:Class]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Represents a Superset instance environment. # @PURPOSE: Represents a Superset instance environment.
# @RELATION: DEPENDS_ON -> MappingModels
class Environment(Base): class Environment(Base):
__tablename__ = "environments" __tablename__ = "environments"
@@ -87,6 +89,7 @@ class MigrationJob(Base):
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Maps a universal UUID for a resource to its actual ID on a specific environment. # @PURPOSE: Maps a universal UUID for a resource to its actual ID on a specific environment.
# @TEST_DATA: resource_mapping_record -> {'environment_id': 'prod-env-1', 'resource_type': 'chart', 'uuid': '123e4567-e89b-12d3-a456-426614174000', 'remote_integer_id': '42'} # @TEST_DATA: resource_mapping_record -> {'environment_id': 'prod-env-1', 'resource_type': 'chart', 'uuid': '123e4567-e89b-12d3-a456-426614174000', 'remote_integer_id': '42'}
# @RELATION: DEPENDS_ON -> MappingModels
class ResourceMapping(Base): class ResourceMapping(Base):
__tablename__ = "resource_mappings" __tablename__ = "resource_mappings"
View File
@@ -6,7 +6,7 @@
# @PURPOSE: Defines persistent per-user profile settings for dashboard filter, Git identity/token, and UX preferences. # @PURPOSE: Defines persistent per-user profile settings for dashboard filter, Git identity/token, and UX preferences.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: DEPENDS_ON -> [AuthModels] # @RELATION: DEPENDS_ON -> [AuthModels]
# @RELATION: INHERITS_FROM -> [Base] # @RELATION: INHERITS_FROM -> [MappingModels:Base]
# #
# @INVARIANT: Exactly one preference row exists per user_id. # @INVARIANT: Exactly one preference row exists per user_id.
# @INVARIANT: Sensitive Git token is stored encrypted and never returned in plaintext. # @INVARIANT: Sensitive Git token is stored encrypted and never returned in plaintext.
@@ -23,6 +23,7 @@ from .mapping import Base
# [DEF:UserDashboardPreference:Class] # [DEF:UserDashboardPreference:Class]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Stores Superset username binding and default "my dashboards" toggle for one authenticated user. # @PURPOSE: Stores Superset username binding and default "my dashboards" toggle for one authenticated user.
# @RELATION: INHERITS -> MappingModels:Base
class UserDashboardPreference(Base): class UserDashboardPreference(Base):
__tablename__ = "user_dashboard_preferences" __tablename__ = "user_dashboard_preferences"
View File
@@ -1,5 +1,5 @@
# [DEF:backend.src.models.report:Module] # [DEF:ReportModels:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @SEMANTICS: reports, models, pydantic, normalization, pagination # @SEMANTICS: reports, models, pydantic, normalization, pagination
# @PURPOSE: Canonical report schemas for unified task reporting across heterogeneous task types. # @PURPOSE: Canonical report schemas for unified task reporting across heterogeneous task types.
# @LAYER: Domain # @LAYER: Domain
@@ -7,7 +7,7 @@
# @POST: Provides validated schemas for cross-plugin reporting and UI consumption. # @POST: Provides validated schemas for cross-plugin reporting and UI consumption.
# @SIDE_EFFECT: None (schema definition). # @SIDE_EFFECT: None (schema definition).
# @DATA_CONTRACT: Model[TaskReport, ReportCollection, ReportDetailView] # @DATA_CONTRACT: Model[TaskReport, ReportCollection, ReportDetailView]
# @RELATION: [DEPENDS_ON] ->[backend.src.core.task_manager.models] # @RELATION: [DEPENDS_ON] -> [TaskModels]
# @INVARIANT: Canonical report fields are always present for every report item. # @INVARIANT: Canonical report fields are always present for every report item.
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
@@ -20,8 +20,9 @@ from pydantic import BaseModel, Field, field_validator, model_validator
# [DEF:TaskType:Class] # [DEF:TaskType:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: Must contain valid generic task type mappings. # @INVARIANT: Must contain valid generic task type mappings.
# @RELATION: DEPENDS_ON -> ReportModels
# @SEMANTICS: enum, type, task # @SEMANTICS: enum, type, task
# @PURPOSE: Supported normalized task report types. # @PURPOSE: Supported normalized task report types.
class TaskType(str, Enum): class TaskType(str, Enum):
@@ -31,11 +32,13 @@ class TaskType(str, Enum):
DOCUMENTATION = "documentation" DOCUMENTATION = "documentation"
CLEAN_RELEASE = "clean_release" CLEAN_RELEASE = "clean_release"
UNKNOWN = "unknown" UNKNOWN = "unknown"
# [/DEF:TaskType:Class] # [/DEF:TaskType:Class]
# [DEF:ReportStatus:Class] # [DEF:ReportStatus:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: TaskStatus enum mapping logic holds. # @INVARIANT: TaskStatus enum mapping logic holds.
# @SEMANTICS: enum, status, task # @SEMANTICS: enum, status, task
# @PURPOSE: Supported normalized report status values. # @PURPOSE: Supported normalized report status values.
@@ -44,11 +47,13 @@ class ReportStatus(str, Enum):
FAILED = "failed" FAILED = "failed"
IN_PROGRESS = "in_progress" IN_PROGRESS = "in_progress"
PARTIAL = "partial" PARTIAL = "partial"
# [/DEF:ReportStatus:Class] # [/DEF:ReportStatus:Class]
# [DEF:ErrorContext:Class] # [DEF:ErrorContext:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: The properties accurately describe error state. # @INVARIANT: The properties accurately describe error state.
# @SEMANTICS: error, context, payload # @SEMANTICS: error, context, payload
# @PURPOSE: Error and recovery context for failed/partial reports. # @PURPOSE: Error and recovery context for failed/partial reports.
@@ -69,11 +74,13 @@ class ErrorContext(BaseModel):
code: Optional[str] = None code: Optional[str] = None
message: str message: str
next_actions: List[str] = Field(default_factory=list) next_actions: List[str] = Field(default_factory=list)
# [/DEF:ErrorContext:Class] # [/DEF:ErrorContext:Class]
# [DEF:TaskReport:Class] # [DEF:TaskReport:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: Must represent canonical task record attributes. # @INVARIANT: Must represent canonical task record attributes.
# @SEMANTICS: report, model, summary # @SEMANTICS: report, model, summary
# @PURPOSE: Canonical normalized report envelope for one task execution. # @PURPOSE: Canonical normalized report envelope for one task execution.
@@ -126,11 +133,13 @@ class TaskReport(BaseModel):
if not isinstance(value, str) or not value.strip(): if not isinstance(value, str) or not value.strip():
raise ValueError("Value must be a non-empty string") raise ValueError("Value must be a non-empty string")
return value.strip() return value.strip()
# [/DEF:TaskReport:Class] # [/DEF:TaskReport:Class]
# [DEF:ReportQuery:Class] # [DEF:ReportQuery:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: Time and pagination queries are mutually consistent. # @INVARIANT: Time and pagination queries are mutually consistent.
# @SEMANTICS: query, filter, search # @SEMANTICS: query, filter, search
# @PURPOSE: Query object for server-side report filtering, sorting, and pagination. # @PURPOSE: Query object for server-side report filtering, sorting, and pagination.
@@ -184,11 +193,13 @@ class ReportQuery(BaseModel):
if self.time_from and self.time_to and self.time_from > self.time_to: if self.time_from and self.time_to and self.time_from > self.time_to:
raise ValueError("time_from must be less than or equal to time_to") raise ValueError("time_from must be less than or equal to time_to")
return self return self
# [/DEF:ReportQuery:Class] # [/DEF:ReportQuery:Class]
# [DEF:ReportCollection:Class] # [DEF:ReportCollection:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: Represents paginated data correctly. # @INVARIANT: Represents paginated data correctly.
# @SEMANTICS: collection, pagination # @SEMANTICS: collection, pagination
# @PURPOSE: Paginated collection of normalized task reports. # @PURPOSE: Paginated collection of normalized task reports.
@@ -209,11 +220,13 @@ class ReportCollection(BaseModel):
page_size: int = Field(ge=1) page_size: int = Field(ge=1)
has_next: bool has_next: bool
applied_filters: ReportQuery applied_filters: ReportQuery
# [/DEF:ReportCollection:Class] # [/DEF:ReportCollection:Class]
# [DEF:ReportDetailView:Class] # [DEF:ReportDetailView:Class]
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @INVARIANT: Incorporates a report and logs correctly. # @INVARIANT: Incorporates a report and logs correctly.
# @SEMANTICS: view, detail, logs # @SEMANTICS: view, detail, logs
# @PURPOSE: Detailed report representation including diagnostics and recovery actions. # @PURPOSE: Detailed report representation including diagnostics and recovery actions.
@@ -230,6 +243,8 @@ class ReportDetailView(BaseModel):
timeline: List[Dict[str, Any]] = Field(default_factory=list) timeline: List[Dict[str, Any]] = Field(default_factory=list)
diagnostics: Optional[Dict[str, Any]] = None diagnostics: Optional[Dict[str, Any]] = None
next_actions: List[str] = Field(default_factory=list) next_actions: List[str] = Field(default_factory=list)
# [/DEF:ReportDetailView:Class] # [/DEF:ReportDetailView:Class]
# [/DEF:backend.src.models.report:Module] # [/DEF:ReportModels:Module]
View File
@@ -1,4 +1,4 @@
# [DEF:backend.src.models.storage:Module] # [DEF:StorageModels:Module]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @SEMANTICS: storage, file, model, pydantic # @SEMANTICS: storage, file, model, pydantic
# @PURPOSE: Data models for the storage system. # @PURPOSE: Data models for the storage system.
@@ -41,4 +41,4 @@ class StoredFile(BaseModel):
mime_type: Optional[str] = Field(None, description="MIME type of the file.") mime_type: Optional[str] = Field(None, description="MIME type of the file.")
# [/DEF:StoredFile:Class] # [/DEF:StoredFile:Class]
View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.models.task:Module] # [DEF:TaskModels:Module]
# #
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @SEMANTICS: database, task, record, sqlalchemy, sqlite # @SEMANTICS: database, task, record, sqlalchemy, sqlite
@@ -36,7 +36,7 @@ class TaskRecord(Base):
# [DEF:TaskLogRecord:Class] # [DEF:TaskLogRecord:Class]
# @PURPOSE: Represents a single persistent log entry for a task. # @PURPOSE: Represents a single persistent log entry for a task.
# @COMPLEXITY: 5 # @COMPLEXITY: 3
# @RELATION: DEPENDS_ON -> TaskRecord # @RELATION: DEPENDS_ON -> TaskRecord
# @INVARIANT: Each log entry belongs to exactly one task. # @INVARIANT: Each log entry belongs to exactly one task.
# #
@@ -113,4 +113,4 @@ class TaskLogRecord(Base):
) )
# [/DEF:TaskLogRecord:Class] # [/DEF:TaskLogRecord:Class]
# [/DEF:backend.src.models.task:Module] # [/DEF:TaskModels:Module]
View File
@@ -1,4 +1,5 @@
# [DEF:backend.src.plugins.llm_analysis.__tests__.test_client_headers:Module] # [DEF:TestClientHeaders:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, llm-client, openrouter, headers # @SEMANTICS: tests, llm-client, openrouter, headers
# @PURPOSE: Verify OpenRouter client initialization includes provider-specific headers. # @PURPOSE: Verify OpenRouter client initialization includes provider-specific headers.
@@ -8,6 +9,7 @@ from src.plugins.llm_analysis.service import LLMClient
# [DEF:test_openrouter_client_includes_referer_and_title_headers:Function] # [DEF:test_openrouter_client_includes_referer_and_title_headers:Function]
# @RELATION: BINDS_TO -> TestClientHeaders
# @PURPOSE: OpenRouter requests should carry site/app attribution headers for compatibility. # @PURPOSE: OpenRouter requests should carry site/app attribution headers for compatibility.
# @PRE: Client is initialized for OPENROUTER provider. # @PRE: Client is initialized for OPENROUTER provider.
# @POST: Async client headers include Authorization, HTTP-Referer, and X-Title. # @POST: Async client headers include Authorization, HTTP-Referer, and X-Title.
@@ -27,4 +29,4 @@ def test_openrouter_client_includes_referer_and_title_headers(monkeypatch):
assert headers["HTTP-Referer"] == "http://localhost:8000" assert headers["HTTP-Referer"] == "http://localhost:8000"
assert headers["X-Title"] == "ss-tools-test" assert headers["X-Title"] == "ss-tools-test"
# [/DEF:test_openrouter_client_includes_referer_and_title_headers:Function] # [/DEF:test_openrouter_client_includes_referer_and_title_headers:Function]
# [/DEF:backend.src.plugins.llm_analysis.__tests__.test_client_headers:Module] # [/DEF:TestClientHeaders:Module]
View File
@@ -1,4 +1,5 @@
# [DEF:backend.src.plugins.llm_analysis.__tests__.test_service:Module] # [DEF:TestService:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, llm-analysis, fallback, provider-error, unknown-status # @SEMANTICS: tests, llm-analysis, fallback, provider-error, unknown-status
# @PURPOSE: Verify LLM analysis transport/provider failures do not masquerade as dashboard FAIL results. # @PURPOSE: Verify LLM analysis transport/provider failures do not masquerade as dashboard FAIL results.
@@ -10,6 +11,7 @@ from src.plugins.llm_analysis.service import LLMClient
# [DEF:test_test_runtime_connection_uses_json_completion_transport:Function] # [DEF:test_test_runtime_connection_uses_json_completion_transport:Function]
# @RELATION: BINDS_TO -> TestService
# @PURPOSE: Provider self-test must exercise the same chat completion transport as runtime analysis. # @PURPOSE: Provider self-test must exercise the same chat completion transport as runtime analysis.
# @PRE: get_json_completion is available on initialized client. # @PRE: get_json_completion is available on initialized client.
# @POST: Self-test forwards a lightweight user message into get_json_completion and returns its payload. # @POST: Self-test forwards a lightweight user message into get_json_completion and returns its payload.
@@ -38,6 +40,7 @@ async def test_test_runtime_connection_uses_json_completion_transport(monkeypatc
# [DEF:test_analyze_dashboard_provider_error_maps_to_unknown:Function] # [DEF:test_analyze_dashboard_provider_error_maps_to_unknown:Function]
# @RELATION: BINDS_TO -> TestService
# @PURPOSE: Infrastructure/provider failures must produce UNKNOWN analysis status rather than FAIL. # @PURPOSE: Infrastructure/provider failures must produce UNKNOWN analysis status rather than FAIL.
# @PRE: LLMClient.get_json_completion raises provider/auth exception. # @PRE: LLMClient.get_json_completion raises provider/auth exception.
# @POST: Returned payload uses status=UNKNOWN and issue severity UNKNOWN. # @POST: Returned payload uses status=UNKNOWN and issue severity UNKNOWN.
@@ -64,4 +67,4 @@ async def test_analyze_dashboard_provider_error_maps_to_unknown(monkeypatch, tmp
assert "Failed to get response from LLM" in result["summary"] assert "Failed to get response from LLM" in result["summary"]
assert result["issues"][0]["severity"] == "UNKNOWN" assert result["issues"][0]["severity"] == "UNKNOWN"
# [/DEF:test_analyze_dashboard_provider_error_maps_to_unknown:Function] # [/DEF:test_analyze_dashboard_provider_error_maps_to_unknown:Function]
# [/DEF:backend.src.plugins.llm_analysis.__tests__.test_service:Module] # [/DEF:TestService:Module]
View File
@@ -1,4 +1,5 @@
# [DEF:backend.src.schemas.__tests__.test_settings_and_health_schemas:Module] # [DEF:TestSettingsAndHealthSchemas:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Regression tests for settings and health schema contracts updated in 026 fix batch. # @PURPOSE: Regression tests for settings and health schema contracts updated in 026 fix batch.
@@ -10,6 +11,7 @@ from src.schemas.settings import ValidationPolicyCreate
# [DEF:test_validation_policy_create_accepts_structured_custom_channels:Function] # [DEF:test_validation_policy_create_accepts_structured_custom_channels:Function]
# @RELATION: BINDS_TO -> TestSettingsAndHealthSchemas
# @PURPOSE: Ensure policy schema accepts structured custom channel objects with type/target fields. # @PURPOSE: Ensure policy schema accepts structured custom channel objects with type/target fields.
def test_validation_policy_create_accepts_structured_custom_channels(): def test_validation_policy_create_accepts_structured_custom_channels():
payload = { payload = {
@@ -34,6 +36,7 @@ def test_validation_policy_create_accepts_structured_custom_channels():
# [DEF:test_validation_policy_create_rejects_legacy_string_custom_channels:Function] # [DEF:test_validation_policy_create_rejects_legacy_string_custom_channels:Function]
# @RELATION: BINDS_TO -> TestSettingsAndHealthSchemas
# @PURPOSE: Ensure legacy list[str] custom channel payload is rejected by typed channel contract. # @PURPOSE: Ensure legacy list[str] custom channel payload is rejected by typed channel contract.
def test_validation_policy_create_rejects_legacy_string_custom_channels(): def test_validation_policy_create_rejects_legacy_string_custom_channels():
payload = { payload = {
@@ -53,6 +56,7 @@ def test_validation_policy_create_rejects_legacy_string_custom_channels():
# [DEF:test_dashboard_health_item_status_accepts_only_whitelisted_values:Function] # [DEF:test_dashboard_health_item_status_accepts_only_whitelisted_values:Function]
# @RELATION: BINDS_TO -> TestSettingsAndHealthSchemas
# @PURPOSE: Verify strict grouped regex only accepts PASS/WARN/FAIL/UNKNOWN exact statuses. # @PURPOSE: Verify strict grouped regex only accepts PASS/WARN/FAIL/UNKNOWN exact statuses.
def test_dashboard_health_item_status_accepts_only_whitelisted_values(): def test_dashboard_health_item_status_accepts_only_whitelisted_values():
valid = DashboardHealthItem( valid = DashboardHealthItem(
@@ -81,4 +85,4 @@ def test_dashboard_health_item_status_accepts_only_whitelisted_values():
# [/DEF:test_dashboard_health_item_status_accepts_only_whitelisted_values:Function] # [/DEF:test_dashboard_health_item_status_accepts_only_whitelisted_values:Function]
# [/DEF:backend.src.schemas.__tests__.test_settings_and_health_schemas:Module] # [/DEF:TestSettingsAndHealthSchemas:Module]
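
Both schema contracts tested here are easy to express as Pydantic models. A sketch under assumed field names (only status, type, and target come from the tests); pattern= is the Pydantic v2 keyword, v1 uses regex=:

from pydantic import BaseModel, Field

STATUS_PATTERN = r"^(PASS|WARN|FAIL|UNKNOWN)$"  # strict grouped whitelist

class DashboardHealthItemSketch(BaseModel):
    dashboard_id: str                              # field name assumed
    status: str = Field(pattern=STATUS_PATTERN)    # rejects "pass", "FAILED", etc.

class CustomChannelSketch(BaseModel):
    type: str    # e.g. "email" or "webhook"
    target: str  # address or URL the channel delivers to

class ValidationPolicyCreateSketch(BaseModel):
    name: str
    # Typed channel objects; a legacy list[str] payload fails validation here.
    custom_channels: list[CustomChannelSketch] = Field(default_factory=list)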

@@ -1,5 +1,5 @@
# [DEF:backend.src.services:Module] # [DEF:services:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 2
# @SEMANTICS: services, package, init # @SEMANTICS: services, package, init
# @PURPOSE: Package initialization for services module # @PURPOSE: Package initialization for services module
# @LAYER: Core # @LAYER: Core
@@ -18,4 +18,4 @@ def __getattr__(name):
from .resource_service import ResourceService from .resource_service import ResourceService
return ResourceService return ResourceService
raise AttributeError(f"module {__name__!r} has no attribute {name!r}") raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
# [/DEF:backend.src.services:Module] # [/DEF:services:Module]
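
The __getattr__ shown in this hunk is the PEP 562 lazy-import pattern: submodules are imported only on first attribute access, keeping package import cheap. The guarding condition is hidden by the hunk, so the if below is an assumption:

def __getattr__(name):
    if name == "ResourceService":                 # guard assumed, not shown in the hunk
        from .resource_service import ResourceService
        return ResourceService
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")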

@@ -1,9 +1,9 @@
# [DEF:test_encryption_manager:Module] # [DEF:test_encryption_manager:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: encryption, security, fernet, api-keys, tests # @SEMANTICS: encryption, security, fernet, api-keys, tests
# @PURPOSE: Unit tests for EncryptionManager encrypt/decrypt functionality. # @PURPOSE: Unit tests for EncryptionManager encrypt/decrypt functionality.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.llm_provider.EncryptionManager
# @INVARIANT: Encrypt+decrypt roundtrip always returns original plaintext. # @INVARIANT: Encrypt+decrypt roundtrip always returns original plaintext.
import sys import sys
@@ -16,6 +16,7 @@ from cryptography.fernet import Fernet, InvalidToken
# [DEF:TestEncryptionManager:Class] # [DEF:TestEncryptionManager:Class]
# @RELATION: BINDS_TO -> test_encryption_manager
# @PURPOSE: Validate EncryptionManager encrypt/decrypt roundtrip, uniqueness, and error handling. # @PURPOSE: Validate EncryptionManager encrypt/decrypt roundtrip, uniqueness, and error handling.
# @PRE: cryptography package installed. # @PRE: cryptography package installed.
# @POST: All encrypt/decrypt invariants verified. # @POST: All encrypt/decrypt invariants verified.
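
The roundtrip invariant these tests verify is the standard Fernet symmetric cycle. A minimal sketch of an EncryptionManager-like wrapper, assuming the key comes from the ENCRYPTION_KEY environment variable as the fixtures set it:

import os
from cryptography.fernet import Fernet, InvalidToken

class EncryptionManagerSketch:
    """Fernet wrapper mirroring the roundtrip invariant tested above."""

    def __init__(self):
        # The fixtures set ENCRYPTION_KEY; fall back to a throwaway key here.
        key = os.environ.get("ENCRYPTION_KEY") or Fernet.generate_key().decode()
        self._fernet = Fernet(key.encode())

    def encrypt(self, plaintext: str) -> str:
        return self._fernet.encrypt(plaintext.encode()).decode()

    def decrypt(self, token: str) -> str:
        try:
            return self._fernet.decrypt(token.encode()).decode()
        except InvalidToken as exc:
            raise ValueError("token was not produced with this key") from exc

manager = EncryptionManagerSketch()
assert manager.decrypt(manager.encrypt("sk-test")) == "sk-test"   # roundtrip invariant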

@@ -11,7 +11,9 @@ from src.plugins.llm_analysis import plugin as plugin_module
# [DEF:_DummyLogger:Class] # [DEF:_DummyLogger:Class]
# @RELATION: BINDS_TO ->[test_llm_plugin_persistence] # @RELATION: BINDS_TO ->[test_llm_plugin_persistence]
# @COMPLEXITY: 1
# @PURPOSE: Minimal logger shim for TaskContext-like objects used in tests. # @PURPOSE: Minimal logger shim for TaskContext-like objects used in tests.
# @INVARIANT: Logging methods are no-ops and must not mutate test state.
class _DummyLogger: class _DummyLogger:
def with_source(self, _source: str): def with_source(self, _source: str):
return self return self
@@ -34,7 +36,9 @@ class _DummyLogger:
# [DEF:_FakeDBSession:Class] # [DEF:_FakeDBSession:Class]
# @RELATION: BINDS_TO ->[test_llm_plugin_persistence] # @RELATION: BINDS_TO ->[test_llm_plugin_persistence]
# @COMPLEXITY: 2
# @PURPOSE: Captures persisted records for assertion and mimics SQLAlchemy session methods used by plugin. # @PURPOSE: Captures persisted records for assertion and mimics SQLAlchemy session methods used by plugin.
# @INVARIANT: add/commit/close provide only persistence signals asserted by this test.
class _FakeDBSession: class _FakeDBSession:
def __init__(self): def __init__(self):
self.added = None self.added = None
@@ -90,6 +94,11 @@ async def test_dashboard_validation_plugin_persists_task_and_environment_ids(
async def capture_dashboard(self, _dashboard_id, _screenshot_path): async def capture_dashboard(self, _dashboard_id, _screenshot_path):
return None return None
# [DEF:_FakeLLMClient:Class]
# @RELATION: BINDS_TO ->[test_dashboard_validation_plugin_persists_task_and_environment_ids]
# @COMPLEXITY: 2
# @PURPOSE: Deterministic LLM client double returning canonical analysis payload for persistence-path assertions.
# @INVARIANT: analyze_dashboard is side-effect free and returns schema-compatible PASS result.
class _FakeLLMClient: class _FakeLLMClient:
def __init__(self, **_kwargs): def __init__(self, **_kwargs):
return None return None
@@ -101,6 +110,8 @@ async def test_dashboard_validation_plugin_persists_task_and_environment_ids(
"issues": [], "issues": [],
} }
# [/DEF:_FakeLLMClient:Class]
class _FakeNotificationService: class _FakeNotificationService:
def __init__(self, *_args, **_kwargs): def __init__(self, *_args, **_kwargs):
return None return None
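
The doubles in this file only need to capture persistence signals. A sketch of the _FakeDBSession idea; attribute names beyond added are assumptions:

class FakeDBSessionSketch:
    """Captures what the plugin persists without touching a real database."""

    def __init__(self):
        self.added = None
        self.committed = False
        self.closed = False

    def add(self, record):
        self.added = record          # capture the record the plugin persists

    def commit(self):
        self.committed = True        # signal only; nothing is written

    def close(self):
        self.closed = True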

@@ -1,9 +1,9 @@
# [DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module] # [DEF:test_llm_prompt_templates:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, llm, prompts, templates, settings # @SEMANTICS: tests, llm, prompts, templates, settings
# @PURPOSE: Validate normalization and rendering behavior for configurable LLM prompt templates. # @PURPOSE: Validate normalization and rendering behavior for configurable LLM prompt templates.
# @LAYER: Domain Tests # @LAYER: Domain Tests
# @RELATION: DEPENDS_ON -> backend.src.services.llm_prompt_templates # @RELATION: DEPENDS_ON ->[backend.src.services.llm_prompt_templates:Function]
# @INVARIANT: All required prompt keys remain available after normalization. # @INVARIANT: All required prompt keys remain available after normalization.
from src.services.llm_prompt_templates import ( from src.services.llm_prompt_templates import (
@@ -18,10 +18,13 @@ from src.services.llm_prompt_templates import (
# [DEF:test_normalize_llm_settings_adds_default_prompts:Function] # [DEF:test_normalize_llm_settings_adds_default_prompts:Function]
# @RELATION: BINDS_TO -> test_llm_prompt_templates
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Ensure legacy/partial llm settings are expanded with all prompt defaults. # @PURPOSE: Ensure legacy/partial llm settings are expanded with all prompt defaults.
# @PRE: Input llm settings do not contain complete prompts object. # @PRE: Input llm settings do not contain complete prompts object.
# @POST: Returned structure includes required prompt templates with fallback defaults. # @POST: Returned structure includes required prompt templates with fallback defaults.
# [DEF:test_normalize_llm_settings_adds_default_prompts:Function]
# @RELATION: BINDS_TO -> test_llm_prompt_templates
def test_normalize_llm_settings_adds_default_prompts(): def test_normalize_llm_settings_adds_default_prompts():
normalized = normalize_llm_settings({"default_provider": "x"}) normalized = normalize_llm_settings({"default_provider": "x"})
@@ -39,10 +42,15 @@ def test_normalize_llm_settings_adds_default_prompts():
# [DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function] # [DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
# @RELATION: BINDS_TO -> test_llm_prompt_templates
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Ensure user-customized prompt values are preserved during normalization. # @PURPOSE: Ensure user-customized prompt values are preserved during normalization.
# @PRE: Input llm settings contain custom prompt override. # @PRE: Input llm settings contain custom prompt override.
# @POST: Custom prompt value remains unchanged in normalized output. # @POST: Custom prompt value remains unchanged in normalized output.
# [/DEF:test_normalize_llm_settings_adds_default_prompts:Function]
# [DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
# @RELATION: BINDS_TO -> test_llm_prompt_templates
def test_normalize_llm_settings_keeps_custom_prompt_values(): def test_normalize_llm_settings_keeps_custom_prompt_values():
custom = "Doc for {dataset_name} using {columns_json}" custom = "Doc for {dataset_name} using {columns_json}"
normalized = normalize_llm_settings( normalized = normalize_llm_settings(
@@ -54,10 +62,15 @@ def test_normalize_llm_settings_keeps_custom_prompt_values():
# [DEF:test_render_prompt_replaces_known_placeholders:Function] # [DEF:test_render_prompt_replaces_known_placeholders:Function]
# @RELATION: BINDS_TO -> test_llm_prompt_templates
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Ensure template placeholders are deterministically replaced. # @PURPOSE: Ensure template placeholders are deterministically replaced.
# @PRE: Template contains placeholders matching provided variables. # @PRE: Template contains placeholders matching provided variables.
# @POST: Rendered prompt string contains substituted values. # @POST: Rendered prompt string contains substituted values.
# [/DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
# [DEF:test_render_prompt_replaces_known_placeholders:Function]
# @RELATION: BINDS_TO -> test_llm_prompt_templates
def test_render_prompt_replaces_known_placeholders(): def test_render_prompt_replaces_known_placeholders():
rendered = render_prompt( rendered = render_prompt(
"Hello {name}, diff={diff}", "Hello {name}, diff={diff}",
@@ -69,8 +82,11 @@ def test_render_prompt_replaces_known_placeholders():
# [DEF:test_is_multimodal_model_detects_known_vision_models:Function] # [DEF:test_is_multimodal_model_detects_known_vision_models:Function]
# @COMPLEXITY: 3 # @RELATION: BINDS_TO -> test_llm_prompt_templates
# @COMPLEXITY: 2
# @PURPOSE: Ensure multimodal model detection recognizes common vision-capable model names. # @PURPOSE: Ensure multimodal model detection recognizes common vision-capable model names.
# [/DEF:test_render_prompt_replaces_known_placeholders:Function]
def test_is_multimodal_model_detects_known_vision_models(): def test_is_multimodal_model_detects_known_vision_models():
assert is_multimodal_model("gpt-4o") is True assert is_multimodal_model("gpt-4o") is True
assert is_multimodal_model("claude-3-5-sonnet") is True assert is_multimodal_model("claude-3-5-sonnet") is True
@@ -80,7 +96,8 @@ def test_is_multimodal_model_detects_known_vision_models():
# [DEF:test_resolve_bound_provider_id_prefers_binding_then_default:Function] # [DEF:test_resolve_bound_provider_id_prefers_binding_then_default:Function]
# @COMPLEXITY: 3 # @RELATION: BINDS_TO -> test_llm_prompt_templates
# @COMPLEXITY: 2
# @PURPOSE: Verify provider binding resolution priority. # @PURPOSE: Verify provider binding resolution priority.
def test_resolve_bound_provider_id_prefers_binding_then_default(): def test_resolve_bound_provider_id_prefers_binding_then_default():
settings = { settings = {
@@ -93,7 +110,8 @@ def test_resolve_bound_provider_id_prefers_binding_then_default():
# [DEF:test_normalize_llm_settings_keeps_assistant_planner_settings:Function] # [DEF:test_normalize_llm_settings_keeps_assistant_planner_settings:Function]
# @COMPLEXITY: 3 # @RELATION: BINDS_TO -> test_llm_prompt_templates
# @COMPLEXITY: 2
# @PURPOSE: Ensure assistant planner provider/model fields are preserved and normalized. # @PURPOSE: Ensure assistant planner provider/model fields are preserved and normalized.
def test_normalize_llm_settings_keeps_assistant_planner_settings(): def test_normalize_llm_settings_keeps_assistant_planner_settings():
normalized = normalize_llm_settings( normalized = normalize_llm_settings(
@@ -107,4 +125,4 @@ def test_normalize_llm_settings_keeps_assistant_planner_settings():
# [/DEF:test_normalize_llm_settings_keeps_assistant_planner_settings:Function] # [/DEF:test_normalize_llm_settings_keeps_assistant_planner_settings:Function]
# [/DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module] # [/DEF:test_llm_prompt_templates:Module]
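
The normalization and rendering behavior covered here is straightforward to sketch. Default prompt keys, merge rules, and the shape of the variables argument are assumptions; only the placeholder substitution example comes from the test:

DEFAULT_PROMPTS = {
    "dataset_doc": "Document {dataset_name} using {columns_json}",
    "dashboard_analysis": "Analyse {name}, diff={diff}",
}

def normalize_llm_settings_sketch(settings: dict) -> dict:
    normalized = dict(settings)
    prompts = dict(DEFAULT_PROMPTS)                   # start from defaults
    prompts.update(normalized.get("prompts") or {})   # user overrides win
    normalized["prompts"] = prompts
    return normalized

def render_prompt_sketch(template: str, variables: dict) -> str:
    rendered = template
    for key, value in variables.items():
        rendered = rendered.replace("{" + key + "}", str(value))
    return rendered

assert render_prompt_sketch("Hello {name}, diff={diff}", {"name": "a", "diff": 1}) == "Hello a, diff=1"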

@@ -14,11 +14,14 @@ from src.plugins.llm_analysis.models import LLMProviderConfig, LLMProviderType
# [DEF:_test_encryption_key_fixture:Global] # [DEF:_test_encryption_key_fixture:Global]
# @PURPOSE: Ensure encryption-dependent provider tests run with a valid Fernet key. # @PURPOSE: Ensure encryption-dependent provider tests run with a valid Fernet key.
# @RELATION: DEPENDS_ON ->[pytest:Module]
os.environ.setdefault("ENCRYPTION_KEY", Fernet.generate_key().decode()) os.environ.setdefault("ENCRYPTION_KEY", Fernet.generate_key().decode())
# [/DEF:_test_encryption_key_fixture:Global] # [/DEF:_test_encryption_key_fixture:Global]
# @TEST_CONTRACT: EncryptionManagerModel -> Invariants # @TEST_CONTRACT: EncryptionManagerModel -> Invariants
# @TEST_INVARIANT: symmetric_encryption # @TEST_INVARIANT: symmetric_encryption
# [DEF:test_encryption_cycle:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_encryption_cycle(): def test_encryption_cycle():
"""Verify encrypted data can be decrypted back to original string.""" """Verify encrypted data can be decrypted back to original string."""
manager = EncryptionManager() manager = EncryptionManager()
@@ -28,6 +31,10 @@ def test_encryption_cycle():
assert manager.decrypt(encrypted) == original assert manager.decrypt(encrypted) == original
# @TEST_EDGE: empty_string_encryption # @TEST_EDGE: empty_string_encryption
# [/DEF:test_encryption_cycle:Function]
# [DEF:test_empty_string_encryption:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_empty_string_encryption(): def test_empty_string_encryption():
manager = EncryptionManager() manager = EncryptionManager()
original = "" original = ""
@@ -35,12 +42,18 @@ def test_empty_string_encryption():
assert manager.decrypt(encrypted) == "" assert manager.decrypt(encrypted) == ""
# @TEST_EDGE: decrypt_invalid_data # @TEST_EDGE: decrypt_invalid_data
# [/DEF:test_empty_string_encryption:Function]
# [DEF:test_decrypt_invalid_data:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_decrypt_invalid_data(): def test_decrypt_invalid_data():
manager = EncryptionManager() manager = EncryptionManager()
with pytest.raises(Exception): with pytest.raises(Exception):
manager.decrypt("not-encrypted-string") manager.decrypt("not-encrypted-string")
# @TEST_FIXTURE: mock_db_session # @TEST_FIXTURE: mock_db_session
# [/DEF:test_decrypt_invalid_data:Function]
@pytest.fixture @pytest.fixture
def mock_db(): def mock_db():
return MagicMock(spec=Session) return MagicMock(spec=Session)
@@ -49,11 +62,17 @@ def mock_db():
def service(mock_db): def service(mock_db):
return LLMProviderService(db=mock_db) return LLMProviderService(db=mock_db)
# [DEF:test_get_all_providers:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_get_all_providers(service, mock_db): def test_get_all_providers(service, mock_db):
service.get_all_providers() service.get_all_providers()
mock_db.query.assert_called() mock_db.query.assert_called()
mock_db.query().all.assert_called() mock_db.query().all.assert_called()
# [/DEF:test_get_all_providers:Function]
# [DEF:test_create_provider:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_create_provider(service, mock_db): def test_create_provider(service, mock_db):
config = LLMProviderConfig( config = LLMProviderConfig(
provider_type=LLMProviderType.OPENAI, provider_type=LLMProviderType.OPENAI,
@@ -73,6 +92,10 @@ def test_create_provider(service, mock_db):
# Decrypt to verify it matches # Decrypt to verify it matches
assert EncryptionManager().decrypt(provider.api_key) == "sk-test" assert EncryptionManager().decrypt(provider.api_key) == "sk-test"
# [/DEF:test_create_provider:Function]
# [DEF:test_get_decrypted_api_key:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_get_decrypted_api_key(service, mock_db): def test_get_decrypted_api_key(service, mock_db):
# Setup mock provider # Setup mock provider
encrypted_key = EncryptionManager().encrypt("secret-value") encrypted_key = EncryptionManager().encrypt("secret-value")
@@ -82,10 +105,18 @@ def test_get_decrypted_api_key(service, mock_db):
key = service.get_decrypted_api_key("p1") key = service.get_decrypted_api_key("p1")
assert key == "secret-value" assert key == "secret-value"
# [/DEF:test_get_decrypted_api_key:Function]
# [DEF:test_get_decrypted_api_key_not_found:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_get_decrypted_api_key_not_found(service, mock_db): def test_get_decrypted_api_key_not_found(service, mock_db):
mock_db.query().filter().first.return_value = None mock_db.query().filter().first.return_value = None
assert service.get_decrypted_api_key("missing") is None assert service.get_decrypted_api_key("missing") is None
# [/DEF:test_get_decrypted_api_key_not_found:Function]
# [DEF:test_update_provider_ignores_masked_placeholder_api_key:Function]
# @RELATION: BINDS_TO -> __tests__/test_llm_provider
def test_update_provider_ignores_masked_placeholder_api_key(service, mock_db): def test_update_provider_ignores_masked_placeholder_api_key(service, mock_db):
existing_encrypted = EncryptionManager().encrypt("secret-value") existing_encrypted = EncryptionManager().encrypt("secret-value")
mock_provider = LLMProvider( mock_provider = LLMProvider(
@@ -114,3 +145,4 @@ def test_update_provider_ignores_masked_placeholder_api_key(service, mock_db):
assert updated.api_key == existing_encrypted assert updated.api_key == existing_encrypted
assert EncryptionManager().decrypt(updated.api_key) == "secret-value" assert EncryptionManager().decrypt(updated.api_key) == "secret-value"
assert updated.is_active is False assert updated.is_active is False
# [/DEF:test_update_provider_ignores_masked_placeholder_api_key:Function]
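
The last test encodes the update rule for secrets: a masked placeholder coming back from the UI must never overwrite the stored encrypted key. A sketch, with the placeholder literal and field handling assumed:

MASKED_PLACEHOLDER = "********"   # literal assumed

def apply_provider_update(provider, payload: dict, encryption_manager):
    api_key = payload.get("api_key")
    if api_key and api_key != MASKED_PLACEHOLDER:
        # Only a genuinely new key replaces (and re-encrypts) the stored one.
        provider.api_key = encryption_manager.encrypt(api_key)
    # Non-secret fields update normally.
    if "is_active" in payload:
        provider.is_active = payload["is_active"]
    return provider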

@@ -1,9 +1,9 @@
# [DEF:backend.src.services.__tests__.test_rbac_permission_catalog:Module] # [DEF:test_rbac_permission_catalog:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, rbac, permissions, catalog, discovery, sync # @SEMANTICS: tests, rbac, permissions, catalog, discovery, sync
# @PURPOSE: Verifies RBAC permission catalog discovery and idempotent synchronization behavior. # @PURPOSE: Verifies RBAC permission catalog discovery and idempotent synchronization behavior.
# @LAYER: Service Tests # @LAYER: Service Tests
# @RELATION: TESTS -> backend.src.services.rbac_permission_catalog
# @INVARIANT: Synchronization adds only missing normalized permission pairs. # @INVARIANT: Synchronization adds only missing normalized permission pairs.
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
@@ -15,6 +15,7 @@ import src.services.rbac_permission_catalog as catalog
# [DEF:test_discover_route_permissions_extracts_declared_pairs_and_ignores_tests:Function] # [DEF:test_discover_route_permissions_extracts_declared_pairs_and_ignores_tests:Function]
# @RELATION: BINDS_TO -> test_rbac_permission_catalog
# @PURPOSE: Ensures route-scanner extracts has_permission pairs from route files and skips __tests__. # @PURPOSE: Ensures route-scanner extracts has_permission pairs from route files and skips __tests__.
# @PRE: Temporary route directory contains route and test files. # @PRE: Temporary route directory contains route and test files.
# @POST: Returned set includes production route permissions and excludes test-only declarations. # @POST: Returned set includes production route permissions and excludes test-only declarations.
@@ -52,6 +53,7 @@ def test_discover_route_permissions_extracts_declared_pairs_and_ignores_tests(tm
# [DEF:test_discover_declared_permissions_unions_route_and_plugin_permissions:Function] # [DEF:test_discover_declared_permissions_unions_route_and_plugin_permissions:Function]
# @RELATION: BINDS_TO -> test_rbac_permission_catalog
# @PURPOSE: Ensures full catalog includes route-level permissions plus dynamic plugin EXECUTE rights. # @PURPOSE: Ensures full catalog includes route-level permissions plus dynamic plugin EXECUTE rights.
# @PRE: Route discovery and plugin loader both return permission sources. # @PRE: Route discovery and plugin loader both return permission sources.
# @POST: Result set contains union of both sources. # @POST: Result set contains union of both sources.
@@ -78,6 +80,7 @@ def test_discover_declared_permissions_unions_route_and_plugin_permissions(monke
# [DEF:test_sync_permission_catalog_inserts_only_missing_normalized_pairs:Function] # [DEF:test_sync_permission_catalog_inserts_only_missing_normalized_pairs:Function]
# @RELATION: BINDS_TO -> test_rbac_permission_catalog
# @PURPOSE: Ensures synchronization inserts only missing pairs and normalizes action/resource tokens. # @PURPOSE: Ensures synchronization inserts only missing pairs and normalizes action/resource tokens.
# @PRE: DB already contains subset of permissions. # @PRE: DB already contains subset of permissions.
# @POST: Only missing normalized pairs are inserted and commit is executed once. # @POST: Only missing normalized pairs are inserted and commit is executed once.
@@ -111,6 +114,7 @@ def test_sync_permission_catalog_inserts_only_missing_normalized_pairs():
# [DEF:test_sync_permission_catalog_is_noop_when_all_permissions_exist:Function] # [DEF:test_sync_permission_catalog_is_noop_when_all_permissions_exist:Function]
# @RELATION: BINDS_TO -> test_rbac_permission_catalog
# @PURPOSE: Ensures synchronization is idempotent when all declared pairs already exist. # @PURPOSE: Ensures synchronization is idempotent when all declared pairs already exist.
# @PRE: DB contains full declared permission set. # @PRE: DB contains full declared permission set.
# @POST: No inserts are added and commit is not called. # @POST: No inserts are added and commit is not called.
@@ -137,4 +141,4 @@ def test_sync_permission_catalog_is_noop_when_all_permissions_exist():
# [/DEF:test_sync_permission_catalog_is_noop_when_all_permissions_exist:Function] # [/DEF:test_sync_permission_catalog_is_noop_when_all_permissions_exist:Function]
# [/DEF:backend.src.services.__tests__.test_rbac_permission_catalog:Module] # [/DEF:test_rbac_permission_catalog:Module]
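
The synchronization invariants here (insert only missing pairs, normalize tokens, commit at most once, no-op when complete) fit in a few lines. A sketch with hypothetical repository helpers and an assumed normalization rule:

def sync_permission_catalog_sketch(db, declared_pairs):
    existing = {(p.action, p.resource) for p in db.query_existing_permissions()}
    missing = {
        (action.strip().upper(), resource.strip().lower())   # normalization assumed
        for action, resource in declared_pairs
    } - existing
    for action, resource in sorted(missing):
        db.add_permission(action=action, resource=resource)
    if missing:
        db.commit()   # exactly one commit, and only when something was added
    return missing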

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.auth_service:Module] # [DEF:auth_service:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: auth, service, business-logic, login, jwt, adfs, jit-provisioning # @SEMANTICS: auth, service, business-logic, login, jwt, adfs, jit-provisioning
# @PURPOSE: Orchestrates credential authentication and ADFS JIT user provisioning. # @PURPOSE: Orchestrates credential authentication and ADFS JIT user provisioning.
@@ -30,7 +30,7 @@ from ..core.logger import belief_scope
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Provides high-level authentication services. # @PURPOSE: Provides high-level authentication services.
class AuthService: class AuthService:
# [DEF:AuthService.__init__:Function] # [DEF:AuthService_init:Function]
# @COMPLEXITY: 1 # @COMPLEXITY: 1
# @PURPOSE: Initializes the authentication service with repository access over an active DB session. # @PURPOSE: Initializes the authentication service with repository access over an active DB session.
# @PRE: db is a valid SQLAlchemy Session instance bound to the auth persistence context. # @PRE: db is a valid SQLAlchemy Session instance bound to the auth persistence context.
@@ -41,9 +41,9 @@ class AuthService:
def __init__(self, db: Session): def __init__(self, db: Session):
self.db = db self.db = db
self.repo = AuthRepository(db) self.repo = AuthRepository(db)
# [/DEF:AuthService.__init__:Function] # [/DEF:AuthService_init:Function]
# [DEF:AuthService.authenticate_user:Function] # [DEF:authenticate_user:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Validates credentials and account state for local username/password authentication. # @PURPOSE: Validates credentials and account state for local username/password authentication.
# @PRE: username and password are non-empty credential inputs. # @PRE: username and password are non-empty credential inputs.
@@ -68,9 +68,9 @@ class AuthService:
self.db.refresh(user) self.db.refresh(user)
return user return user
# [/DEF:AuthService.authenticate_user:Function] # [/DEF:authenticate_user:Function]
# [DEF:AuthService.create_session:Function] # [DEF:create_session:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Issues an access token payload for an already authenticated user. # @PURPOSE: Issues an access token payload for an already authenticated user.
# @PRE: user is a valid User entity containing username and iterable roles with role.name values. # @PRE: user is a valid User entity containing username and iterable roles with role.name values.
@@ -86,9 +86,9 @@ class AuthService:
data={"sub": user.username, "scopes": roles} data={"sub": user.username, "scopes": roles}
) )
return {"access_token": access_token, "token_type": "bearer"} return {"access_token": access_token, "token_type": "bearer"}
# [/DEF:AuthService.create_session:Function] # [/DEF:create_session:Function]
# [DEF:AuthService.provision_adfs_user:Function] # [DEF:provision_adfs_user:Function]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @PURPOSE: Performs ADFS Just-In-Time provisioning and role synchronization from AD group mappings. # @PURPOSE: Performs ADFS Just-In-Time provisioning and role synchronization from AD group mappings.
# @PRE: user_info contains identity claims where at least one of 'upn' or 'email' is present; 'groups' may be absent. # @PRE: user_info contains identity claims where at least one of 'upn' or 'email' is present; 'groups' may be absent.
@@ -125,7 +125,7 @@ class AuthService:
self.db.refresh(user) self.db.refresh(user)
return user return user
# [/DEF:AuthService.provision_adfs_user:Function] # [/DEF:provision_adfs_user:Function]
# [/DEF:AuthService:Class] # [/DEF:AuthService:Class]
# [/DEF:backend.src.services.auth_service:Module] # [/DEF:auth_service:Module]
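
The JIT provisioning contract described by the annotations (identity from 'upn' or 'email', roles synchronized from AD group mappings, 'groups' optional) can be sketched as follows; the repository methods and the group-to-role table are hypothetical:

GROUP_ROLE_MAP = {"SS-Admins": "admin", "SS-Users": "user"}   # mapping assumed

def provision_adfs_user_sketch(repo, user_info: dict):
    identity = user_info.get("upn") or user_info.get("email")
    if not identity:
        raise ValueError("ADFS claims must include 'upn' or 'email'")
    user = repo.get_user_by_username(identity) or repo.create_user(username=identity)
    roles = {GROUP_ROLE_MAP[g] for g in user_info.get("groups", []) if g in GROUP_ROLE_MAP}
    repo.set_roles(user, sorted(roles))   # sync roles from AD group mappings
    return user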

@@ -1,27 +1,53 @@
# [DEF:backend.tests.services.clean_release.test_audit_service:Module] # [DEF:TestAuditService:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, clean-release, audit, logging # @SEMANTICS: tests, clean-release, audit, logging
# @PURPOSE: Validate audit hooks emit expected log patterns for clean release lifecycle. # @PURPOSE: Validate audit hooks emit expected log patterns for clean release lifecycle.
# @LAYER: Infra # @LAYER: Infra
# @RELATION: TESTS -> backend.src.services.clean_release.audit_service
from unittest.mock import patch from unittest.mock import patch
from src.services.clean_release.audit_service import audit_preparation, audit_check_run, audit_report from src.services.clean_release.audit_service import (
audit_preparation,
audit_check_run,
audit_report,
)
@patch("src.services.clean_release.audit_service.logger") @patch("src.services.clean_release.audit_service.logger")
# [DEF:test_audit_preparation:Function]
# @RELATION: BINDS_TO -> TestAuditService
def test_audit_preparation(mock_logger): def test_audit_preparation(mock_logger):
audit_preparation("cand-1", "PREPARED") audit_preparation("cand-1", "PREPARED")
mock_logger.info.assert_called_with("[REASON] clean-release preparation candidate=cand-1 status=PREPARED") mock_logger.info.assert_called_with(
"[REASON] clean-release preparation candidate=cand-1 status=PREPARED"
)
# [/DEF:test_audit_preparation:Function]
@patch("src.services.clean_release.audit_service.logger") @patch("src.services.clean_release.audit_service.logger")
# [DEF:test_audit_check_run:Function]
# @RELATION: BINDS_TO -> TestAuditService
def test_audit_check_run(mock_logger): def test_audit_check_run(mock_logger):
audit_check_run("check-1", "COMPLIANT") audit_check_run("check-1", "COMPLIANT")
mock_logger.info.assert_called_with("[REFLECT] clean-release check_run=check-1 final_status=COMPLIANT") mock_logger.info.assert_called_with(
"[REFLECT] clean-release check_run=check-1 final_status=COMPLIANT"
)
# [/DEF:test_audit_check_run:Function]
@patch("src.services.clean_release.audit_service.logger") @patch("src.services.clean_release.audit_service.logger")
# [DEF:test_audit_report:Function]
# @RELATION: BINDS_TO -> TestAuditService
def test_audit_report(mock_logger): def test_audit_report(mock_logger):
audit_report("rep-1", "cand-1") audit_report("rep-1", "cand-1")
mock_logger.info.assert_called_with("[EXPLORE] clean-release report_id=rep-1 candidate=cand-1") mock_logger.info.assert_called_with(
"[EXPLORE] clean-release report_id=rep-1 candidate=cand-1"
)
# [/DEF:backend.tests.services.clean_release.test_audit_service:Module] # [/DEF:test_audit_report:Function]
# [/DEF:TestAuditService:Module]
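
The three audit hooks reduce to fixed log lines; the strings below are exactly what the assertions expect, with only the logger wiring assumed:

import logging

logger = logging.getLogger("clean_release.audit")

def audit_preparation(candidate_id: str, status: str) -> None:
    logger.info(f"[REASON] clean-release preparation candidate={candidate_id} status={status}")

def audit_check_run(check_run_id: str, final_status: str) -> None:
    logger.info(f"[REFLECT] clean-release check_run={check_run_id} final_status={final_status}")

def audit_report(report_id: str, candidate_id: str) -> None:
    logger.info(f"[EXPLORE] clean-release report_id={report_id} candidate={candidate_id}")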

@@ -1,9 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_compliance_orchestrator:Module] # [DEF:TestComplianceOrchestrator:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, clean-release, orchestrator, stage-state-machine # @SEMANTICS: tests, clean-release, orchestrator, stage-state-machine
# @PURPOSE: Validate compliance orchestrator stage transitions and final status derivation. # @PURPOSE: Validate compliance orchestrator stage transitions and final status derivation.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.clean_release.compliance_orchestrator
# @INVARIANT: Failed mandatory stage forces BLOCKED terminal status. # @INVARIANT: Failed mandatory stage forces BLOCKED terminal status.
from unittest.mock import patch from unittest.mock import patch
@@ -22,6 +22,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:test_orchestrator_stage_failure_blocks_release:Function] # [DEF:test_orchestrator_stage_failure_blocks_release:Function]
# @RELATION: BINDS_TO -> TestComplianceOrchestrator
# @PURPOSE: Verify mandatory stage failure forces BLOCKED final status. # @PURPOSE: Verify mandatory stage failure forces BLOCKED final status.
def test_orchestrator_stage_failure_blocks_release(): def test_orchestrator_stage_failure_blocks_release():
repository = CleanReleaseRepository() repository = CleanReleaseRepository()
@@ -49,6 +50,7 @@ def test_orchestrator_stage_failure_blocks_release():
# [DEF:test_orchestrator_compliant_candidate:Function] # [DEF:test_orchestrator_compliant_candidate:Function]
# @RELATION: BINDS_TO -> TestComplianceOrchestrator
# @PURPOSE: Verify happy path where all mandatory stages pass yields COMPLIANT. # @PURPOSE: Verify happy path where all mandatory stages pass yields COMPLIANT.
def test_orchestrator_compliant_candidate(): def test_orchestrator_compliant_candidate():
repository = CleanReleaseRepository() repository = CleanReleaseRepository()
@@ -76,6 +78,7 @@ def test_orchestrator_compliant_candidate():
# [DEF:test_orchestrator_missing_stage_result:Function] # [DEF:test_orchestrator_missing_stage_result:Function]
# @RELATION: BINDS_TO -> TestComplianceOrchestrator
# @PURPOSE: Verify incomplete mandatory stage set cannot end as COMPLIANT and results in FAILED. # @PURPOSE: Verify incomplete mandatory stage set cannot end as COMPLIANT and results in FAILED.
def test_orchestrator_missing_stage_result(): def test_orchestrator_missing_stage_result():
repository = CleanReleaseRepository() repository = CleanReleaseRepository()
@@ -93,6 +96,7 @@ def test_orchestrator_missing_stage_result():
# [DEF:test_orchestrator_report_generation_error:Function] # [DEF:test_orchestrator_report_generation_error:Function]
# @RELATION: BINDS_TO -> TestComplianceOrchestrator
# @PURPOSE: Verify downstream report errors do not mutate orchestrator final status. # @PURPOSE: Verify downstream report errors do not mutate orchestrator final status.
def test_orchestrator_report_generation_error(): def test_orchestrator_report_generation_error():
repository = CleanReleaseRepository() repository = CleanReleaseRepository()
@@ -109,4 +113,4 @@ def test_orchestrator_report_generation_error():
assert run.final_status == CheckFinalStatus.FAILED assert run.final_status == CheckFinalStatus.FAILED
# [/DEF:test_orchestrator_report_generation_error:Function] # [/DEF:test_orchestrator_report_generation_error:Function]
# [/DEF:backend.tests.services.clean_release.test_compliance_orchestrator:Module] # [/DEF:TestComplianceOrchestrator:Module]
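
The three terminal outcomes exercised above follow one derivation rule. A sketch using plain strings in place of the CheckStageStatus/CheckFinalStatus enums, with the mandatory stage names illustrative only:

MANDATORY_STAGES = ["policy", "sources", "artifacts"]   # names illustrative

def derive_final_status_sketch(results: list[dict]) -> str:
    by_stage = {r["stage"]: r["status"] for r in results}
    if any(by_stage.get(stage) == "FAIL" for stage in MANDATORY_STAGES):
        return "BLOCKED"      # a failed mandatory stage always blocks the release
    if any(stage not in by_stage for stage in MANDATORY_STAGES):
        return "FAILED"       # an incomplete mandatory set can never be compliant
    if all(by_stage[stage] == "PASS" for stage in MANDATORY_STAGES):
        return "COMPLIANT"
    return "FAILED"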

@@ -1,4 +1,4 @@
# [DEF:backend.tests.services.clean_release.test_manifest_builder:Module] # [DEF:TestManifestBuilder:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: tests, clean-release, manifest, deterministic # @SEMANTICS: tests, clean-release, manifest, deterministic
# @PURPOSE: Validate deterministic manifest generation behavior for US1. # @PURPOSE: Validate deterministic manifest generation behavior for US1.
@@ -10,6 +10,7 @@ from src.services.clean_release.manifest_builder import build_distribution_manif
# [DEF:test_manifest_deterministic_hash_for_same_input:Function] # [DEF:test_manifest_deterministic_hash_for_same_input:Function]
# @RELATION: BINDS_TO -> TestManifestBuilder
# @PURPOSE: Ensure hash is stable for same candidate/policy/artifact input. # @PURPOSE: Ensure hash is stable for same candidate/policy/artifact input.
# @PRE: Same input lists are passed twice. # @PRE: Same input lists are passed twice.
# @POST: Hash and summary remain identical. # @POST: Hash and summary remain identical.
@@ -38,4 +39,4 @@ def test_manifest_deterministic_hash_for_same_input():
assert manifest1.summary.included_count == manifest2.summary.included_count assert manifest1.summary.included_count == manifest2.summary.included_count
assert manifest1.summary.excluded_count == manifest2.summary.excluded_count assert manifest1.summary.excluded_count == manifest2.summary.excluded_count
# [/DEF:test_manifest_deterministic_hash_for_same_input:Function] # [/DEF:test_manifest_deterministic_hash_for_same_input:Function]
# [/DEF:backend.tests.services.clean_release.test_manifest_builder:Module] # [/DEF:TestManifestBuilder:Module]
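
Determinism of the manifest hash is easiest to get from a canonical serialization of the inputs. The real builder may do it differently; a sketch:

import hashlib
import json

def manifest_hash_sketch(candidate_id: str, policy_id: str, artifacts: list[dict]) -> str:
    # Canonical JSON (sorted keys, fixed separators) makes the digest input-stable.
    canonical = json.dumps(
        {"candidate": candidate_id, "policy": policy_id, "artifacts": artifacts},
        sort_keys=True,
        separators=(",", ":"),
    )
    return hashlib.sha256(canonical.encode()).hexdigest()

# Same inputs twice yield the same hash, which is what the test checks on the manifest.
a = [{"path": "file1.txt", "category": "system"}]
assert manifest_hash_sketch("cand-1", "pol-1", a) == manifest_hash_sketch("cand-1", "pol-1", a)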

@@ -40,6 +40,8 @@ def enterprise_clean_setup():
return policy, registry return policy, registry
# @TEST_SCENARIO: policy_valid # @TEST_SCENARIO: policy_valid
# [DEF:test_policy_valid:Function]
# @RELATION: BINDS_TO -> __tests__/test_policy_engine
def test_policy_valid(enterprise_clean_setup): def test_policy_valid(enterprise_clean_setup):
policy, registry = enterprise_clean_setup policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry) engine = CleanPolicyEngine(policy, registry)
@@ -48,6 +50,10 @@ def test_policy_valid(enterprise_clean_setup):
assert not result.blocking_reasons assert not result.blocking_reasons
# @TEST_EDGE: missing_registry_ref # @TEST_EDGE: missing_registry_ref
# [/DEF:test_policy_valid:Function]
# [DEF:test_missing_registry_ref:Function]
# @RELATION: BINDS_TO -> __tests__/test_policy_engine
def test_missing_registry_ref(enterprise_clean_setup): def test_missing_registry_ref(enterprise_clean_setup):
policy, registry = enterprise_clean_setup policy, registry = enterprise_clean_setup
policy.internal_source_registry_ref = " " policy.internal_source_registry_ref = " "
@@ -57,6 +63,10 @@ def test_missing_registry_ref(enterprise_clean_setup):
assert "Policy missing internal_source_registry_ref" in result.blocking_reasons assert "Policy missing internal_source_registry_ref" in result.blocking_reasons
# @TEST_EDGE: conflicting_registry # @TEST_EDGE: conflicting_registry
# [/DEF:test_missing_registry_ref:Function]
# [DEF:test_conflicting_registry:Function]
# @RELATION: BINDS_TO -> __tests__/test_policy_engine
def test_conflicting_registry(enterprise_clean_setup): def test_conflicting_registry(enterprise_clean_setup):
policy, registry = enterprise_clean_setup policy, registry = enterprise_clean_setup
registry.registry_id = "WRONG-REG" registry.registry_id = "WRONG-REG"
@@ -66,6 +76,10 @@ def test_conflicting_registry(enterprise_clean_setup):
assert "Policy registry ref does not match provided registry" in result.blocking_reasons assert "Policy registry ref does not match provided registry" in result.blocking_reasons
# @TEST_INVARIANT: deterministic_classification # @TEST_INVARIANT: deterministic_classification
# [/DEF:test_conflicting_registry:Function]
# [DEF:test_classify_artifact:Function]
# @RELATION: BINDS_TO -> __tests__/test_policy_engine
def test_classify_artifact(enterprise_clean_setup): def test_classify_artifact(enterprise_clean_setup):
policy, registry = enterprise_clean_setup policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry) engine = CleanPolicyEngine(policy, registry)
@@ -78,6 +92,10 @@ def test_classify_artifact(enterprise_clean_setup):
assert engine.classify_artifact({"category": "others", "path": "p3"}) == "allowed" assert engine.classify_artifact({"category": "others", "path": "p3"}) == "allowed"
# @TEST_EDGE: external_endpoint # @TEST_EDGE: external_endpoint
# [/DEF:test_classify_artifact:Function]
# [DEF:test_validate_resource_source:Function]
# @RELATION: BINDS_TO -> __tests__/test_policy_engine
def test_validate_resource_source(enterprise_clean_setup): def test_validate_resource_source(enterprise_clean_setup):
policy, registry = enterprise_clean_setup policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry) engine = CleanPolicyEngine(policy, registry)
@@ -92,6 +110,10 @@ def test_validate_resource_source(enterprise_clean_setup):
assert res_fail.violation["category"] == "external-source" assert res_fail.violation["category"] == "external-source"
assert res_fail.violation["blocked_release"] is True assert res_fail.violation["blocked_release"] is True
# [/DEF:test_validate_resource_source:Function]
# [DEF:test_evaluate_candidate:Function]
# @RELATION: BINDS_TO -> __tests__/test_policy_engine
def test_evaluate_candidate(enterprise_clean_setup): def test_evaluate_candidate(enterprise_clean_setup):
policy, registry = enterprise_clean_setup policy, registry = enterprise_clean_setup
engine = CleanPolicyEngine(policy, registry) engine = CleanPolicyEngine(policy, registry)
@@ -112,3 +134,4 @@ def test_evaluate_candidate(enterprise_clean_setup):
assert len(violations) == 2 assert len(violations) == 2
assert violations[0]["category"] == "data-purity" assert violations[0]["category"] == "data-purity"
assert violations[1]["category"] == "external-source" assert violations[1]["category"] == "external-source"
# [/DEF:test_evaluate_candidate:Function]
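
The classification labels asserted across these tests suggest a small deterministic mapping. Only the "others" case is asserted directly here; the other two labels are taken from the preparation-service fixtures, so treat this as a sketch:

def classify_artifact_sketch(artifact: dict) -> str:
    category = artifact.get("category", "others")
    if category == "prohibited":
        return "excluded-prohibited"   # e.g. test data never ships
    if category == "system":
        return "required-system"       # core system files always ship
    return "allowed"

assert classify_artifact_sketch({"category": "others", "path": "p3"}) == "allowed"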

@@ -1,4 +1,4 @@
# [DEF:backend.tests.services.clean_release.test_preparation_service:Module] # [DEF:TestPreparationService:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, clean-release, preparation, flow # @SEMANTICS: tests, clean-release, preparation, flow
# @PURPOSE: Validate release candidate preparation flow, including policy evaluation and manifest persisting. # @PURPOSE: Validate release candidate preparation flow, including policy evaluation and manifest persisting.
@@ -17,11 +17,13 @@ from src.models.clean_release import (
ReleaseCandidate, ReleaseCandidate,
ReleaseCandidateStatus, ReleaseCandidateStatus,
ProfileType, ProfileType,
DistributionManifest DistributionManifest,
) )
from src.services.clean_release.preparation_service import prepare_candidate from src.services.clean_release.preparation_service import prepare_candidate
# [DEF:backend.tests.services.clean_release.test_preparation_service._mock_policy:Function]
# [DEF:_mock_policy:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Build a valid clean profile policy fixture for preparation tests. # @PURPOSE: Build a valid clean profile policy fixture for preparation tests.
def _mock_policy() -> CleanProfilePolicy: def _mock_policy() -> CleanProfilePolicy:
return CleanProfilePolicy( return CleanProfilePolicy(
@@ -35,21 +37,37 @@ def _mock_policy() -> CleanProfilePolicy:
effective_from=datetime.now(timezone.utc), effective_from=datetime.now(timezone.utc),
profile=ProfileType.ENTERPRISE_CLEAN, profile=ProfileType.ENTERPRISE_CLEAN,
) )
# [/DEF:backend.tests.services.clean_release.test_preparation_service._mock_policy:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service._mock_registry:Function]
# [/DEF:_mock_policy:Function]
# [DEF:_mock_registry:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Build an internal-only source registry fixture for preparation tests. # @PURPOSE: Build an internal-only source registry fixture for preparation tests.
def _mock_registry() -> ResourceSourceRegistry: def _mock_registry() -> ResourceSourceRegistry:
return ResourceSourceRegistry( return ResourceSourceRegistry(
registry_id="reg-1", registry_id="reg-1",
name="Reg", name="Reg",
entries=[ResourceSourceEntry(source_id="s1", host="nexus.internal", protocol="https", purpose="pkg", enabled=True)], entries=[
updated_at=datetime.now(timezone.utc), ResourceSourceEntry(
updated_by="tester" source_id="s1",
host="nexus.internal",
protocol="https",
purpose="pkg",
enabled=True,
)
],
updated_at=datetime.now(timezone.utc),
updated_by="tester",
) )
# [/DEF:backend.tests.services.clean_release.test_preparation_service._mock_registry:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service._mock_candidate:Function]
# [/DEF:_mock_registry:Function]
# [DEF:_mock_candidate:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Build a draft release candidate fixture with provided identifier. # @PURPOSE: Build a draft release candidate fixture with provided identifier.
def _mock_candidate(candidate_id: str) -> ReleaseCandidate: def _mock_candidate(candidate_id: str) -> ReleaseCandidate:
return ReleaseCandidate( return ReleaseCandidate(
@@ -59,11 +77,15 @@ def _mock_candidate(candidate_id: str) -> ReleaseCandidate:
created_at=datetime.now(timezone.utc), created_at=datetime.now(timezone.utc),
status=ReleaseCandidateStatus.DRAFT, status=ReleaseCandidateStatus.DRAFT,
created_by="tester", created_by="tester",
source_snapshot_ref="v1.0.0-snapshot" source_snapshot_ref="v1.0.0-snapshot",
) )
# [/DEF:backend.tests.services.clean_release.test_preparation_service._mock_candidate:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_success:Function]
# [/DEF:_mock_candidate:Function]
# [DEF:test_prepare_candidate_success:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Verify candidate transitions to PREPARED when evaluation returns no violations. # @PURPOSE: Verify candidate transitions to PREPARED when evaluation returns no violations.
# @TEST_CONTRACT: [valid_candidate + active_policy + internal_sources + no_violations] -> [status=PREPARED, manifest_persisted, candidate_saved] # @TEST_CONTRACT: [valid_candidate + active_policy + internal_sources + no_violations] -> [status=PREPARED, manifest_persisted, candidate_saved]
# @TEST_SCENARIO: [prepare_success] -> [prepared status and persistence side effects are produced] # @TEST_SCENARIO: [prepare_success] -> [prepared status and persistence side effects are produced]
@@ -83,24 +105,39 @@ def test_prepare_candidate_success():
sources = ["nexus.internal"] sources = ["nexus.internal"]
# Execute # Execute
with patch("src.services.clean_release.preparation_service.CleanPolicyEngine") as MockEngine: with patch(
"src.services.clean_release.preparation_service.CleanPolicyEngine"
) as MockEngine:
mock_engine_instance = MockEngine.return_value mock_engine_instance = MockEngine.return_value
mock_engine_instance.validate_policy.return_value.ok = True mock_engine_instance.validate_policy.return_value.ok = True
mock_engine_instance.evaluate_candidate.return_value = ( mock_engine_instance.evaluate_candidate.return_value = (
[{"path": "file1.txt", "category": "system", "classification": "required-system", "reason": "system-core"}], [
[] {
"path": "file1.txt",
"category": "system",
"classification": "required-system",
"reason": "system-core",
}
],
[],
) )
result = prepare_candidate(repository, candidate_id, artifacts, sources, "operator-1") result = prepare_candidate(
repository, candidate_id, artifacts, sources, "operator-1"
)
# Verify # Verify
assert result["status"] == ReleaseCandidateStatus.PREPARED.value assert result["status"] == ReleaseCandidateStatus.PREPARED.value
assert candidate.status == ReleaseCandidateStatus.PREPARED assert candidate.status == ReleaseCandidateStatus.PREPARED
repository.save_manifest.assert_called_once() repository.save_manifest.assert_called_once()
repository.save_candidate.assert_called_with(candidate) repository.save_candidate.assert_called_with(candidate)
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_success:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_with_violations:Function]
# [/DEF:test_prepare_candidate_success:Function]
# [DEF:test_prepare_candidate_with_violations:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Verify candidate transitions to BLOCKED when evaluation returns blocking violations. # @PURPOSE: Verify candidate transitions to BLOCKED when evaluation returns blocking violations.
# @TEST_CONTRACT: [valid_candidate + active_policy + evaluation_with_violations] -> [status=BLOCKED, violations_exposed] # @TEST_CONTRACT: [valid_candidate + active_policy + evaluation_with_violations] -> [status=BLOCKED, violations_exposed]
# @TEST_SCENARIO: [prepare_blocked_due_to_policy] -> [blocked status and violation list are produced] # @TEST_SCENARIO: [prepare_blocked_due_to_policy] -> [blocked status and violation list are produced]
@@ -120,23 +157,38 @@ def test_prepare_candidate_with_violations():
sources = [] sources = []
# Execute # Execute
with patch("src.services.clean_release.preparation_service.CleanPolicyEngine") as MockEngine: with patch(
"src.services.clean_release.preparation_service.CleanPolicyEngine"
) as MockEngine:
mock_engine_instance = MockEngine.return_value mock_engine_instance = MockEngine.return_value
mock_engine_instance.validate_policy.return_value.ok = True mock_engine_instance.validate_policy.return_value.ok = True
mock_engine_instance.evaluate_candidate.return_value = ( mock_engine_instance.evaluate_candidate.return_value = (
[{"path": "bad.txt", "category": "prohibited", "classification": "excluded-prohibited", "reason": "test-data"}], [
[{"category": "data-purity", "blocked_release": True}] {
"path": "bad.txt",
"category": "prohibited",
"classification": "excluded-prohibited",
"reason": "test-data",
}
],
[{"category": "data-purity", "blocked_release": True}],
) )
result = prepare_candidate(repository, candidate_id, artifacts, sources, "operator-1") result = prepare_candidate(
repository, candidate_id, artifacts, sources, "operator-1"
)
# Verify # Verify
assert result["status"] == ReleaseCandidateStatus.BLOCKED.value assert result["status"] == ReleaseCandidateStatus.BLOCKED.value
assert candidate.status == ReleaseCandidateStatus.BLOCKED assert candidate.status == ReleaseCandidateStatus.BLOCKED
assert len(result["violations"]) == 1 assert len(result["violations"]) == 1
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_with_violations:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_not_found:Function]
# [/DEF:test_prepare_candidate_with_violations:Function]
# [DEF:test_prepare_candidate_not_found:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Verify preparation raises ValueError when candidate does not exist. # @PURPOSE: Verify preparation raises ValueError when candidate does not exist.
# @TEST_CONTRACT: [missing_candidate] -> [ValueError('Candidate not found')] # @TEST_CONTRACT: [missing_candidate] -> [ValueError('Candidate not found')]
# @TEST_SCENARIO: [prepare_missing_candidate] -> [raises candidate not found error] # @TEST_SCENARIO: [prepare_missing_candidate] -> [raises candidate not found error]
@@ -149,9 +201,13 @@ def test_prepare_candidate_not_found():
with pytest.raises(ValueError, match="Candidate not found"): with pytest.raises(ValueError, match="Candidate not found"):
prepare_candidate(repository, "non-existent", [], [], "op") prepare_candidate(repository, "non-existent", [], [], "op")
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_not_found:Function]
# [DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_no_active_policy:Function]
# [/DEF:test_prepare_candidate_not_found:Function]
# [DEF:test_prepare_candidate_no_active_policy:Function]
# @RELATION: BINDS_TO -> TestPreparationService
# @PURPOSE: Verify preparation raises ValueError when no active policy is available. # @PURPOSE: Verify preparation raises ValueError when no active policy is available.
# @TEST_CONTRACT: [candidate_present + missing_active_policy] -> [ValueError('Active clean policy not found')] # @TEST_CONTRACT: [candidate_present + missing_active_policy] -> [ValueError('Active clean policy not found')]
# @TEST_SCENARIO: [prepare_missing_policy] -> [raises active policy missing error] # @TEST_SCENARIO: [prepare_missing_policy] -> [raises active policy missing error]
@@ -165,7 +221,9 @@ def test_prepare_candidate_no_active_policy():
with pytest.raises(ValueError, match="Active clean policy not found"): with pytest.raises(ValueError, match="Active clean policy not found"):
prepare_candidate(repository, "cand-1", [], [], "op") prepare_candidate(repository, "cand-1", [], [], "op")
# [/DEF:backend.tests.services.clean_release.test_preparation_service.test_prepare_candidate_no_active_policy:Function]
# [/DEF:backend.tests.services.clean_release.test_preparation_service:Module] # [/DEF:test_prepare_candidate_no_active_policy:Function]
# [/DEF:TestPreparationService:Module]
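
The four tests above pin down the preparation flow end to end. A condensed sketch; save_manifest and save_candidate follow the mocks used here, while the lookup methods and the decision to skip manifest persistence on violations are assumptions:

def prepare_candidate_sketch(repository, engine, candidate_id, artifacts, sources, operator):
    candidate = repository.get_candidate(candidate_id)           # method name assumed
    if candidate is None:
        raise ValueError("Candidate not found")
    if repository.get_active_policy() is None:                   # method name assumed
        raise ValueError("Active clean policy not found")

    manifest_entries, violations = engine.evaluate_candidate(artifacts, sources)
    candidate.status = "BLOCKED" if violations else "PREPARED"
    if not violations:
        repository.save_manifest(manifest_entries)
    repository.save_candidate(candidate)
    return {"status": candidate.status, "violations": violations}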

@@ -1,9 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_report_builder:Module] # [DEF:TestReportBuilder:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, clean-release, report-builder, counters # @SEMANTICS: tests, clean-release, report-builder, counters
# @PURPOSE: Validate compliance report builder counter integrity and blocked-run constraints. # @PURPOSE: Validate compliance report builder counter integrity and blocked-run constraints.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.clean_release.report_builder
# @INVARIANT: blocked run requires at least one blocking violation. # @INVARIANT: blocked run requires at least one blocking violation.
from datetime import datetime, timezone from datetime import datetime, timezone
@@ -23,6 +23,7 @@ from src.services.clean_release.repository import CleanReleaseRepository
# [DEF:_terminal_run:Function] # [DEF:_terminal_run:Function]
# @RELATION: BINDS_TO -> TestReportBuilder
# @PURPOSE: Build terminal/non-terminal run fixtures for report builder tests. # @PURPOSE: Build terminal/non-terminal run fixtures for report builder tests.
def _terminal_run(status: CheckFinalStatus) -> ComplianceCheckRun: def _terminal_run(status: CheckFinalStatus) -> ComplianceCheckRun:
return ComplianceCheckRun( return ComplianceCheckRun(
@@ -40,6 +41,7 @@ def _terminal_run(status: CheckFinalStatus) -> ComplianceCheckRun:
# [DEF:_blocking_violation:Function] # [DEF:_blocking_violation:Function]
# @RELATION: BINDS_TO -> TestReportBuilder
# @PURPOSE: Build a blocking violation fixture for blocked report scenarios. # @PURPOSE: Build a blocking violation fixture for blocked report scenarios.
def _blocking_violation() -> ComplianceViolation: def _blocking_violation() -> ComplianceViolation:
return ComplianceViolation( return ComplianceViolation(
@@ -56,6 +58,7 @@ def _blocking_violation() -> ComplianceViolation:
# [DEF:test_report_builder_blocked_requires_blocking_violations:Function] # [DEF:test_report_builder_blocked_requires_blocking_violations:Function]
# @RELATION: BINDS_TO -> TestReportBuilder
# @PURPOSE: Verify BLOCKED run requires at least one blocking violation. # @PURPOSE: Verify BLOCKED run requires at least one blocking violation.
def test_report_builder_blocked_requires_blocking_violations(): def test_report_builder_blocked_requires_blocking_violations():
builder = ComplianceReportBuilder(CleanReleaseRepository()) builder = ComplianceReportBuilder(CleanReleaseRepository())
@@ -67,6 +70,7 @@ def test_report_builder_blocked_requires_blocking_violations():
# [DEF:test_report_builder_blocked_with_two_violations:Function] # [DEF:test_report_builder_blocked_with_two_violations:Function]
# @RELATION: BINDS_TO -> TestReportBuilder
# @PURPOSE: Verify report builder generates conformant payload for a BLOCKED run with violations. # @PURPOSE: Verify report builder generates conformant payload for a BLOCKED run with violations.
def test_report_builder_blocked_with_two_violations(): def test_report_builder_blocked_with_two_violations():
builder = ComplianceReportBuilder(CleanReleaseRepository()) builder = ComplianceReportBuilder(CleanReleaseRepository())
@@ -87,6 +91,7 @@ def test_report_builder_blocked_with_two_violations():
# [DEF:test_report_builder_counter_consistency:Function] # [DEF:test_report_builder_counter_consistency:Function]
# @RELATION: BINDS_TO -> TestReportBuilder
# @PURPOSE: Verify violations counters remain consistent for blocking payload. # @PURPOSE: Verify violations counters remain consistent for blocking payload.
def test_report_builder_counter_consistency(): def test_report_builder_counter_consistency():
builder = ComplianceReportBuilder(CleanReleaseRepository()) builder = ComplianceReportBuilder(CleanReleaseRepository())
@@ -99,6 +104,7 @@ def test_report_builder_counter_consistency():
# [DEF:test_missing_operator_summary:Function] # [DEF:test_missing_operator_summary:Function]
# @RELATION: BINDS_TO -> TestReportBuilder
# @PURPOSE: Validate non-terminal run prevents operator summary/report generation. # @PURPOSE: Validate non-terminal run prevents operator summary/report generation.
def test_missing_operator_summary(): def test_missing_operator_summary():
builder = ComplianceReportBuilder(CleanReleaseRepository()) builder = ComplianceReportBuilder(CleanReleaseRepository())
@@ -109,4 +115,4 @@ def test_missing_operator_summary():
assert "Cannot build report for non-terminal run" in str(exc.value) assert "Cannot build report for non-terminal run" in str(exc.value)
# [/DEF:test_missing_operator_summary:Function] # [/DEF:test_missing_operator_summary:Function]
# [/DEF:backend.tests.services.clean_release.test_report_builder:Module] # [/DEF:TestReportBuilder:Module]
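
Two guard rails carry this file: reports exist only for terminal runs, and a BLOCKED run must carry at least one blocking violation. A sketch with the terminal-status set and the blocked-run error text assumed:

TERMINAL_STATUSES = {"COMPLIANT", "BLOCKED", "FAILED"}   # set assumed

def build_report_sketch(run_status: str, violations: list[dict]) -> dict:
    if run_status not in TERMINAL_STATUSES:
        raise ValueError("Cannot build report for non-terminal run")
    blocking = [v for v in violations if v.get("blocked_release")]
    if run_status == "BLOCKED" and not blocking:
        raise ValueError("BLOCKED run requires at least one blocking violation")
    return {
        "final_status": run_status,
        "violations_total": len(violations),
        "violations_blocking": len(blocking),   # counters stay consistent
    }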

@@ -1,9 +1,9 @@
# [DEF:backend.tests.services.clean_release.test_source_isolation:Module] # [DEF:TestSourceIsolation:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, clean-release, source-isolation, internal-only # @SEMANTICS: tests, clean-release, source-isolation, internal-only
# @PURPOSE: Verify internal source registry validation behavior. # @PURPOSE: Verify internal source registry validation behavior.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.clean_release.source_isolation
# @INVARIANT: External endpoints always produce blocking violations. # @INVARIANT: External endpoints always produce blocking violations.
from datetime import datetime, timezone from datetime import datetime, timezone
@@ -12,6 +12,8 @@ from src.models.clean_release import ResourceSourceEntry, ResourceSourceRegistry
from src.services.clean_release.source_isolation import validate_internal_sources from src.services.clean_release.source_isolation import validate_internal_sources
# [DEF:_registry:Function]
# @RELATION: BINDS_TO -> TestSourceIsolation
def _registry() -> ResourceSourceRegistry: def _registry() -> ResourceSourceRegistry:
return ResourceSourceRegistry( return ResourceSourceRegistry(
registry_id="registry-internal-v1", registry_id="registry-internal-v1",
@@ -38,6 +40,11 @@ def _registry() -> ResourceSourceRegistry:
) )
# [/DEF:_registry:Function]
# [DEF:test_validate_internal_sources_all_internal_ok:Function]
# @RELATION: BINDS_TO -> TestSourceIsolation
def test_validate_internal_sources_all_internal_ok(): def test_validate_internal_sources_all_internal_ok():
result = validate_internal_sources( result = validate_internal_sources(
registry=_registry(), registry=_registry(),
@@ -47,6 +54,11 @@ def test_validate_internal_sources_all_internal_ok():
assert result["violations"] == [] assert result["violations"] == []
# [/DEF:test_validate_internal_sources_all_internal_ok:Function]
# [DEF:test_validate_internal_sources_external_blocked:Function]
# @RELATION: BINDS_TO -> TestSourceIsolation
def test_validate_internal_sources_external_blocked(): def test_validate_internal_sources_external_blocked():
result = validate_internal_sources( result = validate_internal_sources(
registry=_registry(), registry=_registry(),
@@ -57,4 +69,6 @@ def test_validate_internal_sources_external_blocked():
assert result["violations"][0]["category"] == "external-source" assert result["violations"][0]["category"] == "external-source"
assert result["violations"][0]["blocked_release"] is True assert result["violations"][0]["blocked_release"] is True
# [/DEF:backend.tests.services.clean_release.test_source_isolation:Module]
# [/DEF:test_validate_internal_sources_external_blocked:Function]
# [/DEF:TestSourceIsolation:Module]
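The fixture and the two tests above fix the contract of validate_internal_sources: an all-internal registry yields an empty violations list, while any external endpoint yields a blocking "external-source" violation. A rough sketch of that rule under assumed field names (the project's ResourceSourceEntry may differ):

# Illustrative sketch of the internal-source rule; not the real implementation.
from dataclasses import dataclass


@dataclass
class SourceEntrySketch:
    resource_id: str
    endpoint: str
    is_internal: bool


def validate_internal_sources_sketch(entries: list[SourceEntrySketch]) -> dict:
    violations = [
        {
            "category": "external-source",
            "resource_id": e.resource_id,
            "endpoint": e.endpoint,
            "blocked_release": True,  # external endpoints always block the release
        }
        for e in entries
        if not e.is_internal
    ]
    return {"violations": violations}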

View File

@@ -1,30 +1,70 @@
# [DEF:backend.tests.services.clean_release.test_stages:Module] # [DEF:TestStages:Module]
# @RELATION: BELONGS_TO -> SrcRoot
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: tests, clean-release, compliance, stages # @SEMANTICS: tests, clean-release, compliance, stages
# @PURPOSE: Validate final status derivation logic from stage results. # @PURPOSE: Validate final status derivation logic from stage results.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: TESTS -> backend.src.services.clean_release.stages
from src.models.clean_release import CheckFinalStatus, CheckStageName, CheckStageResult, CheckStageStatus from src.models.clean_release import (
CheckFinalStatus,
CheckStageName,
CheckStageResult,
CheckStageStatus,
)
from src.services.clean_release.stages import derive_final_status, MANDATORY_STAGE_ORDER from src.services.clean_release.stages import derive_final_status, MANDATORY_STAGE_ORDER
# [DEF:test_derive_final_status_compliant:Function]
# @RELATION: BINDS_TO -> TestStages
def test_derive_final_status_compliant(): def test_derive_final_status_compliant():
results = [CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok") for s in MANDATORY_STAGE_ORDER] results = [
CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok")
for s in MANDATORY_STAGE_ORDER
]
assert derive_final_status(results) == CheckFinalStatus.COMPLIANT assert derive_final_status(results) == CheckFinalStatus.COMPLIANT
# [/DEF:test_derive_final_status_compliant:Function]
# [DEF:test_derive_final_status_blocked:Function]
# @RELATION: BINDS_TO -> TestStages
def test_derive_final_status_blocked(): def test_derive_final_status_blocked():
results = [CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok") for s in MANDATORY_STAGE_ORDER] results = [
CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok")
for s in MANDATORY_STAGE_ORDER
]
results[1].status = CheckStageStatus.FAIL results[1].status = CheckStageStatus.FAIL
assert derive_final_status(results) == CheckFinalStatus.BLOCKED assert derive_final_status(results) == CheckFinalStatus.BLOCKED
# [/DEF:test_derive_final_status_blocked:Function]
# [DEF:test_derive_final_status_failed_missing:Function]
# @RELATION: BINDS_TO -> TestStages
def test_derive_final_status_failed_missing(): def test_derive_final_status_failed_missing():
results = [CheckStageResult(stage=MANDATORY_STAGE_ORDER[0], status=CheckStageStatus.PASS, details="ok")] results = [
CheckStageResult(
stage=MANDATORY_STAGE_ORDER[0], status=CheckStageStatus.PASS, details="ok"
)
]
assert derive_final_status(results) == CheckFinalStatus.FAILED assert derive_final_status(results) == CheckFinalStatus.FAILED
# [/DEF:test_derive_final_status_failed_missing:Function]
# [DEF:test_derive_final_status_failed_skipped:Function]
# @RELATION: BINDS_TO -> TestStages
def test_derive_final_status_failed_skipped(): def test_derive_final_status_failed_skipped():
results = [CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok") for s in MANDATORY_STAGE_ORDER] results = [
CheckStageResult(stage=s, status=CheckStageStatus.PASS, details="ok")
for s in MANDATORY_STAGE_ORDER
]
results[2].status = CheckStageStatus.SKIPPED results[2].status = CheckStageStatus.SKIPPED
assert derive_final_status(results) == CheckFinalStatus.FAILED assert derive_final_status(results) == CheckFinalStatus.FAILED
# [/DEF:backend.tests.services.clean_release.test_stages:Module] # [/DEF:test_derive_final_status_failed_skipped:Function]
# [/DEF:TestStages:Module]
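These four cases define the derivation rule: every mandatory stage present and PASS gives COMPLIANT, a FAIL on a complete run gives BLOCKED, and a missing or SKIPPED mandatory stage gives FAILED. One plausible reading of that precedence, sketched with assumed stage names rather than the project's MANDATORY_STAGE_ORDER:

# Sketch of the status derivation described by the tests above; the ordering of
# the FAILED/BLOCKED checks is one plausible reading, not the project's code.
from enum import Enum


class StageStatusSketch(Enum):
    PASS = "pass"
    FAIL = "fail"
    SKIPPED = "skipped"


class FinalStatusSketch(Enum):
    COMPLIANT = "compliant"
    BLOCKED = "blocked"
    FAILED = "failed"


MANDATORY_STAGES = ("sources", "dependencies", "artifacts", "manifest")  # assumed names


def derive_final_status_sketch(results: list[tuple[str, StageStatusSketch]]) -> FinalStatusSketch:
    seen = {stage for stage, _ in results}
    missing = [stage for stage in MANDATORY_STAGES if stage not in seen]
    skipped = [stage for stage, status in results if status is StageStatusSketch.SKIPPED]
    if missing or skipped:
        return FinalStatusSketch.FAILED    # incomplete evidence: cannot certify either way
    if any(status is StageStatusSketch.FAIL for _, status in results):
        return FinalStatusSketch.BLOCKED   # complete run, but a stage failed
    return FinalStatusSketch.COMPLIANT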

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.approval_service:Module] # [DEF:approval_service:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: clean-release, approval, decision, lifecycle, gate # @SEMANTICS: clean-release, approval, decision, lifecycle, gate
# @PURPOSE: Enforce approval/rejection gates over immutable compliance reports. # @PURPOSE: Enforce approval/rejection gates over immutable compliance reports.
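As a hedged illustration of what such a gate can look like, assuming a COMPLIANT-only approval rule and a mandatory rejection reason (neither is confirmed by the module itself):

# Hypothetical approval gate; the real approval_service may differ.
def decide_sketch(report: dict, approve: bool, reason: str | None = None) -> dict:
    if approve and report.get("final_status") != "COMPLIANT":
        raise ValueError("Only COMPLIANT reports can be approved")
    if not approve and not reason:
        raise ValueError("Rejection requires a reason")
    # The report itself is never mutated; the decision is a separate record.
    return {
        "report_id": report.get("report_id"),
        "decision": "APPROVED" if approve else "REJECTED",
        "reason": reason,
    }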

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.artifact_catalog_loader:Module] # [DEF:artifact_catalog_loader:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: clean-release, artifacts, bootstrap, json, tui # @SEMANTICS: clean-release, artifacts, bootstrap, json, tui
# @PURPOSE: Load bootstrap artifact catalogs for clean release real-mode flows. # @PURPOSE: Load bootstrap artifact catalogs for clean release real-mode flows.
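A minimal sketch of such a loader, assuming a JSON file with an "artifacts" array and "artifact_id"/"kind" keys (the actual catalog schema is not shown here):

# Hypothetical catalog loader; path layout and keys are assumptions.
import json
from pathlib import Path


def load_artifact_catalog_sketch(path: Path) -> list[dict]:
    data = json.loads(path.read_text(encoding="utf-8"))
    artifacts = data.get("artifacts", [])
    # Fail fast on obviously malformed entries instead of propagating them downstream.
    for entry in artifacts:
        if "artifact_id" not in entry or "kind" not in entry:
            raise ValueError(f"Malformed catalog entry: {entry!r}")
    return artifacts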

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.audit_service:Module] # [DEF:audit_service:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: clean-release, audit, lifecycle, logging # @SEMANTICS: clean-release, audit, lifecycle, logging
# @PURPOSE: Provide lightweight audit hooks for clean release preparation/check/report lifecycle. # @PURPOSE: Provide lightweight audit hooks for clean release preparation/check/report lifecycle.
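A lightweight hook of this kind could look like the following sketch; the event names and payload shape are assumptions, not the service's API:

# Hypothetical audit hook; event names and payload shape are assumptions.
from datetime import datetime, timezone

_audit_log: list[dict] = []


def record_audit_event_sketch(event: str, run_id: str, details: dict | None = None) -> dict:
    entry = {
        "event": event,                 # e.g. "PREPARED", "CHECKED", "REPORTED" (assumed)
        "run_id": run_id,
        "details": details or {},
        "at": datetime.now(timezone.utc).isoformat(),
    }
    _audit_log.append(entry)            # append-only, in keeping with the audit intent
    return entry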

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.candidate_service:Module] # [DEF:candidate_service:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: clean-release, candidate, artifacts, lifecycle, validation # @SEMANTICS: clean-release, candidate, artifacts, lifecycle, validation
# @PURPOSE: Register release candidates with validated artifacts and advance lifecycle through legal transitions. # @PURPOSE: Register release candidates with validated artifacts and advance lifecycle through legal transitions.
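One way to express "legal transitions" is an explicit transition table; the states below are assumed for illustration and may not match the service's lifecycle:

# Hypothetical lifecycle map; the actual states and transitions may differ.
LEGAL_TRANSITIONS_SKETCH = {
    "REGISTERED": {"CHECKING"},
    "CHECKING": {"COMPLIANT", "BLOCKED", "FAILED"},
    "COMPLIANT": {"APPROVED", "REJECTED"},
    "BLOCKED": {"REJECTED"},
    "FAILED": {"REJECTED"},
}


def advance_sketch(current: str, target: str) -> str:
    # Reject anything not whitelisted by the table above.
    if target not in LEGAL_TRANSITIONS_SKETCH.get(current, set()):
        raise ValueError(f"Illegal transition {current} -> {target}")
    return target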

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.compliance_execution_service:Module] # [DEF:compliance_execution_service:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: clean-release, compliance, execution, stages, immutable-evidence # @SEMANTICS: clean-release, compliance, execution, stages, immutable-evidence
# @PURPOSE: Create and execute compliance runs with trusted snapshots, deterministic stages, violations and immutable report persistence. # @PURPOSE: Create and execute compliance runs with trusted snapshots, deterministic stages, violations and immutable report persistence.
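The "immutable report persistence" part can be illustrated with a write-once store; this is a sketch of the idea, not the service's persistence layer:

# Hypothetical write-once store; illustrates the "immutable report" idea only.
class ImmutableReportStoreSketch:
    def __init__(self) -> None:
        self._reports: dict[str, dict] = {}

    def persist(self, run_id: str, report: dict) -> None:
        if run_id in self._reports:
            raise ValueError(f"Report for run {run_id} already persisted")
        # Store a copy so later mutation of the caller's dict cannot leak in.
        self._reports[run_id] = dict(report)

    def get(self, run_id: str) -> dict:
        # Hand back a copy so callers cannot mutate the stored report either.
        return dict(self._reports[run_id])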

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.compliance_orchestrator:Module] # [DEF:compliance_orchestrator:Module]
# @COMPLEXITY: 5 # @COMPLEXITY: 5
# @SEMANTICS: clean-release, orchestrator, compliance-gate, stages # @SEMANTICS: clean-release, orchestrator, compliance-gate, stages
# @PURPOSE: Execute mandatory clean compliance stages and produce final COMPLIANT/BLOCKED/FAILED outcome. # @PURPOSE: Execute mandatory clean compliance stages and produce final COMPLIANT/BLOCKED/FAILED outcome.
@@ -42,7 +42,7 @@ from ...core.logger import belief_scope, logger
# [DEF:CleanComplianceOrchestrator:Class] # [DEF:CleanComplianceOrchestrator:Class]
# @PURPOSE: Coordinate clean-release compliance verification stages. # @PURPOSE: Coordinate clean-release compliance verification stages.
class CleanComplianceOrchestrator: class CleanComplianceOrchestrator:
# [DEF:CleanComplianceOrchestrator.__init__:Function] # [DEF:__init__:Function]
# @PURPOSE: Bind repository dependency used for orchestrator persistence and lookups. # @PURPOSE: Bind repository dependency used for orchestrator persistence and lookups.
# @PRE: repository is a valid CleanReleaseRepository instance with required methods. # @PRE: repository is a valid CleanReleaseRepository instance with required methods.
# @POST: self.repository is assigned and used by all orchestration steps. # @POST: self.repository is assigned and used by all orchestration steps.
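The PRE/POST pair above amounts to plain constructor injection; a minimal sketch, assuming only that a non-null repository is required:

# Hedged sketch of the constructor-injection pattern described above.
class OrchestratorInitSketch:
    def __init__(self, repository) -> None:
        if repository is None:
            raise ValueError("repository is required")   # PRE: a valid repository
        self.repository = repository                      # POST: bound for later steps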

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.demo_data_service:Module] # [DEF:demo_data_service:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: clean-release, demo-mode, namespace, isolation, repository # @SEMANTICS: clean-release, demo-mode, namespace, isolation, repository
# @PURPOSE: Provide deterministic namespace helpers and isolated in-memory repository creation for demo and real modes. # @PURPOSE: Provide deterministic namespace helpers and isolated in-memory repository creation for demo and real modes.
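A deterministic namespace helper of the kind described could be as small as the following sketch; the prefixing scheme is an assumption:

# Hypothetical namespace helper; the prefixing scheme is an assumption.
def namespaced_sketch(resource_id: str, demo_mode: bool) -> str:
    prefix = "demo" if demo_mode else "real"
    return f"{prefix}:{resource_id}"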

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.services.clean_release.manifest_builder:Module] # [DEF:manifest_builder:Module]
# @COMPLEXITY: 3 # @COMPLEXITY: 3
# @SEMANTICS: clean-release, manifest, deterministic-hash, summary # @SEMANTICS: clean-release, manifest, deterministic-hash, summary
# @PURPOSE: Build deterministic distribution manifest from classified artifact input. # @PURPOSE: Build deterministic distribution manifest from classified artifact input.
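Determinism here typically means a canonical serialization before hashing; a sketch under that assumption, with an assumed "artifact_id" key:

# Hypothetical deterministic manifest hash; key names are assumptions.
import hashlib
import json


def manifest_hash_sketch(artifacts: list[dict]) -> str:
    # Sort entries and keys so the same input always yields the same digest.
    canonical = json.dumps(
        sorted(artifacts, key=lambda a: a["artifact_id"]),
        sort_keys=True,
        separators=(",", ":"),
    )
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()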

Some files were not shown because too many files have changed in this diff.