diff --git a/backend/src/api/routes/__tests__/test_git_api.py b/backend/src/api/routes/__tests__/test_git_api.py index 6a49fd86..e8ff79f3 100644 --- a/backend/src/api/routes/__tests__/test_git_api.py +++ b/backend/src/api/routes/__tests__/test_git_api.py @@ -1,5 +1,6 @@ -# [DEF:backend.src.api.routes.__tests__.test_git_api:Module] -# @RELATION: VERIFIES -> src.api.routes.git +# [DEF:TestGitApi:Module] +# @COMPLEXITY: 3 +# @RELATION: VERIFIES ->[src.api.routes.git] # @PURPOSE: API tests for Git configurations and repository operations. import pytest @@ -9,32 +10,52 @@ from fastapi import HTTPException from src.api.routes import git as git_routes from src.models.git import GitServerConfig, GitProvider, GitStatus, GitRepository + class DbMock: def __init__(self, data=None): self._data = data or [] self._deleted = [] self._added = [] + self._filtered = None def query(self, model): self._model = model + self._filtered = None return self def filter(self, condition): - # Simplistic mocking for tests, assuming equality checks - for item in self._data: - # We assume condition is an equality expression like GitServerConfig.id == "123" - # It's hard to eval the condition exactly in a mock without complex parsing, - # so we'll just return items where type matches. - pass + # Honor simple SQLAlchemy equality expressions used by these route tests. 
+ candidates = [ + item + for item in self._data + if not hasattr(self, "_model") or isinstance(item, self._model) + ] + try: + left_key = getattr(getattr(condition, "left", None), "key", None) + right_value = getattr(getattr(condition, "right", None), "value", None) + if left_key is not None and right_value is not None: + self._filtered = [ + item + for item in candidates + if getattr(item, left_key, None) == right_value + ] + else: + self._filtered = candidates + except Exception: + self._filtered = candidates return self def first(self): + if self._filtered is not None: + return self._filtered[0] if self._filtered else None for item in self._data: if hasattr(self, "_model") and isinstance(item, self._model): return item return None def all(self): + if self._filtered is not None: + return list(self._filtered) return self._data def add(self, item): @@ -57,254 +78,410 @@ class DbMock: if not hasattr(item, "last_validated"): item.last_validated = "2026-03-08T00:00:00Z" + +# [DEF:test_get_git_configs_masks_pat:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_get_git_configs_masks_pat(): """ @PRE: Database session `db` is available. @POST: Returns a list of all GitServerConfig objects from the database with PAT masked. 
""" - db = DbMock([GitServerConfig( - id="config-1", name="Test Server", provider=GitProvider.GITHUB, - url="https://github.com", pat="secret-token", - status=GitStatus.CONNECTED, last_validated="2026-03-08T00:00:00Z" - )]) - + db = DbMock( + [ + GitServerConfig( + id="config-1", + name="Test Server", + provider=GitProvider.GITHUB, + url="https://github.com", + pat="secret-token", + status=GitStatus.CONNECTED, + last_validated="2026-03-08T00:00:00Z", + ) + ] + ) + result = asyncio.run(git_routes.get_git_configs(db=db)) - + assert len(result) == 1 assert result[0].pat == "********" assert result[0].name == "Test Server" + +# [/DEF:test_get_git_configs_masks_pat:Function] + + +# [DEF:test_create_git_config_persists_config:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_create_git_config_persists_config(): """ @PRE: `config` contains valid GitServerConfigCreate data. @POST: A new GitServerConfig record is created in the database. """ from src.api.routes.git_schemas import GitServerConfigCreate + db = DbMock() config = GitServerConfigCreate( - name="New Server", provider=GitProvider.GITLAB, - url="https://gitlab.com", pat="new-token", - default_branch="master" + name="New Server", + provider=GitProvider.GITLAB, + url="https://gitlab.com", + pat="new-token", + default_branch="master", ) - + result = asyncio.run(git_routes.create_git_config(config=config, db=db)) - + assert len(db._added) == 1 assert db._added[0].name == "New Server" assert db._added[0].pat == "new-token" assert result.name == "New Server" - assert result.pat == "new-token" # Note: route returns unmasked until serialized by FastAPI usually, but in tests schema might catch it or not. + assert ( + result.pat == "new-token" + ) # Note: route returns unmasked until serialized by FastAPI usually, but in tests schema might catch it or not. 
+ + +# [/DEF:test_create_git_config_persists_config:Function] from src.api.routes.git_schemas import GitServerConfigUpdate + +# [DEF:test_update_git_config_modifies_record:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_update_git_config_modifies_record(): """ @PRE: `config_id` corresponds to an existing configuration. @POST: The configuration record is updated in the database, preserving PAT if masked is sent. """ existing_config = GitServerConfig( - id="config-1", name="Old Server", provider=GitProvider.GITHUB, - url="https://github.com", pat="old-token", - status=GitStatus.CONNECTED, last_validated="2026-03-08T00:00:00Z" + id="config-1", + name="Old Server", + provider=GitProvider.GITHUB, + url="https://github.com", + pat="old-token", + status=GitStatus.CONNECTED, + last_validated="2026-03-08T00:00:00Z", ) + # The monkeypatched query will return existing_config as it's the only one in the list class SingleConfigDbMock: - def query(self, *args): return self - def filter(self, *args): return self - def first(self): return existing_config - def commit(self): pass - def refresh(self, config): pass + def query(self, *args): + return self + + def filter(self, *args): + return self + + def first(self): + return existing_config + + def commit(self): + pass + + def refresh(self, config): + pass db = SingleConfigDbMock() update_data = GitServerConfigUpdate(name="Updated Server", pat="********") - - result = asyncio.run(git_routes.update_git_config(config_id="config-1", config_update=update_data, db=db)) - + + result = asyncio.run( + git_routes.update_git_config( + config_id="config-1", config_update=update_data, db=db + ) + ) + assert existing_config.name == "Updated Server" - assert existing_config.pat == "old-token" # Ensure PAT is not overwritten with asterisks + assert ( + existing_config.pat == "old-token" + ) # Ensure PAT is not overwritten with asterisks assert result.pat == "********" + +# [/DEF:test_update_git_config_modifies_record:Function] + + +# 
[DEF:test_update_git_config_raises_404_if_not_found:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_update_git_config_raises_404_if_not_found(): """ @PRE: `config_id` corresponds to a missing configuration. @THROW: HTTPException 404 """ - db = DbMock([]) # Empty db + db = DbMock([]) # Empty db update_data = GitServerConfigUpdate(name="Updated Server", pat="new-token") - + with pytest.raises(HTTPException) as exc_info: - asyncio.run(git_routes.update_git_config(config_id="config-1", config_update=update_data, db=db)) - + asyncio.run( + git_routes.update_git_config( + config_id="config-1", config_update=update_data, db=db + ) + ) + assert exc_info.value.status_code == 404 assert exc_info.value.detail == "Configuration not found" + +# [/DEF:test_update_git_config_raises_404_if_not_found:Function] + + +# [DEF:test_delete_git_config_removes_record:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_delete_git_config_removes_record(): """ @PRE: `config_id` corresponds to an existing configuration. @POST: The configuration record is removed from the database. 
""" existing_config = GitServerConfig(id="config-1") + class SingleConfigDbMock: - def query(self, *args): return self - def filter(self, *args): return self - def first(self): return existing_config - def delete(self, config): self.deleted = config - def commit(self): pass + def query(self, *args): + return self + + def filter(self, *args): + return self + + def first(self): + return existing_config + + def delete(self, config): + self.deleted = config + + def commit(self): + pass db = SingleConfigDbMock() - + result = asyncio.run(git_routes.delete_git_config(config_id="config-1", db=db)) - + assert db.deleted == existing_config assert result["status"] == "success" + +# [/DEF:test_delete_git_config_removes_record:Function] + + +# [DEF:test_test_git_config_validates_connection_successfully:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_test_git_config_validates_connection_successfully(monkeypatch): """ @PRE: `config` contains provider, url, and pat. @POST: Returns success if the connection is validated via GitService. """ + class MockGitService: async def test_connection(self, provider, url, pat): return True monkeypatch.setattr(git_routes, "git_service", MockGitService()) from src.api.routes.git_schemas import GitServerConfigCreate - + config = GitServerConfigCreate( - name="Test Server", provider=GitProvider.GITHUB, - url="https://github.com", pat="test-pat" + name="Test Server", + provider=GitProvider.GITHUB, + url="https://github.com", + pat="test-pat", ) db = DbMock([]) - + result = asyncio.run(git_routes.test_git_config(config=config, db=db)) - + assert result["status"] == "success" + +# [/DEF:test_test_git_config_validates_connection_successfully:Function] + + +# [DEF:test_test_git_config_fails_validation:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_test_git_config_fails_validation(monkeypatch): """ @PRE: `config` contains provider, url, and pat BUT connection fails. 
@THROW: HTTPException 400 """ + class MockGitService: async def test_connection(self, provider, url, pat): return False monkeypatch.setattr(git_routes, "git_service", MockGitService()) from src.api.routes.git_schemas import GitServerConfigCreate - + config = GitServerConfigCreate( - name="Test Server", provider=GitProvider.GITHUB, - url="https://github.com", pat="bad-pat" + name="Test Server", + provider=GitProvider.GITHUB, + url="https://github.com", + pat="bad-pat", ) db = DbMock([]) - + with pytest.raises(HTTPException) as exc_info: asyncio.run(git_routes.test_git_config(config=config, db=db)) - + assert exc_info.value.status_code == 400 assert exc_info.value.detail == "Connection failed" + +# [/DEF:test_test_git_config_fails_validation:Function] + + +# [DEF:test_list_gitea_repositories_returns_payload:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_list_gitea_repositories_returns_payload(monkeypatch): """ @PRE: config_id exists and provider is GITEA. @POST: Returns repositories visible to PAT user. 
""" + class MockGitService: async def list_gitea_repositories(self, url, pat): - return [{"name": "test-repo", "full_name": "owner/test-repo", "private": True}] + return [ + {"name": "test-repo", "full_name": "owner/test-repo", "private": True} + ] monkeypatch.setattr(git_routes, "git_service", MockGitService()) existing_config = GitServerConfig( - id="config-1", name="Gitea Server", provider=GitProvider.GITEA, - url="https://gitea.local", pat="gitea-token" + id="config-1", + name="Gitea Server", + provider=GitProvider.GITEA, + url="https://gitea.local", + pat="gitea-token", ) db = DbMock([existing_config]) - - result = asyncio.run(git_routes.list_gitea_repositories(config_id="config-1", db=db)) - + + result = asyncio.run( + git_routes.list_gitea_repositories(config_id="config-1", db=db) + ) + assert len(result) == 1 assert result[0].name == "test-repo" assert result[0].private is True + +# [/DEF:test_list_gitea_repositories_returns_payload:Function] + + +# [DEF:test_list_gitea_repositories_rejects_non_gitea:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_list_gitea_repositories_rejects_non_gitea(monkeypatch): """ @PRE: config_id exists and provider is NOT GITEA. 
@THROW: HTTPException 400 """ existing_config = GitServerConfig( - id="config-1", name="GitHub Server", provider=GitProvider.GITHUB, - url="https://github.com", pat="token" + id="config-1", + name="GitHub Server", + provider=GitProvider.GITHUB, + url="https://github.com", + pat="token", ) db = DbMock([existing_config]) - + with pytest.raises(HTTPException) as exc_info: asyncio.run(git_routes.list_gitea_repositories(config_id="config-1", db=db)) - + assert exc_info.value.status_code == 400 assert "GITEA provider only" in exc_info.value.detail + +# [/DEF:test_list_gitea_repositories_rejects_non_gitea:Function] + + +# [DEF:test_create_remote_repository_creates_provider_repo:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_create_remote_repository_creates_provider_repo(monkeypatch): """ @PRE: config_id exists and PAT has creation permissions. @POST: Returns normalized remote repository payload. """ + class MockGitService: - async def create_gitlab_repository(self, server_url, pat, name, private, description, auto_init, default_branch): + async def create_gitlab_repository( + self, server_url, pat, name, private, description, auto_init, default_branch + ): return { "name": name, "full_name": f"user/{name}", "private": private, - "clone_url": f"{server_url}/user/{name}.git" + "clone_url": f"{server_url}/user/{name}.git", } monkeypatch.setattr(git_routes, "git_service", MockGitService()) from src.api.routes.git_schemas import RemoteRepoCreateRequest - + existing_config = GitServerConfig( - id="config-1", name="GitLab Server", provider=GitProvider.GITLAB, - url="https://gitlab.com", pat="token" + id="config-1", + name="GitLab Server", + provider=GitProvider.GITLAB, + url="https://gitlab.com", + pat="token", ) db = DbMock([existing_config]) - + request = RemoteRepoCreateRequest(name="new-repo", private=True, description="desc") - result = asyncio.run(git_routes.create_remote_repository(config_id="config-1", request=request, db=db)) - + result = asyncio.run( + 
git_routes.create_remote_repository( + config_id="config-1", request=request, db=db + ) + ) + assert result.provider == GitProvider.GITLAB assert result.name == "new-repo" assert result.full_name == "user/new-repo" + +# [/DEF:test_create_remote_repository_creates_provider_repo:Function] + + +# [DEF:test_init_repository_initializes_and_saves_binding:Function] +# @RELATION: BINDS_TO ->[TestGitApi] def test_init_repository_initializes_and_saves_binding(monkeypatch): """ @PRE: `dashboard_ref` exists and `init_data` contains valid config_id and remote_url. @POST: Repository is initialized on disk and a GitRepository record is saved in DB. """ from src.api.routes.git_schemas import RepoInitRequest - + class MockGitService: def init_repo(self, dashboard_id, remote_url, pat, repo_key, default_branch): self.init_called = True + def _get_repo_path(self, dashboard_id, repo_key): return f"/tmp/repos/{repo_key}" git_service_mock = MockGitService() monkeypatch.setattr(git_routes, "git_service", git_service_mock) - monkeypatch.setattr(git_routes, "_resolve_dashboard_id_from_ref", lambda *args, **kwargs: 123) - monkeypatch.setattr(git_routes, "_resolve_repo_key_from_ref", lambda *args, **kwargs: "dashboard-123") - + monkeypatch.setattr( + git_routes, "_resolve_dashboard_id_from_ref", lambda *args, **kwargs: 123 + ) + monkeypatch.setattr( + git_routes, + "_resolve_repo_key_from_ref", + lambda *args, **kwargs: "dashboard-123", + ) + existing_config = GitServerConfig( - id="config-1", name="GitLab Server", provider=GitProvider.GITLAB, - url="https://gitlab.com", pat="token", default_branch="main" + id="config-1", + name="GitLab Server", + provider=GitProvider.GITLAB, + url="https://gitlab.com", + pat="token", + default_branch="main", ) db = DbMock([existing_config]) - - init_data = RepoInitRequest(config_id="config-1", remote_url="https://git.local/repo.git") - - result = asyncio.run(git_routes.init_repository(dashboard_ref="123", init_data=init_data, config_manager=MagicMock(), 
db=db)) - + + init_data = RepoInitRequest( + config_id="config-1", remote_url="https://git.local/repo.git" + ) + + result = asyncio.run( + git_routes.init_repository( + dashboard_ref="123", init_data=init_data, config_manager=MagicMock(), db=db + ) + ) + assert result["status"] == "success" assert git_service_mock.init_called is True assert len(db._added) == 1 assert isinstance(db._added[0], GitRepository) assert db._added[0].dashboard_id == 123 -# [/DEF:backend.src.api.routes.__tests__.test_git_api:Module] + +# [/DEF:test_init_repository_initializes_and_saves_binding:Function] +# [/DEF:TestGitApi:Module] diff --git a/backend/src/api/routes/assistant.py b/backend/src/api/routes/assistant.py index 26006579..064d5160 100644 --- a/backend/src/api/routes/assistant.py +++ b/backend/src/api/routes/assistant.py @@ -1,5 +1,5 @@ -# [DEF:backend.src.api.routes.assistant:Module] -# @COMPLEXITY: 3 +# [DEF:AssistantApi:Module] +# @COMPLEXITY: 5 # @SEMANTICS: api, assistant, chat, command, confirmation # @PURPOSE: API routes for LLM assistant command parsing and safe execution orchestration. 
# @LAYER: API @@ -22,7 +22,12 @@ from sqlalchemy import desc from ...core.logger import belief_scope, logger from ...core.task_manager import TaskManager -from ...dependencies import get_current_user, get_task_manager, get_config_manager, has_permission +from ...dependencies import ( + get_current_user, + get_task_manager, + get_config_manager, + has_permission, +) from ...core.config_manager import ConfigManager from ...core.database import get_db from ...services.git_service import GitService @@ -54,6 +59,8 @@ git_service = GitService() class AssistantMessageRequest(BaseModel): conversation_id: Optional[str] = None message: str = Field(..., min_length=1, max_length=4000) + + # [/DEF:AssistantMessageRequest:Class] @@ -66,11 +73,13 @@ class AssistantAction(BaseModel): type: str label: str target: Optional[str] = None + + # [/DEF:AssistantAction:Class] # [DEF:AssistantMessageResponse:Class] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Output payload contract for assistant interaction endpoints. # @PRE: Response includes deterministic state and text. # @POST: Payload may include task_id/confirmation_id/actions for UI follow-up. @@ -84,11 +93,13 @@ class AssistantMessageResponse(BaseModel): task_id: Optional[str] = None actions: List[AssistantAction] = Field(default_factory=list) created_at: datetime + + # [/DEF:AssistantMessageResponse:Class] # [DEF:ConfirmationRecord:Class] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: In-memory confirmation token model for risky operation dispatch. # @PRE: intent/dispatch/user_id are populated at confirmation request time. # @POST: Record tracks lifecycle state and expiry timestamp. 
@@ -101,6 +112,8 @@ class ConfirmationRecord(BaseModel): expires_at: datetime state: str = "pending" created_at: datetime + + # [/DEF:ConfirmationRecord:Class] @@ -116,7 +129,10 @@ INTENT_PERMISSION_CHECKS: Dict[str, List[Tuple[str, str]]] = { "create_branch": [("plugin:git", "EXECUTE")], "commit_changes": [("plugin:git", "EXECUTE")], "deploy_dashboard": [("plugin:git", "EXECUTE")], - "execute_migration": [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")], + "execute_migration": [ + ("plugin:migration", "EXECUTE"), + ("plugin:superset-migration", "EXECUTE"), + ], "run_backup": [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")], "run_llm_validation": [("plugin:llm_dashboard_validation", "EXECUTE")], "run_llm_documentation": [("plugin:llm_documentation", "EXECUTE")], @@ -125,7 +141,7 @@ INTENT_PERMISSION_CHECKS: Dict[str, List[Tuple[str, str]]] = { # [DEF:_append_history:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Append conversation message to in-memory history buffer. # @PRE: user_id and conversation_id identify target conversation bucket. # @POST: Message entry is appended to CONVERSATIONS key list. @@ -153,11 +169,13 @@ def _append_history( "created_at": datetime.utcnow(), } ) + + # [/DEF:_append_history:Function] # [DEF:_persist_message:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Persist assistant/user message record to database. # @PRE: db session is writable and message payload is serializable. # @POST: Message row is committed or persistence failure is logged. @@ -189,28 +207,36 @@ def _persist_message( except Exception as exc: db.rollback() logger.warning(f"[assistant.message][persist_failed] {exc}") + + # [/DEF:_persist_message:Function] # [DEF:_audit:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Append in-memory audit record for assistant decision trace. # @PRE: payload describes decision/outcome fields. # @POST: ASSISTANT_AUDIT list for user contains new timestamped entry. 
def _audit(user_id: str, payload: Dict[str, Any]): if user_id not in ASSISTANT_AUDIT: ASSISTANT_AUDIT[user_id] = [] - ASSISTANT_AUDIT[user_id].append({**payload, "created_at": datetime.utcnow().isoformat()}) + ASSISTANT_AUDIT[user_id].append( + {**payload, "created_at": datetime.utcnow().isoformat()} + ) logger.info(f"[assistant.audit] {payload}") + + # [/DEF:_audit:Function] # [DEF:_persist_audit:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Persist structured assistant audit payload in database. # @PRE: db session is writable and payload is JSON-serializable. # @POST: Audit row is committed or failure is logged with rollback. -def _persist_audit(db: Session, user_id: str, payload: Dict[str, Any], conversation_id: Optional[str]): +def _persist_audit( + db: Session, user_id: str, payload: Dict[str, Any], conversation_id: Optional[str] +): try: row = AssistantAuditRecord( id=str(uuid.uuid4()), @@ -226,11 +252,13 @@ def _persist_audit(db: Session, user_id: str, payload: Dict[str, Any], conversat except Exception as exc: db.rollback() logger.warning(f"[assistant.audit][persist_failed] {exc}") + + # [/DEF:_persist_audit:Function] # [DEF:_persist_confirmation:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Persist confirmation token record to database. # @PRE: record contains id/user/intent/dispatch/expiry fields. # @POST: Confirmation row exists in persistent storage. @@ -252,17 +280,23 @@ def _persist_confirmation(db: Session, record: ConfirmationRecord): except Exception as exc: db.rollback() logger.warning(f"[assistant.confirmation][persist_failed] {exc}") + + # [/DEF:_persist_confirmation:Function] # [DEF:_update_confirmation_state:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Update persistent confirmation token lifecycle state. # @PRE: confirmation_id references existing row. # @POST: State and consumed_at fields are updated when applicable. 
def _update_confirmation_state(db: Session, confirmation_id: str, state: str): try: - row = db.query(AssistantConfirmationRecord).filter(AssistantConfirmationRecord.id == confirmation_id).first() + row = ( + db.query(AssistantConfirmationRecord) + .filter(AssistantConfirmationRecord.id == confirmation_id) + .first() + ) if not row: return row.state = state @@ -272,15 +306,19 @@ def _update_confirmation_state(db: Session, confirmation_id: str, state: str): except Exception as exc: db.rollback() logger.warning(f"[assistant.confirmation][update_failed] {exc}") + + # [/DEF:_update_confirmation_state:Function] # [DEF:_load_confirmation_from_db:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Load confirmation token from database into in-memory model. # @PRE: confirmation_id may or may not exist in storage. # @POST: Returns ConfirmationRecord when found, otherwise None. -def _load_confirmation_from_db(db: Session, confirmation_id: str) -> Optional[ConfirmationRecord]: +def _load_confirmation_from_db( + db: Session, confirmation_id: str +) -> Optional[ConfirmationRecord]: row = ( db.query(AssistantConfirmationRecord) .filter(AssistantConfirmationRecord.id == confirmation_id) @@ -298,11 +336,13 @@ def _load_confirmation_from_db(db: Session, confirmation_id: str) -> Optional[Co state=row.state, created_at=row.created_at, ) + + # [/DEF:_load_confirmation_from_db:Function] # [DEF:_ensure_conversation:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve active conversation id in memory or create a new one. # @PRE: user_id identifies current actor. # @POST: Returns stable conversation id and updates USER_ACTIVE_CONVERSATION. 
@@ -318,15 +358,19 @@ def _ensure_conversation(user_id: str, conversation_id: Optional[str]) -> str: new_id = str(uuid.uuid4()) USER_ACTIVE_CONVERSATION[user_id] = new_id return new_id + + # [/DEF:_ensure_conversation:Function] # [DEF:_resolve_or_create_conversation:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve active conversation using explicit id, memory cache, or persisted history. # @PRE: user_id and db session are available. # @POST: Returns conversation id and updates USER_ACTIVE_CONVERSATION cache. -def _resolve_or_create_conversation(user_id: str, conversation_id: Optional[str], db: Session) -> str: +def _resolve_or_create_conversation( + user_id: str, conversation_id: Optional[str], db: Session +) -> str: if conversation_id: USER_ACTIVE_CONVERSATION[user_id] = conversation_id return conversation_id @@ -348,11 +392,13 @@ def _resolve_or_create_conversation(user_id: str, conversation_id: Optional[str] new_id = str(uuid.uuid4()) USER_ACTIVE_CONVERSATION[user_id] = new_id return new_id + + # [/DEF:_resolve_or_create_conversation:Function] # [DEF:_cleanup_history_ttl:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Enforce assistant message retention window by deleting expired rows and in-memory records. # @PRE: db session is available and user_id references current actor scope. # @POST: Messages older than ASSISTANT_MESSAGE_TTL_DAYS are removed from persistence and memory mirrors. 
@@ -368,7 +414,9 @@ def _cleanup_history_ttl(db: Session, user_id: str): db.commit() except Exception as exc: db.rollback() - logger.warning(f"[assistant.history][ttl_cleanup_failed] user={user_id} error={exc}") + logger.warning( + f"[assistant.history][ttl_cleanup_failed] user={user_id} error={exc}" + ) stale_keys: List[Tuple[str, str]] = [] for key, items in CONVERSATIONS.items(): @@ -386,11 +434,13 @@ def _cleanup_history_ttl(db: Session, user_id: str): stale_keys.append(key) for key in stale_keys: CONVERSATIONS.pop(key, None) + + # [/DEF:_cleanup_history_ttl:Function] # [DEF:_is_conversation_archived:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Determine archived state for a conversation based on last update timestamp. # @PRE: updated_at can be null for empty conversations. # @POST: Returns True when conversation inactivity exceeds archive threshold. @@ -399,11 +449,13 @@ def _is_conversation_archived(updated_at: Optional[datetime]) -> bool: return False cutoff = datetime.utcnow() - timedelta(days=ASSISTANT_ARCHIVE_AFTER_DAYS) return updated_at < cutoff + + # [/DEF:_is_conversation_archived:Function] # [DEF:_coerce_query_bool:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Normalize bool-like query values for compatibility in direct handler invocations/tests. # @PRE: value may be bool, string, or FastAPI Query metadata object. # @POST: Returns deterministic boolean flag. @@ -413,11 +465,13 @@ def _coerce_query_bool(value: Any) -> bool: if isinstance(value, str): return value.strip().lower() in {"1", "true", "yes", "on"} return False + + # [/DEF:_coerce_query_bool:Function] # [DEF:_extract_id:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Extract first regex match group from text by ordered pattern list. # @PRE: patterns contain at least one capture group. # @POST: Returns first matched token or None. 
@@ -427,15 +481,19 @@ def _extract_id(text: str, patterns: List[str]) -> Optional[str]: if m: return m.group(1) return None + + # [/DEF:_extract_id:Function] # [DEF:_resolve_env_id:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve environment identifier/name token to canonical environment id. # @PRE: config_manager provides environment list. # @POST: Returns matched environment id or None. -def _resolve_env_id(token: Optional[str], config_manager: ConfigManager) -> Optional[str]: +def _resolve_env_id( + token: Optional[str], config_manager: ConfigManager +) -> Optional[str]: if not token: return None @@ -445,11 +503,13 @@ def _resolve_env_id(token: Optional[str], config_manager: ConfigManager) -> Opti if env.id.lower() == normalized or env.name.lower() == normalized: return env.id return None + + # [/DEF:_resolve_env_id:Function] # [DEF:_is_production_env:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Determine whether environment token resolves to production-like target. # @PRE: config_manager provides environments or token text is provided. # @POST: Returns True for production/prod synonyms, else False. @@ -463,11 +523,13 @@ def _is_production_env(token: Optional[str], config_manager: ConfigManager) -> b return False target = f"{env.id} {env.name}".lower() return "prod" in target or "production" in target or "прод" in target + + # [/DEF:_is_production_env:Function] # [DEF:_resolve_provider_id:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve provider token to provider id with active/default fallback. # @PRE: db session can load provider list through LLMProviderService. # @POST: Returns provider id or None when no providers configured. 
@@ -499,11 +561,13 @@ def _resolve_provider_id( active = next((p for p in providers if p.is_active), None) return active.id if active else providers[0].id + + # [/DEF:_resolve_provider_id:Function] # [DEF:_get_default_environment_id:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve default environment id from settings or first configured environment. # @PRE: config_manager returns environments list. # @POST: Returns default environment id or None when environment list is empty. @@ -519,13 +583,17 @@ def _get_default_environment_id(config_manager: ConfigManager) -> Optional[str]: preferred = None if preferred and any(env.id == preferred for env in configured): return preferred - explicit_default = next((env.id for env in configured if getattr(env, "is_default", False)), None) + explicit_default = next( + (env.id for env in configured if getattr(env, "is_default", False)), None + ) return explicit_default or configured[0].id + + # [/DEF:_get_default_environment_id:Function] # [DEF:_resolve_dashboard_id_by_ref:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard id by title or slug reference in selected environment. # @PRE: dashboard_ref is a non-empty string-like token. # @POST: Returns dashboard id when uniquely matched, otherwise None. 
@@ -536,7 +604,9 @@ def _resolve_dashboard_id_by_ref( ) -> Optional[int]: if not dashboard_ref or not env_id: return None - env = next((item for item in config_manager.get_environments() if item.id == env_id), None) + env = next( + (item for item in config_manager.get_environments() if item.id == env_id), None + ) if not env: return None @@ -545,12 +615,15 @@ def _resolve_dashboard_id_by_ref( client = SupersetClient(env) _, dashboards = client.get_dashboards(query={"page_size": 200}) except Exception as exc: - logger.warning(f"[assistant.dashboard_resolve][failed] ref={dashboard_ref} env={env_id} error={exc}") + logger.warning( + f"[assistant.dashboard_resolve][failed] ref={dashboard_ref} env={env_id} error={exc}" + ) return None exact = next( ( - d for d in dashboards + d + for d in dashboards if str(d.get("slug", "")).lower() == needle or str(d.get("dashboard_title", "")).lower() == needle or str(d.get("title", "")).lower() == needle @@ -560,15 +633,21 @@ def _resolve_dashboard_id_by_ref( if exact: return int(exact.get("id")) - partial = [d for d in dashboards if needle in str(d.get("dashboard_title", d.get("title", ""))).lower()] + partial = [ + d + for d in dashboards + if needle in str(d.get("dashboard_title", d.get("title", ""))).lower() + ] if len(partial) == 1 and partial[0].get("id") is not None: return int(partial[0]["id"]) return None + + # [/DEF:_resolve_dashboard_id_by_ref:Function] # [DEF:_resolve_dashboard_id_entity:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard id from intent entities using numeric id or dashboard_ref fallback. # @PRE: entities may contain dashboard_id as int/str and optional dashboard_ref. # @POST: Returns resolved dashboard id or None when ambiguous/unresolvable. 
@@ -593,31 +672,50 @@ def _resolve_dashboard_id_entity( if not dashboard_ref: return None - env_token = env_hint or entities.get("environment") or entities.get("source_env") or entities.get("target_env") - env_id = _resolve_env_id(env_token, config_manager) if env_token else _get_default_environment_id(config_manager) + env_token = ( + env_hint + or entities.get("environment") + or entities.get("source_env") + or entities.get("target_env") + ) + env_id = ( + _resolve_env_id(env_token, config_manager) + if env_token + else _get_default_environment_id(config_manager) + ) return _resolve_dashboard_id_by_ref(str(dashboard_ref), env_id, config_manager) + + # [/DEF:_resolve_dashboard_id_entity:Function] # [DEF:_get_environment_name_by_id:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve human-readable environment name by id. # @PRE: environment id may be None. # @POST: Returns matching environment name or fallback id. -def _get_environment_name_by_id(env_id: Optional[str], config_manager: ConfigManager) -> str: +def _get_environment_name_by_id( + env_id: Optional[str], config_manager: ConfigManager +) -> str: if not env_id: return "unknown" - env = next((item for item in config_manager.get_environments() if item.id == env_id), None) + env = next( + (item for item in config_manager.get_environments() if item.id == env_id), None + ) return env.name if env else env_id + + # [/DEF:_get_environment_name_by_id:Function] # [DEF:_extract_result_deep_links:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Build deep-link actions to verify task result from assistant chat. # @PRE: task object is available. # @POST: Returns zero or more assistant actions for dashboard open/diff. 
-def _extract_result_deep_links(task: Any, config_manager: ConfigManager) -> List[AssistantAction]: +def _extract_result_deep_links( + task: Any, config_manager: ConfigManager +) -> List[AssistantAction]: plugin_id = getattr(task, "plugin_id", None) params = getattr(task, "params", {}) or {} result = getattr(task, "result", {}) or {} @@ -626,12 +724,18 @@ def _extract_result_deep_links(task: Any, config_manager: ConfigManager) -> List env_id: Optional[str] = None if plugin_id == "superset-migration": - migrated = result.get("migrated_dashboards") if isinstance(result, dict) else None + migrated = ( + result.get("migrated_dashboards") if isinstance(result, dict) else None + ) if isinstance(migrated, list) and migrated: first = migrated[0] if isinstance(first, dict) and first.get("id") is not None: dashboard_id = int(first.get("id")) - if dashboard_id is None and isinstance(params.get("selected_ids"), list) and params["selected_ids"]: + if ( + dashboard_id is None + and isinstance(params.get("selected_ids"), list) + and params["selected_ids"] + ): dashboard_id = int(params["selected_ids"][0]) env_id = params.get("target_env_id") elif plugin_id == "superset-backup": @@ -640,9 +744,15 @@ def _extract_result_deep_links(task: Any, config_manager: ConfigManager) -> List first = dashboards[0] if isinstance(first, dict) and first.get("id") is not None: dashboard_id = int(first.get("id")) - if dashboard_id is None and isinstance(params.get("dashboard_ids"), list) and params["dashboard_ids"]: + if ( + dashboard_id is None + and isinstance(params.get("dashboard_ids"), list) + and params["dashboard_ids"] + ): dashboard_id = int(params["dashboard_ids"][0]) - env_id = params.get("environment_id") or _resolve_env_id(result.get("environment"), config_manager) + env_id = params.get("environment_id") or _resolve_env_id( + result.get("environment"), config_manager + ) elif plugin_id == "llm_dashboard_validation": if params.get("dashboard_id") is not None: dashboard_id = 
int(params["dashboard_id"]) @@ -666,11 +776,13 @@ def _extract_result_deep_links(task: Any, config_manager: ConfigManager) -> List ) ) return actions + + # [/DEF:_extract_result_deep_links:Function] # [DEF:_build_task_observability_summary:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Build compact textual summary for completed tasks to reduce "black box" effect. # @PRE: task may contain plugin-specific result payload. # @POST: Returns non-empty summary line for known task types or empty string fallback. @@ -705,7 +817,9 @@ def _build_task_observability_summary(task: Any, config_manager: ConfigManager) total = int(result.get("total_dashboards", 0) or 0) ok = int(result.get("backed_up_dashboards", 0) or 0) failed = int(result.get("failed_dashboards", 0) or 0) - env_id = params.get("environment_id") or _resolve_env_id(result.get("environment"), config_manager) + env_id = params.get("environment_id") or _resolve_env_id( + result.get("environment"), config_manager + ) env_name = _get_environment_name_by_id(env_id, config_manager) failures = result.get("failures") or [] warning = "" @@ -730,11 +844,13 @@ def _build_task_observability_summary(task: Any, config_manager: ConfigManager) if status in {"SUCCESS", "FAILED"}: return f"Задача завершена со статусом {status}." return "" + + # [/DEF:_build_task_observability_summary:Function] # [DEF:_parse_command:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 4 # @PURPOSE: Deterministically parse RU/EN command text into intent payload. # @PRE: message contains raw user text and config manager resolves environments. # @POST: Returns intent dict with domain/operation/entities/confidence/risk fields. 
@@ -784,7 +900,9 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any } # Git branch create - if any(k in lower for k in ["ветк", "branch"]) and any(k in lower for k in ["созд", "сделай", "create"]): + if any(k in lower for k in ["ветк", "branch"]) and any( + k in lower for k in ["созд", "сделай", "create"] + ): branch = _extract_id(lower, [r"(?:ветк\w*|branch)\s+([a-z0-9._/-]+)"]) return { "domain": "git", @@ -801,7 +919,9 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any # Git commit if any(k in lower for k in ["коммит", "commit"]): quoted = re.search(r'"([^"]{3,120})"', text) - message_text = quoted.group(1) if quoted else "assistant: update dashboard changes" + message_text = ( + quoted.group(1) if quoted else "assistant: update dashboard changes" + ) return { "domain": "git", "operation": "commit_changes", @@ -836,7 +956,7 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any tgt = _extract_id(lower, [r"(?:на|to)\s+([a-z0-9_-]+)"]) dry_run = "--dry-run" in lower or "dry run" in lower replace_db_config = "--replace-db-config" in lower - fix_cross_filters = "--fix-cross-filters" not in lower # Default true usually, but let's say test uses --dry-run + fix_cross_filters = "--no-fix-cross-filters" not in lower is_dangerous = _is_production_env(tgt, config_manager) return { "domain": "migration", @@ -847,7 +967,7 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any "target_env": tgt, "dry_run": dry_run, "replace_db_config": replace_db_config, - "fix_cross_filters": True, + "fix_cross_filters": fix_cross_filters, }, "confidence": 0.95 if dashboard_id and src and tgt else 0.72, "risk_level": "dangerous" if is_dangerous else "guarded", @@ -890,7 +1010,9 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any "operation": "run_llm_validation", "entities": { "dashboard_id": int(dashboard_id) if dashboard_id else None, - 
"dashboard_ref": dashboard_ref if (dashboard_ref and not dashboard_ref.isdigit()) else None, + "dashboard_ref": dashboard_ref + if (dashboard_ref and not dashboard_ref.isdigit()) + else None, "environment": env_match, "provider": provider_match, }, @@ -900,7 +1022,10 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any } # Documentation - if any(k in lower for k in ["документац", "documentation", "generate docs", "сгенерируй док"]): + if any( + k in lower + for k in ["документац", "documentation", "generate docs", "сгенерируй док"] + ): env_match = _extract_id(lower, [r"(?:в|for|env|окружени[ея])\s+([a-z0-9_-]+)"]) provider_match = _extract_id(lower, [r"(?:provider|провайдер)\s+([a-z0-9_-]+)"]) return { @@ -924,11 +1049,13 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any "risk_level": "safe", "requires_confirmation": False, } + + # [/DEF:_parse_command:Function] # [DEF:_check_any_permission:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Validate user against alternative permission checks (logical OR). # @PRE: checks list contains resource-action tuples. # @POST: Returns on first successful permission; raises 403-like HTTPException otherwise. @@ -941,12 +1068,18 @@ def _check_any_permission(current_user: User, checks: List[Tuple[str, str]]): except HTTPException as exc: errors.append(exc) - raise errors[-1] if errors else HTTPException(status_code=403, detail="Permission denied") + raise ( + errors[-1] + if errors + else HTTPException(status_code=403, detail="Permission denied") + ) + + # [/DEF:_check_any_permission:Function] # [DEF:_has_any_permission:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Check whether user has at least one permission tuple from the provided list. # @PRE: current_user and checks list are valid. # @POST: Returns True when at least one permission check passes. 
@@ -956,6 +1089,8 @@ def _has_any_permission(current_user: User, checks: List[Tuple[str, str]]) -> bo return True except HTTPException: return False + + # [/DEF:_has_any_permission:Function] @@ -964,7 +1099,9 @@ def _has_any_permission(current_user: User, checks: List[Tuple[str, str]]) -> bo # @PURPOSE: Build current-user tool catalog for LLM planner with operation contracts and defaults. # @PRE: current_user is authenticated; config/db are available. # @POST: Returns list of executable tools filtered by permission and runtime availability. -def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: Session) -> List[Dict[str, Any]]: +def _build_tool_catalog( + current_user: User, config_manager: ConfigManager, db: Session +) -> List[Dict[str, Any]]: envs = config_manager.get_environments() default_env_id = _get_default_environment_id(config_manager) providers = LLMProviderService(db).get_all_providers() @@ -975,8 +1112,13 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S llm_settings = {} active_provider = next((p.id for p in providers if p.is_active), None) fallback_provider = active_provider or (providers[0].id if providers else None) - validation_provider = resolve_bound_provider_id(llm_settings, "dashboard_validation") or fallback_provider - documentation_provider = resolve_bound_provider_id(llm_settings, "documentation") or fallback_provider + validation_provider = ( + resolve_bound_provider_id(llm_settings, "dashboard_validation") + or fallback_provider + ) + documentation_provider = ( + resolve_bound_provider_id(llm_settings, "documentation") or fallback_provider + ) candidates: List[Dict[str, Any]] = [ { @@ -1029,7 +1171,12 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S "domain": "migration", "description": "Run dashboard migration (id/slug/title) between environments. 
Optional boolean flags: replace_db_config, fix_cross_filters", "required_entities": ["source_env", "target_env"], - "optional_entities": ["dashboard_id", "dashboard_ref", "replace_db_config", "fix_cross_filters"], + "optional_entities": [ + "dashboard_id", + "dashboard_ref", + "replace_db_config", + "fix_cross_filters", + ], "risk_level": "guarded", "requires_confirmation": False, }, @@ -1048,7 +1195,10 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S "description": "Run LLM dashboard validation by dashboard id/slug/title", "required_entities": [], "optional_entities": ["dashboard_ref", "environment", "provider"], - "defaults": {"environment": default_env_id, "provider": validation_provider}, + "defaults": { + "environment": default_env_id, + "provider": validation_provider, + }, "risk_level": "guarded", "requires_confirmation": False, }, @@ -1058,7 +1208,10 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S "description": "Generate dataset documentation via LLM", "required_entities": ["dataset_id"], "optional_entities": ["environment", "provider"], - "defaults": {"environment": default_env_id, "provider": documentation_provider}, + "defaults": { + "environment": default_env_id, + "provider": documentation_provider, + }, "risk_level": "guarded", "requires_confirmation": False, }, @@ -1080,11 +1233,13 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S continue available.append(tool) return available + + # [/DEF:_build_tool_catalog:Function] # [DEF:_coerce_intent_entities:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Normalize intent entity value types from LLM output to route-compatible values. # @PRE: intent contains entities dict or missing entities. # @POST: Returned intent has numeric ids coerced where possible and string values stripped. 
@@ -1101,6 +1256,8 @@ def _coerce_intent_entities(intent: Dict[str, Any]) -> Dict[str, Any]: if isinstance(value, str): entities[key] = value.strip() return intent + + # [/DEF:_coerce_intent_entities:Function] @@ -1109,11 +1266,13 @@ _SAFE_OPS = {"show_capabilities", "get_task_status", "get_health_summary"} # [DEF:_confirmation_summary:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 4 # @PURPOSE: Build human-readable confirmation prompt for an intent before execution. # @PRE: intent contains operation and entities fields. # @POST: Returns descriptive Russian-language text ending with confirmation prompt. -async def _async_confirmation_summary(intent: Dict[str, Any], config_manager: ConfigManager, db: Session) -> str: +async def _async_confirmation_summary( + intent: Dict[str, Any], config_manager: ConfigManager, db: Session +) -> str: operation = intent.get("operation", "") entities = intent.get("entities", {}) descriptions: Dict[str, str] = { @@ -1144,72 +1303,119 @@ async def _async_confirmation_summary(intent: Dict[str, Any], config_manager: Co if operation == "execute_migration": flags = [] - flags.append("маппинг БД: " + ("ВКЛ" if _coerce_query_bool(entities.get("replace_db_config", False)) else "ВЫКЛ")) - flags.append("исправление кроссфильтров: " + ("ВКЛ" if _coerce_query_bool(entities.get("fix_cross_filters", True)) else "ВЫКЛ")) + flags.append( + "маппинг БД: " + + ( + "ВКЛ" + if _coerce_query_bool(entities.get("replace_db_config", False)) + else "ВЫКЛ" + ) + ) + flags.append( + "исправление кроссфильтров: " + + ( + "ВКЛ" + if _coerce_query_bool(entities.get("fix_cross_filters", True)) + else "ВЫКЛ" + ) + ) dry_run_enabled = _coerce_query_bool(entities.get("dry_run", False)) flags.append("отчет dry-run: " + ("ВКЛ" if dry_run_enabled else "ВЫКЛ")) text += f" ({', '.join(flags)})" if dry_run_enabled: try: - from ...core.migration.dry_run_orchestrator import MigrationDryRunService + from ...core.migration.dry_run_orchestrator import ( + MigrationDryRunService, + 
) from ...models.dashboard import DashboardSelection from ...core.superset_client import SupersetClient - + src_token = entities.get("source_env") tgt_token = entities.get("target_env") - dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=src_token) - + dashboard_id = _resolve_dashboard_id_entity( + entities, config_manager, env_hint=src_token + ) + if dashboard_id and src_token and tgt_token: src_env_id = _resolve_env_id(src_token, config_manager) tgt_env_id = _resolve_env_id(tgt_token, config_manager) - + if src_env_id and tgt_env_id: - env_map = {env.id: env for env in config_manager.get_environments()} + env_map = { + env.id: env for env in config_manager.get_environments() + } source_env = env_map.get(src_env_id) target_env = env_map.get(tgt_env_id) - + if source_env and target_env and source_env.id != target_env.id: selection = DashboardSelection( source_env_id=source_env.id, target_env_id=target_env.id, selected_ids=[dashboard_id], - replace_db_config=_coerce_query_bool(entities.get("replace_db_config", False)), - fix_cross_filters=_coerce_query_bool(entities.get("fix_cross_filters", True)) + replace_db_config=_coerce_query_bool( + entities.get("replace_db_config", False) + ), + fix_cross_filters=_coerce_query_bool( + entities.get("fix_cross_filters", True) + ), ) service = MigrationDryRunService() source_client = SupersetClient(source_env) target_client = SupersetClient(target_env) - report = service.run(selection, source_client, target_client, db) - + report = service.run( + selection, source_client, target_client, db + ) + s = report.get("summary", {}) dash_s = s.get("dashboards", {}) charts_s = s.get("charts", {}) ds_s = s.get("datasets", {}) - + # Determine main actions counts - creates = dash_s.get("create", 0) + charts_s.get("create", 0) + ds_s.get("create", 0) - updates = dash_s.get("update", 0) + charts_s.get("update", 0) + ds_s.get("update", 0) - deletes = dash_s.get("delete", 0) + charts_s.get("delete", 0) + 
ds_s.get("delete", 0) - + creates = ( + dash_s.get("create", 0) + + charts_s.get("create", 0) + + ds_s.get("create", 0) + ) + updates = ( + dash_s.get("update", 0) + + charts_s.get("update", 0) + + ds_s.get("update", 0) + ) + deletes = ( + dash_s.get("delete", 0) + + charts_s.get("delete", 0) + + ds_s.get("delete", 0) + ) + text += f"\n\nОтчет dry-run:\n- Будет создано новых объектов: {creates}\n- Будет обновлено: {updates}\n- Будет удалено: {deletes}" else: text += "\n\n(Не удалось загрузить отчет dry-run: неверные окружения)." except Exception as e: import traceback - logger.warning("[assistant.dry_run_summary][failed] Exception: %s\n%s", e, traceback.format_exc()) + + logger.warning( + "[assistant.dry_run_summary][failed] Exception: %s\n%s", + e, + traceback.format_exc(), + ) text += f"\n\n(Не удалось загрузить отчет dry-run: {e})." return f"Выполнить: {text}. Подтвердите или отмените." + + # [/DEF:_confirmation_summary:Function] # [DEF:_clarification_text_for_intent:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Convert technical missing-parameter errors into user-facing clarification prompts. # @PRE: state was classified as needs_clarification for current intent/error combination. # @POST: Returned text is human-readable and actionable for target operation. 
-def _clarification_text_for_intent(intent: Optional[Dict[str, Any]], detail_text: str) -> str: +def _clarification_text_for_intent( + intent: Optional[Dict[str, Any]], detail_text: str +) -> str: operation = (intent or {}).get("operation") guidance_by_operation: Dict[str, str] = { "run_llm_validation": ( @@ -1225,11 +1431,13 @@ def _clarification_text_for_intent(intent: Optional[Dict[str, Any]], detail_text "run_backup": "Нужно уточнение: укажите окружение и при необходимости дашборд (id/slug/title).", } return guidance_by_operation.get(operation, detail_text) + + # [/DEF:_clarification_text_for_intent:Function] # [DEF:_plan_intent_with_llm:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Use active LLM provider to select best tool/operation from dynamic catalog. # @PRE: tools list contains allowed operations for current user. # @POST: Returns normalized intent dict when planning succeeds; otherwise None. @@ -1267,23 +1475,25 @@ async def _plan_intent_with_llm( "Choose exactly one operation from available_tools or return clarify.\n" "Output strict JSON object:\n" "{" - "\"domain\": string, " - "\"operation\": string, " - "\"entities\": object, " - "\"confidence\": number, " - "\"risk_level\": \"safe\"|\"guarded\"|\"dangerous\", " - "\"requires_confirmation\": boolean" + '"domain": string, ' + '"operation": string, ' + '"entities": object, ' + '"confidence": number, ' + '"risk_level": "safe"|"guarded"|"dangerous", ' + '"requires_confirmation": boolean' "}\n" "Rules:\n" "- Use only operation names from available_tools.\n" - "- If input is ambiguous, operation must be \"clarify\" with low confidence.\n" + '- If input is ambiguous, operation must be "clarify" with low confidence.\n' "- If dashboard is provided as name/slug (e.g., COVID), put it into entities.dashboard_ref.\n" "- Keep entities minimal and factual.\n" ) payload = { "available_tools": tools, "user_message": message, - "known_environments": [{"id": e.id, "name": e.name} for e in 
config_manager.get_environments()], + "known_environments": [ + {"id": e.id, "name": e.name} for e in config_manager.get_environments() + ], } try: response = await planner.get_json_completion( @@ -1294,7 +1504,10 @@ async def _plan_intent_with_llm( ) except Exception as exc: import traceback - logger.warning(f"[assistant.planner][fallback] LLM planner unavailable: {exc}\n{traceback.format_exc()}") + + logger.warning( + f"[assistant.planner][fallback] LLM planner unavailable: {exc}\n{traceback.format_exc()}" + ) return None if not isinstance(response, dict): return None @@ -1321,7 +1534,9 @@ async def _plan_intent_with_llm( "entities": response.get("entities", {}), "confidence": float(response.get("confidence", 0.75)), "risk_level": response.get("risk_level") or selected["risk_level"], - "requires_confirmation": bool(response.get("requires_confirmation", selected["requires_confirmation"])), + "requires_confirmation": bool( + response.get("requires_confirmation", selected["requires_confirmation"]) + ), } intent = _coerce_intent_entities(intent) @@ -1331,16 +1546,20 @@ async def _plan_intent_with_llm( intent["entities"][key] = value if operation in {"deploy_dashboard", "execute_migration"}: - env_token = intent["entities"].get("environment") or intent["entities"].get("target_env") + env_token = intent["entities"].get("environment") or intent["entities"].get( + "target_env" + ) if _is_production_env(env_token, config_manager): intent["risk_level"] = "dangerous" intent["requires_confirmation"] = True return intent + + # [/DEF:_plan_intent_with_llm:Function] # [DEF:_authorize_intent:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Validate user permissions for parsed intent before confirmation/dispatch. # @PRE: intent.operation is present for known assistant command domains. # @POST: Returns if authorized; raises HTTPException(403) when denied. 
@@ -1348,11 +1567,13 @@ def _authorize_intent(intent: Dict[str, Any], current_user: User): operation = intent.get("operation") if operation in INTENT_PERMISSION_CHECKS: _check_any_permission(current_user, INTENT_PERMISSION_CHECKS[operation]) + + # [/DEF:_authorize_intent:Function] # [DEF:_dispatch_intent:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 5 # @PURPOSE: Execute parsed assistant intent via existing task/plugin/git services. # @PRE: intent operation is known and actor permissions are validated per operation. # @POST: Returns response text, optional task id, and UI actions for follow-up. @@ -1379,7 +1600,9 @@ async def _dispatch_intent( "get_task_status": "Статус: проверка задачи", "get_health_summary": "Здоровье: сводка по дашбордам", } - available = [labels[t["operation"]] for t in tools_catalog if t["operation"] in labels] + available = [ + labels[t["operation"]] for t in tools_catalog if t["operation"] in labels + ] if not available: return "Сейчас нет доступных для вас операций ассистента.", None, [] commands = "\n".join(f"- {item}" for item in available) @@ -1392,12 +1615,17 @@ async def _dispatch_intent( if operation == "get_health_summary": from ...services.health_service import HealthService + env_token = entities.get("environment") env_id = _resolve_env_id(env_token, config_manager) service = HealthService(db) summary = await service.get_health_summary(environment_id=env_id) - - env_name = _get_environment_name_by_id(env_id, config_manager) if env_id else "всех окружений" + + env_name = ( + _get_environment_name_by_id(env_id, config_manager) + if env_id + else "всех окружений" + ) text = ( f"Сводка здоровья дашбордов для {env_name}:\n" f"- ✅ Прошли проверку: {summary.pass_count}\n" @@ -1405,11 +1633,15 @@ async def _dispatch_intent( f"- ❌ Ошибки валидации: {summary.fail_count}\n" f"- ❓ Неизвестно: {summary.unknown_count}" ) - + actions = [ - AssistantAction(type="open_route", label="Открыть Health Center", target="/dashboards/health") + 
AssistantAction( + type="open_route", + label="Открыть Health Center", + target="/dashboards/health", + ) ] - + if summary.fail_count > 0: text += "\n\nОбнаружены ошибки в следующих дашбордах:" for item in summary.items: @@ -1419,21 +1651,27 @@ async def _dispatch_intent( AssistantAction( type="open_route", label=f"Отчет {item.dashboard_id}", - target=f"/reports/llm/{item.task_id}" + target=f"/reports/llm/{item.task_id}", ) ) - - return text, None, actions[:5] # Limit actions to avoid UI clutter + + return text, None, actions[:5] # Limit actions to avoid UI clutter if operation == "get_task_status": _check_any_permission(current_user, [("tasks", "READ")]) task_id = entities.get("task_id") if not task_id: - recent = [t for t in task_manager.get_tasks(limit=20, offset=0) if t.user_id == current_user.id] + recent = [ + t + for t in task_manager.get_tasks(limit=20, offset=0) + if t.user_id == current_user.id + ] if not recent: return "У вас пока нет задач в истории.", None, [] task = recent[0] - actions = [AssistantAction(type="open_task", label="Open Task", target=task.id)] + actions = [ + AssistantAction(type="open_task", label="Open Task", target=task.id) + ] if str(task.status).upper() in {"SUCCESS", "FAILED"}: actions.extend(_extract_result_deep_links(task, config_manager)) summary_line = _build_task_observability_summary(task, config_manager) @@ -1463,7 +1701,10 @@ async def _dispatch_intent( dashboard_id = _resolve_dashboard_id_entity(entities, config_manager) branch_name = entities.get("branch_name") if not dashboard_id or not branch_name: - raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref or branch_name") + raise HTTPException( + status_code=422, + detail="Missing dashboard_id/dashboard_ref or branch_name", + ) git_service.create_branch(dashboard_id, branch_name, "main") return f"Ветка `{branch_name}` создана для дашборда {dashboard_id}.", None, [] @@ -1472,7 +1713,9 @@ async def _dispatch_intent( dashboard_id = 
_resolve_dashboard_id_entity(entities, config_manager) commit_message = entities.get("message") if not dashboard_id: - raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref") + raise HTTPException( + status_code=422, detail="Missing dashboard_id/dashboard_ref" + ) git_service.commit_changes(dashboard_id, commit_message, None) return "Коммит выполнен успешно.", None, [] @@ -1480,9 +1723,14 @@ async def _dispatch_intent( _check_any_permission(current_user, [("plugin:git", "EXECUTE")]) env_token = entities.get("environment") env_id = _resolve_env_id(env_token, config_manager) - dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token) + dashboard_id = _resolve_dashboard_id_entity( + entities, config_manager, env_hint=env_token + ) if not dashboard_id or not env_id: - raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref or environment") + raise HTTPException( + status_code=422, + detail="Missing dashboard_id/dashboard_ref or environment", + ) task = await task_manager.create_task( plugin_id="git-integration", @@ -1498,27 +1746,40 @@ async def _dispatch_intent( task.id, [ AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction(type="open_reports", label="Open Reports", target="/reports"), + AssistantAction( + type="open_reports", label="Open Reports", target="/reports" + ), ], ) if operation == "execute_migration": - _check_any_permission(current_user, [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")]) + _check_any_permission( + current_user, + [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")], + ) src_token = entities.get("source_env") dashboard_ref = entities.get("dashboard_ref") - dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=src_token) + dashboard_id = _resolve_dashboard_id_entity( + entities, config_manager, env_hint=src_token + ) src = _resolve_env_id(src_token, 
config_manager) tgt = _resolve_env_id(entities.get("target_env"), config_manager) if not src or not tgt: raise HTTPException(status_code=422, detail="Missing source_env/target_env") if not dashboard_id and not dashboard_ref: - raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref") + raise HTTPException( + status_code=422, detail="Missing dashboard_id/dashboard_ref" + ) migration_params: Dict[str, Any] = { "source_env_id": src, "target_env_id": tgt, - "replace_db_config": _coerce_query_bool(entities.get("replace_db_config", False)), - "fix_cross_filters": _coerce_query_bool(entities.get("fix_cross_filters", True)), + "replace_db_config": _coerce_query_bool( + entities.get("replace_db_config", False) + ), + "fix_cross_filters": _coerce_query_bool( + entities.get("fix_cross_filters", True) + ), } if dashboard_id: migration_params["selected_ids"] = [dashboard_id] @@ -1536,7 +1797,9 @@ async def _dispatch_intent( task.id, [ AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction(type="open_reports", label="Open Reports", target="/reports"), + AssistantAction( + type="open_reports", label="Open Reports", target="/reports" + ), *( [ AssistantAction( @@ -1544,7 +1807,11 @@ async def _dispatch_intent( label=f"Открыть дашборд в {_get_environment_name_by_id(tgt, config_manager)}", target=f"/dashboards/{dashboard_id}?env_id={tgt}", ), - AssistantAction(type="open_diff", label="Показать Diff", target=str(dashboard_id)), + AssistantAction( + type="open_diff", + label="Показать Diff", + target=str(dashboard_id), + ), ] if dashboard_id else [] @@ -1553,17 +1820,26 @@ async def _dispatch_intent( ) if operation == "run_backup": - _check_any_permission(current_user, [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")]) + _check_any_permission( + current_user, + [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")], + ) env_token = entities.get("environment") env_id = _resolve_env_id(env_token, 
config_manager) if not env_id: - raise HTTPException(status_code=400, detail="Missing or unknown environment") + raise HTTPException( + status_code=400, detail="Missing or unknown environment" + ) params: Dict[str, Any] = {"environment_id": env_id} if entities.get("dashboard_id") or entities.get("dashboard_ref"): - dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token) + dashboard_id = _resolve_dashboard_id_entity( + entities, config_manager, env_hint=env_token + ) if not dashboard_id: - raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref") + raise HTTPException( + status_code=422, detail="Missing dashboard_id/dashboard_ref" + ) params["dashboard_ids"] = [dashboard_id] task = await task_manager.create_task( @@ -1576,7 +1852,9 @@ async def _dispatch_intent( task.id, [ AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction(type="open_reports", label="Open Reports", target="/reports"), + AssistantAction( + type="open_reports", label="Open Reports", target="/reports" + ), *( [ AssistantAction( @@ -1584,7 +1862,11 @@ async def _dispatch_intent( label=f"Открыть дашборд в {_get_environment_name_by_id(env_id, config_manager)}", target=f"/dashboards/{dashboard_id}?env_id={env_id}", ), - AssistantAction(type="open_diff", label="Показать Diff", target=str(dashboard_id)), + AssistantAction( + type="open_diff", + label="Показать Diff", + target=str(dashboard_id), + ), ] if entities.get("dashboard_id") or entities.get("dashboard_ref") else [] @@ -1593,10 +1875,16 @@ async def _dispatch_intent( ) if operation == "run_llm_validation": - _check_any_permission(current_user, [("plugin:llm_dashboard_validation", "EXECUTE")]) + _check_any_permission( + current_user, [("plugin:llm_dashboard_validation", "EXECUTE")] + ) env_token = entities.get("environment") - env_id = _resolve_env_id(env_token, config_manager) or _get_default_environment_id(config_manager) - dashboard_id = 
_resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token) + env_id = _resolve_env_id( + env_token, config_manager + ) or _get_default_environment_id(config_manager) + dashboard_id = _resolve_dashboard_id_entity( + entities, config_manager, env_hint=env_token + ) provider_id = _resolve_provider_id( entities.get("provider"), db, @@ -1610,7 +1898,9 @@ async def _dispatch_intent( ) provider = LLMProviderService(db).get_provider(provider_id) provider_model = provider.default_model if provider else "" - if not is_multimodal_model(provider_model, provider.provider_type if provider else None): + if not is_multimodal_model( + provider_model, provider.provider_type if provider else None + ): raise HTTPException( status_code=422, detail=( @@ -1633,7 +1923,9 @@ async def _dispatch_intent( task.id, [ AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction(type="open_reports", label="Open Reports", target="/reports"), + AssistantAction( + type="open_reports", label="Open Reports", target="/reports" + ), ], ) @@ -1648,7 +1940,9 @@ async def _dispatch_intent( task_key="documentation", ) if not dataset_id or not env_id or not provider_id: - raise HTTPException(status_code=400, detail="Missing dataset_id/environment/provider") + raise HTTPException( + status_code=400, detail="Missing dataset_id/environment/provider" + ) task = await task_manager.create_task( plugin_id="llm_documentation", @@ -1664,17 +1958,21 @@ async def _dispatch_intent( task.id, [ AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction(type="open_reports", label="Open Reports", target="/reports"), + AssistantAction( + type="open_reports", label="Open Reports", target="/reports" + ), ], ) raise HTTPException(status_code=400, detail="Unsupported operation") + + # [/DEF:_dispatch_intent:Function] @router.post("/messages", response_model=AssistantMessageResponse) # [DEF:send_message:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 5 # 
@PURPOSE: Parse assistant command, enforce safety gates, and dispatch executable intent. # @PRE: Authenticated user is available and message text is non-empty. # @POST: Response state is one of clarification/confirmation/started/success/denied/failed. @@ -1688,7 +1986,9 @@ async def send_message( ): with belief_scope("assistant.send_message"): user_id = current_user.id - conversation_id = _resolve_or_create_conversation(user_id, request.conversation_id, db) + conversation_id = _resolve_or_create_conversation( + user_id, request.conversation_id, db + ) _append_history(user_id, conversation_id, "user", request.message) _persist_message(db, user_id, conversation_id, "user", request.message) @@ -1696,7 +1996,9 @@ async def send_message( tools_catalog = _build_tool_catalog(current_user, config_manager, db) intent = None try: - intent = await _plan_intent_with_llm(request.message, tools_catalog, db, config_manager) + intent = await _plan_intent_with_llm( + request.message, tools_catalog, db, config_manager + ) except Exception as exc: logger.warning(f"[assistant.planner][fallback] Planner error: {exc}") if not intent: @@ -1705,9 +2007,23 @@ async def send_message( if intent.get("domain") == "unknown" or confidence < 0.6: text = "Команда неоднозначна. Уточните действие: git / migration / backup / llm / status." 
- _append_history(user_id, conversation_id, "assistant", text, state="needs_clarification") - _persist_message(db, user_id, conversation_id, "assistant", text, state="needs_clarification", metadata={"intent": intent}) - audit_payload = {"decision": "needs_clarification", "message": request.message, "intent": intent} + _append_history( + user_id, conversation_id, "assistant", text, state="needs_clarification" + ) + _persist_message( + db, + user_id, + conversation_id, + "assistant", + text, + state="needs_clarification", + metadata={"intent": intent}, + ) + audit_payload = { + "decision": "needs_clarification", + "message": request.message, + "intent": intent, + } _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) return AssistantMessageResponse( @@ -1757,8 +2073,16 @@ async def send_message( metadata={ "intent": intent, "actions": [ - {"type": "confirm", "label": "✅ Подтвердить", "target": confirmation_id}, - {"type": "cancel", "label": "❌ Отменить", "target": confirmation_id}, + { + "type": "confirm", + "label": "✅ Подтвердить", + "target": confirmation_id, + }, + { + "type": "cancel", + "label": "❌ Отменить", + "target": confirmation_id, + }, ], }, ) @@ -1778,16 +2102,31 @@ async def send_message( intent=intent, confirmation_id=confirmation_id, actions=[ - AssistantAction(type="confirm", label="✅ Подтвердить", target=confirmation_id), - AssistantAction(type="cancel", label="❌ Отменить", target=confirmation_id), + AssistantAction( + type="confirm", + label="✅ Подтвердить", + target=confirmation_id, + ), + AssistantAction( + type="cancel", label="❌ Отменить", target=confirmation_id + ), ], created_at=datetime.utcnow(), ) # Read-only operations execute immediately - text, task_id, actions = await _dispatch_intent(intent, current_user, task_manager, config_manager, db) + text, task_id, actions = await _dispatch_intent( + intent, current_user, task_manager, config_manager, db + ) state = "started" if task_id else "success" - 
_append_history(user_id, conversation_id, "assistant", text, state=state, task_id=task_id) + _append_history( + user_id, + conversation_id, + "assistant", + text, + state=state, + task_id=task_id, + ) _persist_message( db, user_id, @@ -1796,9 +2135,17 @@ async def send_message( text, state=state, task_id=task_id, - metadata={"intent": intent, "actions": [a.model_dump() for a in actions]}, + metadata={ + "intent": intent, + "actions": [a.model_dump() for a in actions], + }, ) - audit_payload = {"decision": "executed", "message": request.message, "intent": intent, "task_id": task_id} + audit_payload = { + "decision": "executed", + "message": request.message, + "intent": intent, + "task_id": task_id, + } _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) return AssistantMessageResponse( @@ -1824,10 +2171,27 @@ async def send_message( state = "needs_clarification" else: state = "failed" - text = _clarification_text_for_intent(intent, detail_text) if state == "needs_clarification" else detail_text + text = ( + _clarification_text_for_intent(intent, detail_text) + if state == "needs_clarification" + else detail_text + ) _append_history(user_id, conversation_id, "assistant", text, state=state) - _persist_message(db, user_id, conversation_id, "assistant", text, state=state, metadata={"intent": intent}) - audit_payload = {"decision": state, "message": request.message, "intent": intent, "error": text} + _persist_message( + db, + user_id, + conversation_id, + "assistant", + text, + state=state, + metadata={"intent": intent}, + ) + audit_payload = { + "decision": state, + "message": request.message, + "intent": intent, + "error": text, + } _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) return AssistantMessageResponse( @@ -1836,15 +2200,21 @@ async def send_message( state=state, text=text, intent=intent, - actions=[AssistantAction(type="rephrase", label="Rephrase command")] if state == 
"needs_clarification" else [], + actions=[AssistantAction(type="rephrase", label="Rephrase command")] + if state == "needs_clarification" + else [], created_at=datetime.utcnow(), ) + + # [/DEF:send_message:Function] -@router.post("/confirmations/{confirmation_id}/confirm", response_model=AssistantMessageResponse) +@router.post( + "/confirmations/{confirmation_id}/confirm", response_model=AssistantMessageResponse +) # [DEF:confirm_operation:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Execute previously requested risky operation after explicit user confirmation. # @PRE: confirmation_id exists, belongs to current user, is pending, and not expired. # @POST: Confirmation state becomes consumed and operation result is persisted in history. @@ -1866,10 +2236,14 @@ async def confirm_operation( raise HTTPException(status_code=404, detail="Confirmation not found") if record.user_id != current_user.id: - raise HTTPException(status_code=403, detail="Confirmation does not belong to current user") + raise HTTPException( + status_code=403, detail="Confirmation does not belong to current user" + ) if record.state != "pending": - raise HTTPException(status_code=400, detail=f"Confirmation already {record.state}") + raise HTTPException( + status_code=400, detail=f"Confirmation already {record.state}" + ) if datetime.utcnow() > record.expires_at: record.state = "expired" @@ -1877,11 +2251,20 @@ async def confirm_operation( raise HTTPException(status_code=400, detail="Confirmation expired") intent = record.intent - text, task_id, actions = await _dispatch_intent(intent, current_user, task_manager, config_manager, db) + text, task_id, actions = await _dispatch_intent( + intent, current_user, task_manager, config_manager, db + ) record.state = "consumed" _update_confirmation_state(db, confirmation_id, "consumed") - _append_history(current_user.id, record.conversation_id, "assistant", text, state="started" if task_id else "success", task_id=task_id) + _append_history( + 
current_user.id, + record.conversation_id, + "assistant", + text, + state="started" if task_id else "success", + task_id=task_id, + ) _persist_message( db, current_user.id, @@ -1892,7 +2275,12 @@ async def confirm_operation( task_id=task_id, metadata={"intent": intent, "confirmation_id": confirmation_id}, ) - audit_payload = {"decision": "confirmed_execute", "confirmation_id": confirmation_id, "task_id": task_id, "intent": intent} + audit_payload = { + "decision": "confirmed_execute", + "confirmation_id": confirmation_id, + "task_id": task_id, + "intent": intent, + } _audit(current_user.id, audit_payload) _persist_audit(db, current_user.id, audit_payload, record.conversation_id) @@ -1906,12 +2294,16 @@ async def confirm_operation( actions=actions, created_at=datetime.utcnow(), ) + + # [/DEF:confirm_operation:Function] -@router.post("/confirmations/{confirmation_id}/cancel", response_model=AssistantMessageResponse) +@router.post( + "/confirmations/{confirmation_id}/cancel", response_model=AssistantMessageResponse +) # [DEF:cancel_operation:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Cancel pending risky operation and mark confirmation token as cancelled. # @PRE: confirmation_id exists, belongs to current user, and is still pending. # @POST: Confirmation becomes cancelled and cannot be executed anymore. 
@@ -1931,15 +2323,26 @@ async def cancel_operation( raise HTTPException(status_code=404, detail="Confirmation not found") if record.user_id != current_user.id: - raise HTTPException(status_code=403, detail="Confirmation does not belong to current user") + raise HTTPException( + status_code=403, detail="Confirmation does not belong to current user" + ) if record.state != "pending": - raise HTTPException(status_code=400, detail=f"Confirmation already {record.state}") + raise HTTPException( + status_code=400, detail=f"Confirmation already {record.state}" + ) record.state = "cancelled" _update_confirmation_state(db, confirmation_id, "cancelled") text = "Операция отменена. Выполнение не запускалось." - _append_history(current_user.id, record.conversation_id, "assistant", text, state="success", confirmation_id=confirmation_id) + _append_history( + current_user.id, + record.conversation_id, + "assistant", + text, + state="success", + confirmation_id=confirmation_id, + ) _persist_message( db, current_user.id, @@ -1950,7 +2353,11 @@ async def cancel_operation( confirmation_id=confirmation_id, metadata={"intent": record.intent}, ) - audit_payload = {"decision": "cancelled", "confirmation_id": confirmation_id, "intent": record.intent} + audit_payload = { + "decision": "cancelled", + "confirmation_id": confirmation_id, + "intent": record.intent, + } _audit(current_user.id, audit_payload) _persist_audit(db, current_user.id, audit_payload, record.conversation_id) @@ -1964,11 +2371,13 @@ async def cancel_operation( actions=[], created_at=datetime.utcnow(), ) + + # [/DEF:cancel_operation:Function] # [DEF:list_conversations:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Return paginated conversation list for current user with archived flag and last message preview. # @PRE: Authenticated user context and valid pagination params. # @POST: Conversations are grouped by conversation_id sorted by latest activity descending. 
@@ -2020,14 +2429,20 @@ async def list_conversations( items = [] search_term = search.lower().strip() if search else "" - archived_total = sum(1 for c in summary.values() if _is_conversation_archived(c.get("updated_at"))) + archived_total = sum( + 1 + for c in summary.values() + if _is_conversation_archived(c.get("updated_at")) + ) active_total = len(summary) - archived_total for conv in summary.values(): conv["archived"] = _is_conversation_archived(conv.get("updated_at")) if not conv.get("title"): conv["title"] = f"Conversation {conv['conversation_id'][:8]}" if search_term: - haystack = f"{conv.get('title', '')} {conv.get('last_message', '')}".lower() + haystack = ( + f"{conv.get('title', '')} {conv.get('last_message', '')}".lower() + ) if search_term not in haystack: continue if archived_only and not conv["archived"]: @@ -2035,7 +2450,9 @@ async def list_conversations( if not archived_only and not include_archived and conv["archived"]: continue updated = conv.get("updated_at") - conv["updated_at"] = updated.isoformat() if isinstance(updated, datetime) else None + conv["updated_at"] = ( + updated.isoformat() if isinstance(updated, datetime) else None + ) items.append(conv) items.sort(key=lambda x: x.get("updated_at") or "", reverse=True) @@ -2052,11 +2469,13 @@ async def list_conversations( "active_total": active_total, "archived_total": archived_total, } + + # [/DEF:list_conversations:Function] # [DEF:delete_conversation:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Soft-delete or hard-delete a conversation and clear its in-memory trace. # @PRE: conversation_id belongs to current_user. # @POST: Conversation records are removed from DB and CONVERSATIONS cache. @@ -2068,24 +2487,36 @@ async def delete_conversation( ): with belief_scope("assistant.conversations.delete"): user_id = current_user.id - + # 1. Remove from in-memory cache key = (user_id, conversation_id) if key in CONVERSATIONS: del CONVERSATIONS[key] - + # 2. 
Delete from database - deleted_count = db.query(AssistantMessageRecord).filter( - AssistantMessageRecord.user_id == user_id, - AssistantMessageRecord.conversation_id == conversation_id - ).delete() - + deleted_count = ( + db.query(AssistantMessageRecord) + .filter( + AssistantMessageRecord.user_id == user_id, + AssistantMessageRecord.conversation_id == conversation_id, + ) + .delete() + ) + db.commit() - + if deleted_count == 0: - raise HTTPException(status_code=404, detail="Conversation not found or already deleted") - - return {"status": "success", "deleted": deleted_count, "conversation_id": conversation_id} + raise HTTPException( + status_code=404, detail="Conversation not found or already deleted" + ) + + return { + "status": "success", + "deleted": deleted_count, + "conversation_id": conversation_id, + } + + # [/DEF:delete_conversation:Function] @@ -2108,19 +2539,15 @@ async def get_history( _cleanup_history_ttl(db, user_id) conv_id = _resolve_or_create_conversation(user_id, conversation_id, db) - base_query = ( - db.query(AssistantMessageRecord) - .filter( - AssistantMessageRecord.user_id == user_id, - AssistantMessageRecord.conversation_id == conv_id, - ) + base_query = db.query(AssistantMessageRecord).filter( + AssistantMessageRecord.user_id == user_id, + AssistantMessageRecord.conversation_id == conv_id, ) total = base_query.count() start = (page - 1) * page_size if from_latest: rows = ( - base_query - .order_by(desc(AssistantMessageRecord.created_at)) + base_query.order_by(desc(AssistantMessageRecord.created_at)) .offset(start) .limit(page_size) .all() @@ -2128,8 +2555,7 @@ async def get_history( rows = list(reversed(rows)) else: rows = ( - base_query - .order_by(AssistantMessageRecord.created_at.asc()) + base_query.order_by(AssistantMessageRecord.created_at.asc()) .offset(start) .limit(page_size) .all() @@ -2161,6 +2587,8 @@ async def get_history( "from_latest": from_latest, "conversation_id": conv_id, } + + # [/DEF:get_history:Function] @@ -2204,6 
+2632,8 @@ async def get_assistant_audit( "total": len(persistent), "memory_total": len(memory_rows), } + + # [/DEF:get_assistant_audit:Function] -# [/DEF:backend.src.api.routes.assistant:Module] +# [/DEF:AssistantApi:Module] diff --git a/backend/src/api/routes/dashboards.py b/backend/src/api/routes/dashboards.py index 605bc05c..10a0e720 100644 --- a/backend/src/api/routes/dashboards.py +++ b/backend/src/api/routes/dashboards.py @@ -1,12 +1,12 @@ -# [DEF:backend.src.api.routes.dashboards:Module] +# [DEF:DashboardsApi:Module] # # @COMPLEXITY: 5 # @SEMANTICS: api, dashboards, resources, hub # @PURPOSE: API endpoints for the Dashboard Hub - listing dashboards with Git and task status # @LAYER: API # @RELATION: DEPENDS_ON ->[AppDependencies] -# @RELATION: DEPENDS_ON ->[backend.src.services.resource_service.ResourceService] -# @RELATION: DEPENDS_ON ->[backend.src.core.superset_client.SupersetClient] +# @RELATION: DEPENDS_ON ->[ResourceService] +# @RELATION: DEPENDS_ON ->[SupersetClient] # # @INVARIANT: All dashboard responses include git_status and last_task metadata # @@ -65,18 +65,22 @@ from ...services.resource_service import ResourceService router = APIRouter(prefix="/api/dashboards", tags=["Dashboards"]) + # [DEF:GitStatus:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO for dashboard Git synchronization status. class GitStatus(BaseModel): branch: Optional[str] = None sync_status: Optional[str] = Field(None, pattern="^OK|DIFF|NO_REPO|ERROR$") has_repo: Optional[bool] = None has_changes_for_commit: Optional[bool] = None + + # [/DEF:GitStatus:DataClass] + # [DEF:LastTask:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: DTO for the most recent background task associated with a dashboard. 
class LastTask(BaseModel): task_id: Optional[str] = None @@ -85,10 +89,13 @@ class LastTask(BaseModel): pattern="^PENDING|RUNNING|SUCCESS|FAILED|ERROR|AWAITING_INPUT|WAITING_INPUT|AWAITING_MAPPING$", ) validation_status: Optional[str] = Field(None, pattern="^PASS|FAIL|WARN|UNKNOWN$") + + # [/DEF:LastTask:DataClass] + # [DEF:DashboardItem:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO representing a single dashboard with projected metadata. class DashboardItem(BaseModel): id: int @@ -101,10 +108,13 @@ class DashboardItem(BaseModel): owners: Optional[List[str]] = None git_status: Optional[GitStatus] = None last_task: Optional[LastTask] = None + + # [/DEF:DashboardItem:DataClass] + # [DEF:EffectiveProfileFilter:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Metadata about applied profile filters for UI context. class EffectiveProfileFilter(BaseModel): applied: bool @@ -114,10 +124,13 @@ class EffectiveProfileFilter(BaseModel): match_logic: Optional[ Literal["owners_or_modified_by", "slug_only", "owners_or_modified_by+slug_only"] ] = None + + # [/DEF:EffectiveProfileFilter:DataClass] + # [DEF:DashboardsResponse:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Envelope DTO for paginated dashboards list. class DashboardsResponse(BaseModel): dashboards: List[DashboardItem] @@ -126,10 +139,13 @@ class DashboardsResponse(BaseModel): page_size: int total_pages: int effective_profile_filter: Optional[EffectiveProfileFilter] = None + + # [/DEF:DashboardsResponse:DataClass] + # [DEF:DashboardChartItem:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO for a chart linked to a dashboard. 
class DashboardChartItem(BaseModel): id: int @@ -138,10 +154,13 @@ class DashboardChartItem(BaseModel): dataset_id: Optional[int] = None last_modified: Optional[str] = None overview: Optional[str] = None + + # [/DEF:DashboardChartItem:DataClass] + # [DEF:DashboardDatasetItem:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO for a dataset associated with a dashboard. class DashboardDatasetItem(BaseModel): id: int @@ -150,10 +169,13 @@ class DashboardDatasetItem(BaseModel): database: str last_modified: Optional[str] = None overview: Optional[str] = None + + # [/DEF:DashboardDatasetItem:DataClass] + # [DEF:DashboardDetailResponse:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Detailed dashboard metadata including children. class DashboardDetailResponse(BaseModel): id: int @@ -167,10 +189,13 @@ class DashboardDetailResponse(BaseModel): datasets: List[DashboardDatasetItem] chart_count: int dataset_count: int + + # [/DEF:DashboardDetailResponse:DataClass] + # [DEF:DashboardTaskHistoryItem:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Individual history record entry. class DashboardTaskHistoryItem(BaseModel): id: str @@ -181,18 +206,24 @@ class DashboardTaskHistoryItem(BaseModel): finished_at: Optional[str] = None env_id: Optional[str] = None summary: Optional[str] = None + + # [/DEF:DashboardTaskHistoryItem:DataClass] + # [DEF:DashboardTaskHistoryResponse:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Collection DTO for task history. class DashboardTaskHistoryResponse(BaseModel): dashboard_id: int items: List[DashboardTaskHistoryItem] + + # [/DEF:DashboardTaskHistoryResponse:DataClass] + # [DEF:DatabaseMapping:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: DTO for cross-environment database ID mapping. 
class DatabaseMapping(BaseModel): source_db: str @@ -200,18 +231,23 @@ class DatabaseMapping(BaseModel): source_db_uuid: Optional[str] = None target_db_uuid: Optional[str] = None confidence: float + + # [/DEF:DatabaseMapping:DataClass] + # [DEF:DatabaseMappingsResponse:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: Wrapper for database mappings. class DatabaseMappingsResponse(BaseModel): mappings: List[DatabaseMapping] + + # [/DEF:DatabaseMappingsResponse:DataClass] # [DEF:_find_dashboard_id_by_slug:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard numeric ID by slug using Superset list endpoint. # @PRE: `dashboard_slug` is non-empty. # @POST: Returns dashboard ID when found, otherwise None. @@ -220,8 +256,16 @@ def _find_dashboard_id_by_slug( dashboard_slug: str, ) -> Optional[int]: query_variants = [ - {"filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, - {"filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, + { + "filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, + { + "filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, ] for query in query_variants: @@ -235,11 +279,13 @@ def _find_dashboard_id_by_slug( continue return None + + # [/DEF:_find_dashboard_id_by_slug:Function] # [DEF:_resolve_dashboard_id_from_ref:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard ID from slug-first reference with numeric fallback. # @PRE: `dashboard_ref` is provided in route path. # @POST: Returns a valid dashboard ID or raises HTTPException(404). 
@@ -260,11 +306,13 @@ def _resolve_dashboard_id_from_ref( return int(normalized_ref) raise HTTPException(status_code=404, detail="Dashboard not found") + + # [/DEF:_resolve_dashboard_id_from_ref:Function] # [DEF:_find_dashboard_id_by_slug_async:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard numeric ID by slug using async Superset list endpoint. # @PRE: dashboard_slug is non-empty. # @POST: Returns dashboard ID when found, otherwise None. @@ -273,8 +321,16 @@ async def _find_dashboard_id_by_slug_async( dashboard_slug: str, ) -> Optional[int]: query_variants = [ - {"filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, - {"filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, + { + "filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, + { + "filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, ] for query in query_variants: @@ -288,11 +344,13 @@ async def _find_dashboard_id_by_slug_async( continue return None + + # [/DEF:_find_dashboard_id_by_slug_async:Function] # [DEF:_resolve_dashboard_id_from_ref_async:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard ID from slug-first reference using async Superset client. # @PRE: dashboard_ref is provided in route path. # @POST: Returns valid dashboard ID or raises HTTPException(404). @@ -312,11 +370,13 @@ async def _resolve_dashboard_id_from_ref_async( return int(normalized_ref) raise HTTPException(status_code=404, detail="Dashboard not found") + + # [/DEF:_resolve_dashboard_id_from_ref_async:Function] # [DEF:_normalize_filter_values:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Normalize query filter values to lower-cased non-empty tokens. # @PRE: values may be None or list of strings. # @POST: Returns trimmed normalized list preserving input order. 
@@ -329,11 +389,13 @@ def _normalize_filter_values(values: Optional[List[str]]) -> List[str]: if token: normalized.append(token) return normalized + + # [/DEF:_normalize_filter_values:Function] # [DEF:_dashboard_git_filter_value:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Build comparable git status token for dashboards filtering. # @PRE: dashboard payload may contain git_status or None. # @POST: Returns one of ok|diff|no_repo|error|pending. @@ -350,21 +412,26 @@ def _dashboard_git_filter_value(dashboard: Dict[str, Any]) -> str: if sync_status == "ERROR": return "error" return "pending" + + # [/DEF:_dashboard_git_filter_value:Function] + # [DEF:_normalize_actor_alias_token:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Normalize actor alias token to comparable trim+lower text. # @PRE: value can be scalar/None. # @POST: Returns normalized token or None. def _normalize_actor_alias_token(value: Any) -> Optional[str]: token = str(value or "").strip().lower() return token or None + + # [/DEF:_normalize_actor_alias_token:Function] # [DEF:_normalize_owner_display_token:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Project owner payload value into stable display string for API response contracts. # @PRE: owner can be scalar, dict or None. # @POST: Returns trimmed non-empty owner display token or None. 
@@ -373,7 +440,9 @@ def _normalize_owner_display_token(owner: Any) -> Optional[str]: return None if isinstance(owner, dict): - username = str(owner.get("username") or owner.get("user_name") or owner.get("name") or "").strip() + username = str( + owner.get("username") or owner.get("user_name") or owner.get("name") or "" + ).strip() full_name = str(owner.get("full_name") or "").strip() first_name = str(owner.get("first_name") or "").strip() last_name = str(owner.get("last_name") or "").strip() @@ -387,11 +456,13 @@ def _normalize_owner_display_token(owner: Any) -> Optional[str]: normalized = str(owner).strip() return normalized or None + + # [/DEF:_normalize_owner_display_token:Function] # [DEF:_normalize_dashboard_owner_values:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Normalize dashboard owners payload to optional list of display strings. # @PRE: owners payload can be None, scalar, or list with mixed values. # @POST: Returns deduplicated owner labels preserving order, or None when absent. @@ -412,15 +483,19 @@ def _normalize_dashboard_owner_values(owners: Any) -> Optional[List[str]]: normalized.append(token) return normalized + + # [/DEF:_normalize_dashboard_owner_values:Function] # [DEF:_project_dashboard_response_items:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Project dashboard payloads to response-contract-safe shape. # @PRE: dashboards is a list of dict-like dashboard payloads. # @POST: Returned items satisfy DashboardItem owners=list[str]|None contract. 
-def _project_dashboard_response_items(dashboards: List[Dict[str, Any]]) -> List[Dict[str, Any]]: +def _project_dashboard_response_items( + dashboards: List[Dict[str, Any]], +) -> List[Dict[str, Any]]: projected: List[Dict[str, Any]] = [] for dashboard in dashboards: projected_dashboard = dict(dashboard) @@ -429,15 +504,19 @@ def _project_dashboard_response_items(dashboards: List[Dict[str, Any]]) -> List[ ) projected.append(projected_dashboard) return projected + + # [/DEF:_project_dashboard_response_items:Function] # [DEF:_get_profile_filter_binding:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard profile-filter binding through current or legacy profile service contracts. # @PRE: profile_service implements get_dashboard_filter_binding or get_my_preference. # @POST: Returns normalized binding payload with deterministic defaults. -def _get_profile_filter_binding(profile_service: Any, current_user: User) -> Dict[str, Any]: +def _get_profile_filter_binding( + profile_service: Any, current_user: User +) -> Dict[str, Any]: def _read_optional_string(value: Any) -> Optional[str]: return value if isinstance(value, str) else None @@ -448,7 +527,9 @@ def _get_profile_filter_binding(profile_service: Any, current_user: User) -> Dic binding = profile_service.get_dashboard_filter_binding(current_user) if isinstance(binding, dict): return { - "superset_username": _read_optional_string(binding.get("superset_username")), + "superset_username": _read_optional_string( + binding.get("superset_username") + ), "superset_username_normalized": _read_optional_string( binding.get("superset_username_normalized") ), @@ -482,11 +563,13 @@ def _get_profile_filter_binding(profile_service: Any, current_user: User) -> Dic "show_only_my_dashboards": False, "show_only_slug_dashboards": False, } + + # [/DEF:_get_profile_filter_binding:Function] # [DEF:_resolve_profile_actor_aliases:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve stable actor aliases for 
profile filtering without per-dashboard detail fan-out. # @PRE: bound username is available and env is valid. # @POST: Returns at least normalized username; may include Superset display-name alias. @@ -511,9 +594,7 @@ def _resolve_profile_actor_aliases(env: Any, bound_username: str) -> List[str]: sort_order="asc", ) lookup_items = ( - lookup_payload.get("items", []) - if isinstance(lookup_payload, dict) - else [] + lookup_payload.get("items", []) if isinstance(lookup_payload, dict) else [] ) matched_item: Optional[Dict[str, Any]] = None @@ -547,11 +628,13 @@ def _resolve_profile_actor_aliases(env: Any, bound_username: str) -> List[str]: f"(env={getattr(env, 'id', None)}, bound_username={normalized_bound!r}): {alias_error}" ) return aliases + + # [/DEF:_resolve_profile_actor_aliases:Function] # [DEF:_matches_dashboard_actor_aliases:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Apply profile actor matching against multiple aliases (username + optional display name). # @PRE: actor_aliases contains normalized non-empty tokens. # @POST: Returns True when any alias matches owners OR modified_by. 
@@ -569,6 +652,8 @@ def _matches_dashboard_actor_aliases( ): return True return False + + # [/DEF:_matches_dashboard_actor_aliases:Function] @@ -593,7 +678,9 @@ async def get_dashboards( search: Optional[str] = None, page: int = 1, page_size: int = 10, - page_context: Literal["dashboards_main", "other"] = Query(default="dashboards_main"), + page_context: Literal["dashboards_main", "other"] = Query( + default="dashboards_main" + ), apply_profile_default: bool = Query(default=True), override_show_all: bool = Query(default=False), filter_title: Optional[List[str]] = Query(default=None), @@ -606,7 +693,7 @@ async def get_dashboards( resource_service=Depends(get_resource_service), current_user: User = Depends(get_current_user), db: Session = Depends(get_db), - _ = Depends(has_permission("plugin:migration", "READ")) + _=Depends(has_permission("plugin:migration", "READ")), ): with belief_scope( "get_dashboards", @@ -620,13 +707,19 @@ async def get_dashboards( logger.error(f"[get_dashboards][Coherence:Failed] Invalid page: {page}") raise HTTPException(status_code=400, detail="Page must be >= 1") if page_size < 1 or page_size > 100: - logger.error(f"[get_dashboards][Coherence:Failed] Invalid page_size: {page_size}") - raise HTTPException(status_code=400, detail="Page size must be between 1 and 100") + logger.error( + f"[get_dashboards][Coherence:Failed] Invalid page_size: {page_size}" + ) + raise HTTPException( + status_code=400, detail="Page size must be between 1 and 100" + ) environments = config_manager.get_environments() env = next((e for e in environments if e.id == env_id), None) if not env: - logger.error(f"[get_dashboards][Coherence:Failed] Environment not found: {env_id}") + logger.error( + f"[get_dashboards][Coherence:Failed] Environment not found: {env_id}" + ) raise HTTPException(status_code=404, detail="Environment not found") bound_username: Optional[str] = None @@ -644,16 +737,24 @@ async def get_dashboards( try: profile_service_module = 
getattr(ProfileService, "__module__", "") is_mock_db = db.__class__.__module__.startswith("unittest.mock") - use_profile_service = (not is_mock_db) or profile_service_module.startswith("unittest.mock") + use_profile_service = (not is_mock_db) or profile_service_module.startswith( + "unittest.mock" + ) if use_profile_service: profile_service = ProfileService(db=db, config_manager=config_manager) - profile_preference = _get_profile_filter_binding(profile_service, current_user) - normalized_username = str( - profile_preference.get("superset_username_normalized") or "" - ).strip().lower() - raw_username = str( - profile_preference.get("superset_username") or "" - ).strip().lower() + profile_preference = _get_profile_filter_binding( + profile_service, current_user + ) + normalized_username = ( + str(profile_preference.get("superset_username_normalized") or "") + .strip() + .lower() + ) + raw_username = ( + str(profile_preference.get("superset_username") or "") + .strip() + .lower() + ) bound_username = normalized_username or raw_username or None can_apply_profile_filter = ( @@ -706,18 +807,24 @@ async def get_dashboards( actor_filters, ) ) - needs_full_scan = has_column_filters or bool(can_apply_profile_filter) or bool(can_apply_slug_filter) + needs_full_scan = ( + has_column_filters + or bool(can_apply_profile_filter) + or bool(can_apply_slug_filter) + ) if isinstance(resource_service, ResourceService) and not needs_full_scan: try: - page_payload = await resource_service.get_dashboards_page_with_status( - env, - all_tasks, - page=page, - page_size=page_size, - search=search, - include_git_status=False, - require_slug=bool(can_apply_slug_filter), + page_payload = ( + await resource_service.get_dashboards_page_with_status( + env, + all_tasks, + page=page, + page_size=page_size, + search=search, + include_git_status=False, + require_slug=bool(can_apply_slug_filter), + ) ) paginated_dashboards = page_payload["dashboards"] total = page_payload["total"] @@ -744,13 +851,16 @@ 
async def get_dashboards( if search: search_lower = search.lower() dashboards = [ - d for d in dashboards + d + for d in dashboards if search_lower in d.get("title", "").lower() or search_lower in d.get("slug", "").lower() ] total = len(dashboards) - total_pages = (total + page_size - 1) // page_size if total > 0 else 1 + total_pages = ( + (total + page_size - 1) // page_size if total > 0 else 1 + ) start_idx = (page - 1) * page_size end_idx = start_idx + page_size paginated_dashboards = dashboards[start_idx:end_idx] @@ -769,7 +879,11 @@ async def get_dashboards( include_git_status=bool(git_filters), ) - if can_apply_profile_filter and bound_username and profile_service is not None: + if ( + can_apply_profile_filter + and bound_username + and profile_service is not None + ): actor_aliases = _resolve_profile_actor_aliases(env, bound_username) if not actor_aliases: actor_aliases = [bound_username] @@ -818,7 +932,8 @@ async def get_dashboards( if search: search_lower = search.lower() dashboards = [ - d for d in dashboards + d + for d in dashboards if search_lower in d.get("title", "").lower() or search_lower in d.get("slug", "").lower() ] @@ -833,16 +948,28 @@ async def get_dashboards( if git_value not in git_filters: return False - llm_value = str( - ((dashboard.get("last_task") or {}).get("validation_status")) - or "UNKNOWN" - ).strip().lower() + llm_value = ( + str( + ( + (dashboard.get("last_task") or {}).get( + "validation_status" + ) + ) + or "UNKNOWN" + ) + .strip() + .lower() + ) if llm_filters and llm_value not in llm_filters: return False - changed_on_raw = str(dashboard.get("last_modified") or "").strip().lower() + changed_on_raw = ( + str(dashboard.get("last_modified") or "").strip().lower() + ) changed_on_prefix = ( - changed_on_raw[:10] if len(changed_on_raw) >= 10 else changed_on_raw + changed_on_raw[:10] + if len(changed_on_raw) >= 10 + else changed_on_raw ) if ( changed_on_filters @@ -865,7 +992,9 @@ async def get_dashboards( return True if 
has_column_filters: - dashboards = [d for d in dashboards if _matches_dashboard_filters(d)] + dashboards = [ + d for d in dashboards if _matches_dashboard_filters(d) + ] total = len(dashboards) total_pages = (total + page_size - 1) // page_size if total > 0 else 1 @@ -878,7 +1007,9 @@ async def get_dashboards( f"(page {page}/{total_pages}, total: {total}, profile_filter_applied={effective_profile_filter.applied})" ) - response_dashboards = _project_dashboard_response_items(paginated_dashboards) + response_dashboards = _project_dashboard_response_items( + paginated_dashboards + ) return DashboardsResponse( dashboards=response_dashboards, @@ -890,12 +1021,19 @@ async def get_dashboards( ) except Exception as e: - logger.error(f"[get_dashboards][Coherence:Failed] Failed to fetch dashboards: {e}") - raise HTTPException(status_code=503, detail=f"Failed to fetch dashboards: {str(e)}") + logger.error( + f"[get_dashboards][Coherence:Failed] Failed to fetch dashboards: {e}" + ) + raise HTTPException( + status_code=503, detail=f"Failed to fetch dashboards: {str(e)}" + ) + + # [/DEF:get_dashboards:Function] + # [DEF:get_database_mappings:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Get database mapping suggestions between source and target environments # @PRE: User has permission plugin:migration:read # @PRE: source_env_id and target_env_id are valid environment IDs @@ -910,64 +1048,85 @@ async def get_database_mappings( target_env_id: str, config_manager=Depends(get_config_manager), mapping_service=Depends(get_mapping_service), - _ = Depends(has_permission("plugin:migration", "READ")) + _=Depends(has_permission("plugin:migration", "READ")), ): - with belief_scope("get_database_mappings", f"source={source_env_id}, target={target_env_id}"): + with belief_scope( + "get_database_mappings", f"source={source_env_id}, target={target_env_id}" + ): # Validate environments exist environments = config_manager.get_environments() source_env = next((e for e in environments if 
e.id == source_env_id), None) target_env = next((e for e in environments if e.id == target_env_id), None) - + if not source_env: - logger.error(f"[get_database_mappings][Coherence:Failed] Source environment not found: {source_env_id}") + logger.error( + f"[get_database_mappings][Coherence:Failed] Source environment not found: {source_env_id}" + ) raise HTTPException(status_code=404, detail="Source environment not found") if not target_env: - logger.error(f"[get_database_mappings][Coherence:Failed] Target environment not found: {target_env_id}") + logger.error( + f"[get_database_mappings][Coherence:Failed] Target environment not found: {target_env_id}" + ) raise HTTPException(status_code=404, detail="Target environment not found") try: # Get mapping suggestions using MappingService - suggestions = await mapping_service.get_suggestions(source_env_id, target_env_id) - + suggestions = await mapping_service.get_suggestions( + source_env_id, target_env_id + ) + # Format suggestions as DatabaseMapping objects mappings = [ DatabaseMapping( - source_db=s.get('source_db', ''), - target_db=s.get('target_db', ''), - source_db_uuid=s.get('source_db_uuid'), - target_db_uuid=s.get('target_db_uuid'), - confidence=s.get('confidence', 0.0) + source_db=s.get("source_db", ""), + target_db=s.get("target_db", ""), + source_db_uuid=s.get("source_db_uuid"), + target_db_uuid=s.get("target_db_uuid"), + confidence=s.get("confidence", 0.0), ) for s in suggestions ] - - logger.info(f"[get_database_mappings][Coherence:OK] Returning {len(mappings)} database mapping suggestions") - + + logger.info( + f"[get_database_mappings][Coherence:OK] Returning {len(mappings)} database mapping suggestions" + ) + return DatabaseMappingsResponse(mappings=mappings) - + except Exception as e: - logger.error(f"[get_database_mappings][Coherence:Failed] Failed to get database mappings: {e}") - raise HTTPException(status_code=503, detail=f"Failed to get database mappings: {str(e)}") + logger.error( + 
f"[get_database_mappings][Coherence:Failed] Failed to get database mappings: {e}" + ) + raise HTTPException( + status_code=503, detail=f"Failed to get database mappings: {str(e)}" + ) + + # [/DEF:get_database_mappings:Function] + # [DEF:get_dashboard_detail:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Fetch detailed dashboard info with related charts and datasets # @PRE: env_id must be valid and dashboard ref (slug or id) must exist # @POST: Returns dashboard detail payload for overview page -# @RELATION: CALLS ->[backend.src.core.async_superset_client.AsyncSupersetClient.get_dashboard_detail_async] +# @RELATION: CALLS ->[AsyncSupersetClient] @router.get("/{dashboard_ref}", response_model=DashboardDetailResponse) async def get_dashboard_detail( dashboard_ref: str, env_id: str, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:migration", "READ")) + _=Depends(has_permission("plugin:migration", "READ")), ): - with belief_scope("get_dashboard_detail", f"dashboard_ref={dashboard_ref}, env_id={env_id}"): + with belief_scope( + "get_dashboard_detail", f"dashboard_ref={dashboard_ref}, env_id={env_id}" + ): environments = config_manager.get_environments() env = next((e for e in environments if e.id == env_id), None) if not env: - logger.error(f"[get_dashboard_detail][Coherence:Failed] Environment not found: {env_id}") + logger.error( + f"[get_dashboard_detail][Coherence:Failed] Environment not found: {env_id}" + ) raise HTTPException(status_code=404, detail="Environment not found") try: @@ -981,17 +1140,25 @@ async def get_dashboard_detail( except HTTPException: raise except Exception as e: - logger.error(f"[get_dashboard_detail][Coherence:Failed] Failed to fetch dashboard detail: {e}") - raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard detail: {str(e)}") + logger.error( + f"[get_dashboard_detail][Coherence:Failed] Failed to fetch dashboard detail: {e}" + ) + raise HTTPException( + status_code=503, 
detail=f"Failed to fetch dashboard detail: {str(e)}" + ) + + # [/DEF:get_dashboard_detail:Function] # [DEF:_task_matches_dashboard:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Checks whether task params are tied to a specific dashboard and environment. # @PRE: task-like object exposes plugin_id and params fields. # @POST: Returns True only for supported task plugins tied to dashboard_id (+optional env_id). -def _task_matches_dashboard(task: Any, dashboard_id: int, env_id: Optional[str]) -> bool: +def _task_matches_dashboard( + task: Any, dashboard_id: int, env_id: Optional[str] +) -> bool: plugin_id = getattr(task, "plugin_id", None) if plugin_id not in {"superset-backup", "llm_dashboard_validation"}: return False @@ -1017,11 +1184,13 @@ def _task_matches_dashboard(task: Any, dashboard_id: int, env_id: Optional[str]) task_env = params.get("environment_id") or params.get("env") return str(task_env) == str(env_id) return True + + # [/DEF:_task_matches_dashboard:Function] # [DEF:get_dashboard_tasks_history:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Returns history of backup and LLM validation tasks for a dashboard. # @PRE: dashboard ref (slug or id) is valid. # @POST: Response contains sorted task history (newest first). 
@@ -1032,9 +1201,12 @@ async def get_dashboard_tasks_history( limit: int = Query(20, ge=1, le=100), config_manager=Depends(get_config_manager), task_manager=Depends(get_task_manager), - _ = Depends(has_permission("tasks", "READ")) + _=Depends(has_permission("tasks", "READ")), ): - with belief_scope("get_dashboard_tasks_history", f"dashboard_ref={dashboard_ref}, env_id={env_id}, limit={limit}"): + with belief_scope( + "get_dashboard_tasks_history", + f"dashboard_ref={dashboard_ref}, env_id={env_id}, limit={limit}", + ): dashboard_id: Optional[int] = None client: Optional[AsyncSupersetClient] = None try: @@ -1044,10 +1216,14 @@ async def get_dashboard_tasks_history( environments = config_manager.get_environments() env = next((e for e in environments if e.id == env_id), None) if not env: - logger.error(f"[get_dashboard_tasks_history][Coherence:Failed] Environment not found: {env_id}") + logger.error( + f"[get_dashboard_tasks_history][Coherence:Failed] Environment not found: {env_id}" + ) raise HTTPException(status_code=404, detail="Environment not found") client = AsyncSupersetClient(env) - dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, client) + dashboard_id = await _resolve_dashboard_id_from_ref_async( + dashboard_ref, client + ) else: logger.error( "[get_dashboard_tasks_history][Coherence:Failed] Non-numeric dashboard ref requires env_id" @@ -1063,9 +1239,8 @@ async def get_dashboard_tasks_history( matching_tasks.append(task) def _sort_key(task_obj: Any) -> str: - return ( - str(getattr(task_obj, "started_at", "") or "") - or str(getattr(task_obj, "finished_at", "") or "") + return str(getattr(task_obj, "started_at", "") or "") or str( + getattr(task_obj, "finished_at", "") or "" ) matching_tasks.sort(key=_sort_key, reverse=True) @@ -1092,18 +1267,28 @@ async def get_dashboard_tasks_history( plugin_id=str(getattr(task, "plugin_id", "")), status=str(getattr(task, "status", "")), validation_status=validation_status, - 
started_at=getattr(task, "started_at", None).isoformat() if getattr(task, "started_at", None) else None, - finished_at=getattr(task, "finished_at", None).isoformat() if getattr(task, "finished_at", None) else None, - env_id=str(params.get("environment_id") or params.get("env")) if (params.get("environment_id") or params.get("env")) else None, + started_at=getattr(task, "started_at", None).isoformat() + if getattr(task, "started_at", None) + else None, + finished_at=getattr(task, "finished_at", None).isoformat() + if getattr(task, "finished_at", None) + else None, + env_id=str(params.get("environment_id") or params.get("env")) + if (params.get("environment_id") or params.get("env")) + else None, summary=summary, ) ) - logger.info(f"[get_dashboard_tasks_history][Coherence:OK] Found {len(items)} tasks for dashboard_ref={dashboard_ref}, dashboard_id={dashboard_id}") + logger.info( + f"[get_dashboard_tasks_history][Coherence:OK] Found {len(items)} tasks for dashboard_ref={dashboard_ref}, dashboard_id={dashboard_id}" + ) return DashboardTaskHistoryResponse(dashboard_id=dashboard_id, items=items) finally: if client is not None: await client.aclose() + + # [/DEF:get_dashboard_tasks_history:Function] @@ -1118,13 +1303,18 @@ async def get_dashboard_thumbnail( env_id: str, force: bool = Query(False), config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:migration", "READ")) + _=Depends(has_permission("plugin:migration", "READ")), ): - with belief_scope("get_dashboard_thumbnail", f"dashboard_ref={dashboard_ref}, env_id={env_id}, force={force}"): + with belief_scope( + "get_dashboard_thumbnail", + f"dashboard_ref={dashboard_ref}, env_id={env_id}, force={force}", + ): environments = config_manager.get_environments() env = next((e for e in environments if e.id == env_id), None) if not env: - logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Environment not found: {env_id}") + logger.error( + f"[get_dashboard_thumbnail][Coherence:Failed] 
Environment not found: {env_id}" + ) raise HTTPException(status_code=404, detail="Environment not found") try: @@ -1139,10 +1329,18 @@ async def get_dashboard_thumbnail( endpoint=f"/dashboard/{dashboard_id}/cache_dashboard_screenshot/", json={"force": force}, ) - payload = screenshot_payload.get("result", screenshot_payload) if isinstance(screenshot_payload, dict) else {} - image_url = payload.get("image_url", "") if isinstance(payload, dict) else "" + payload = ( + screenshot_payload.get("result", screenshot_payload) + if isinstance(screenshot_payload, dict) + else {} + ) + image_url = ( + payload.get("image_url", "") if isinstance(payload, dict) else "" + ) if isinstance(image_url, str) and image_url: - matched = re.search(r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", image_url) + matched = re.search( + r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", image_url + ) if matched: digest = matched.group(1) except DashboardNotFoundError: @@ -1155,20 +1353,33 @@ async def get_dashboard_thumbnail( method="GET", endpoint=f"/dashboard/{dashboard_id}", ) - dashboard_data = dashboard_payload.get("result", dashboard_payload) if isinstance(dashboard_payload, dict) else {} - thumbnail_url = dashboard_data.get("thumbnail_url", "") if isinstance(dashboard_data, dict) else "" + dashboard_data = ( + dashboard_payload.get("result", dashboard_payload) + if isinstance(dashboard_payload, dict) + else {} + ) + thumbnail_url = ( + dashboard_data.get("thumbnail_url", "") + if isinstance(dashboard_data, dict) + else "" + ) if isinstance(thumbnail_url, str) and thumbnail_url: parsed = urlparse(thumbnail_url) parsed_path = parsed.path or thumbnail_url if parsed_path.startswith("/api/v1/"): - parsed_path = parsed_path[len("/api/v1"):] + parsed_path = parsed_path[len("/api/v1") :] thumb_endpoint = parsed_path - matched = re.search(r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", parsed_path) + matched = re.search( + r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", + 
parsed_path, + ) if matched: digest = matched.group(1) if not thumb_endpoint: - thumb_endpoint = f"/dashboard/{dashboard_id}/thumbnail/{digest or 'latest'}/" + thumb_endpoint = ( + f"/dashboard/{dashboard_id}/thumbnail/{digest or 'latest'}/" + ) thumb_response = client.network.request( method="GET", @@ -1188,35 +1399,54 @@ async def get_dashboard_thumbnail( content_type = thumb_response.headers.get("Content-Type", "image/png") return Response(content=thumb_response.content, media_type=content_type) except DashboardNotFoundError as e: - logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Dashboard not found for thumbnail: {e}") + logger.error( + f"[get_dashboard_thumbnail][Coherence:Failed] Dashboard not found for thumbnail: {e}" + ) raise HTTPException(status_code=404, detail="Dashboard thumbnail not found") except HTTPException: raise except Exception as e: - logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Failed to fetch dashboard thumbnail: {e}") - raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard thumbnail: {str(e)}") + logger.error( + f"[get_dashboard_thumbnail][Coherence:Failed] Failed to fetch dashboard thumbnail: {e}" + ) + raise HTTPException( + status_code=503, detail=f"Failed to fetch dashboard thumbnail: {str(e)}" + ) + + # [/DEF:get_dashboard_thumbnail:Function] + # [DEF:MigrateRequest:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO for dashboard migration requests. 
class MigrateRequest(BaseModel): source_env_id: str = Field(..., description="Source environment ID") target_env_id: str = Field(..., description="Target environment ID") - dashboard_ids: List[int] = Field(..., description="List of dashboard IDs to migrate") - db_mappings: Optional[Dict[str, str]] = Field(None, description="Database mappings for migration") + dashboard_ids: List[int] = Field( + ..., description="List of dashboard IDs to migrate" + ) + db_mappings: Optional[Dict[str, str]] = Field( + None, description="Database mappings for migration" + ) replace_db_config: bool = Field(False, description="Replace database configuration") + + # [/DEF:MigrateRequest:DataClass] + # [DEF:TaskResponse:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO for async task ID return. class TaskResponse(BaseModel): task_id: str + + # [/DEF:TaskResponse:DataClass] + # [DEF:migrate_dashboards:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Trigger bulk migration of dashboards from source to target environment # @PRE: User has permission plugin:migration:execute # @PRE: source_env_id and target_env_id are valid environment IDs @@ -1226,67 +1456,95 @@ class TaskResponse(BaseModel): # @PARAM: request (MigrateRequest) - Migration request with source, target, and dashboard IDs # @RETURN: TaskResponse - Task ID for tracking # @RELATION: DISPATCHES ->[MigrationPlugin:execute] -# @RELATION: CALLS ->[task_manager:create_task] +# @RELATION: CALLS ->[TaskManager] @router.post("/migrate", response_model=TaskResponse) async def migrate_dashboards( request: MigrateRequest, config_manager=Depends(get_config_manager), task_manager=Depends(get_task_manager), - _ = Depends(has_permission("plugin:migration", "EXECUTE")) + _=Depends(has_permission("plugin:migration", "EXECUTE")), ): - with belief_scope("migrate_dashboards", f"source={request.source_env_id}, target={request.target_env_id}, count={len(request.dashboard_ids)}"): + with belief_scope( + "migrate_dashboards", + 
f"source={request.source_env_id}, target={request.target_env_id}, count={len(request.dashboard_ids)}", + ): # Validate request if not request.dashboard_ids: - logger.error("[migrate_dashboards][Coherence:Failed] No dashboard IDs provided") - raise HTTPException(status_code=400, detail="At least one dashboard ID must be provided") - + logger.error( + "[migrate_dashboards][Coherence:Failed] No dashboard IDs provided" + ) + raise HTTPException( + status_code=400, detail="At least one dashboard ID must be provided" + ) + # Validate environments exist environments = config_manager.get_environments() - source_env = next((e for e in environments if e.id == request.source_env_id), None) - target_env = next((e for e in environments if e.id == request.target_env_id), None) - + source_env = next( + (e for e in environments if e.id == request.source_env_id), None + ) + target_env = next( + (e for e in environments if e.id == request.target_env_id), None + ) + if not source_env: - logger.error(f"[migrate_dashboards][Coherence:Failed] Source environment not found: {request.source_env_id}") + logger.error( + f"[migrate_dashboards][Coherence:Failed] Source environment not found: {request.source_env_id}" + ) raise HTTPException(status_code=404, detail="Source environment not found") if not target_env: - logger.error(f"[migrate_dashboards][Coherence:Failed] Target environment not found: {request.target_env_id}") + logger.error( + f"[migrate_dashboards][Coherence:Failed] Target environment not found: {request.target_env_id}" + ) raise HTTPException(status_code=404, detail="Target environment not found") - + try: # Create migration task task_params = { - 'source_env_id': request.source_env_id, - 'target_env_id': request.target_env_id, - 'selected_ids': request.dashboard_ids, - 'replace_db_config': request.replace_db_config, - 'db_mappings': request.db_mappings or {} + "source_env_id": request.source_env_id, + "target_env_id": request.target_env_id, + "selected_ids": 
request.dashboard_ids, + "replace_db_config": request.replace_db_config, + "db_mappings": request.db_mappings or {}, } - + task_obj = await task_manager.create_task( - plugin_id='superset-migration', - params=task_params + plugin_id="superset-migration", params=task_params ) - - logger.info(f"[migrate_dashboards][Coherence:OK] Migration task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards") - + + logger.info( + f"[migrate_dashboards][Coherence:OK] Migration task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards" + ) + return TaskResponse(task_id=str(task_obj.id)) - + except Exception as e: - logger.error(f"[migrate_dashboards][Coherence:Failed] Failed to create migration task: {e}") - raise HTTPException(status_code=503, detail=f"Failed to create migration task: {str(e)}") + logger.error( + f"[migrate_dashboards][Coherence:Failed] Failed to create migration task: {e}" + ) + raise HTTPException( + status_code=503, detail=f"Failed to create migration task: {str(e)}" + ) + + # [/DEF:migrate_dashboards:Function] + # [DEF:BackupRequest:DataClass] -# @COMPLEXITY: 3 +# @COMPLEXITY: 1 # @PURPOSE: DTO for dashboard backup requests. 
class BackupRequest(BaseModel): env_id: str = Field(..., description="Environment ID") dashboard_ids: List[int] = Field(..., description="List of dashboard IDs to backup") - schedule: Optional[str] = Field(None, description="Cron schedule for recurring backups (e.g., '0 0 * * *')") + schedule: Optional[str] = Field( + None, description="Cron schedule for recurring backups (e.g., '0 0 * * *')" + ) + + # [/DEF:BackupRequest:DataClass] + # [DEF:backup_dashboards:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Trigger bulk backup of dashboards with optional cron schedule # @PRE: User has permission plugin:backup:execute # @PRE: env_id is a valid environment ID @@ -1297,48 +1555,64 @@ class BackupRequest(BaseModel): # @PARAM: request (BackupRequest) - Backup request with environment and dashboard IDs # @RETURN: TaskResponse - Task ID for tracking # @RELATION: DISPATCHES ->[BackupPlugin:execute] -# @RELATION: CALLS ->[task_manager:create_task] +# @RELATION: CALLS ->[TaskManager] @router.post("/backup", response_model=TaskResponse) async def backup_dashboards( request: BackupRequest, config_manager=Depends(get_config_manager), task_manager=Depends(get_task_manager), - _ = Depends(has_permission("plugin:backup", "EXECUTE")) + _=Depends(has_permission("plugin:backup", "EXECUTE")), ): - with belief_scope("backup_dashboards", f"env={request.env_id}, count={len(request.dashboard_ids)}, schedule={request.schedule}"): + with belief_scope( + "backup_dashboards", + f"env={request.env_id}, count={len(request.dashboard_ids)}, schedule={request.schedule}", + ): # Validate request if not request.dashboard_ids: - logger.error("[backup_dashboards][Coherence:Failed] No dashboard IDs provided") - raise HTTPException(status_code=400, detail="At least one dashboard ID must be provided") - + logger.error( + "[backup_dashboards][Coherence:Failed] No dashboard IDs provided" + ) + raise HTTPException( + status_code=400, detail="At least one dashboard ID must be provided" + ) + # 
Validate environment exists environments = config_manager.get_environments() env = next((e for e in environments if e.id == request.env_id), None) - + if not env: - logger.error(f"[backup_dashboards][Coherence:Failed] Environment not found: {request.env_id}") + logger.error( + f"[backup_dashboards][Coherence:Failed] Environment not found: {request.env_id}" + ) raise HTTPException(status_code=404, detail="Environment not found") - + try: # Create backup task task_params = { - 'env': request.env_id, - 'dashboards': request.dashboard_ids, - 'schedule': request.schedule + "env": request.env_id, + "dashboards": request.dashboard_ids, + "schedule": request.schedule, } - + task_obj = await task_manager.create_task( - plugin_id='superset-backup', - params=task_params + plugin_id="superset-backup", params=task_params ) - - logger.info(f"[backup_dashboards][Coherence:OK] Backup task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards") - + + logger.info( + f"[backup_dashboards][Coherence:OK] Backup task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards" + ) + return TaskResponse(task_id=str(task_obj.id)) - + except Exception as e: - logger.error(f"[backup_dashboards][Coherence:Failed] Failed to create backup task: {e}") - raise HTTPException(status_code=503, detail=f"Failed to create backup task: {str(e)}") + logger.error( + f"[backup_dashboards][Coherence:Failed] Failed to create backup task: {e}" + ) + raise HTTPException( + status_code=503, detail=f"Failed to create backup task: {str(e)}" + ) + + # [/DEF:backup_dashboards:Function] -# [/DEF:backend.src.api.routes.dashboards:Module] +# [/DEF:DashboardsApi:Module] diff --git a/backend/src/api/routes/git.py b/backend/src/api/routes/git.py index 13c84a9f..6553d756 100644 --- a/backend/src/api/routes/git.py +++ b/backend/src/api/routes/git.py @@ -1,11 +1,11 @@ -# [DEF:backend.src.api.routes.git:Module] +# [DEF:GitApi:Module] # # @COMPLEXITY: 3 # @SEMANTICS: git, routes, api, fastapi, repository, 
deployment # @PURPOSE: Provides FastAPI endpoints for Git integration operations. # @LAYER: API # @RELATION: USES -> [backend.src.services.git_service.GitService] -# @RELATION: USES -> [backend.src.api.routes.git_schemas] +# @RELATION: USES -> [GitSchemas] # @RELATION: USES -> [backend.src.models.git] # # @INVARIANT: All Git operations must be routed through GitService. @@ -21,16 +21,30 @@ from src.models.auth import User from src.models.git import GitServerConfig, GitRepository, GitProvider from src.models.profile import UserDashboardPreference from src.api.routes.git_schemas import ( - GitServerConfigSchema, GitServerConfigCreate, GitServerConfigUpdate, - BranchSchema, BranchCreate, - BranchCheckout, CommitSchema, CommitCreate, - DeploymentEnvironmentSchema, DeployRequest, RepoInitRequest, + GitServerConfigSchema, + GitServerConfigCreate, + GitServerConfigUpdate, + BranchSchema, + BranchCreate, + BranchCheckout, + CommitSchema, + CommitCreate, + DeploymentEnvironmentSchema, + DeployRequest, + RepoInitRequest, RepositoryBindingSchema, - RepoStatusBatchRequest, RepoStatusBatchResponse, - GiteaRepoCreateRequest, GiteaRepoSchema, - RemoteRepoCreateRequest, RemoteRepoSchema, - PromoteRequest, PromoteResponse, - MergeStatusSchema, MergeConflictFileSchema, MergeResolveRequest, MergeContinueRequest, + RepoStatusBatchRequest, + RepoStatusBatchResponse, + GiteaRepoCreateRequest, + GiteaRepoSchema, + RemoteRepoCreateRequest, + RemoteRepoSchema, + PromoteRequest, + PromoteResponse, + MergeStatusSchema, + MergeConflictFileSchema, + MergeResolveRequest, + MergeContinueRequest, ) from src.services.git_service import GitService from src.core.async_superset_client import AsyncSupersetClient @@ -69,6 +83,8 @@ def _build_no_repo_status_payload() -> dict: "sync_status": "NO_REPO", "has_repo": False, } + + # [/DEF:_build_no_repo_status_payload:Function] @@ -82,11 +98,13 @@ def _build_no_repo_status_payload() -> dict: def _handle_unexpected_git_route_error(route_name: str, error: 
Exception) -> None: logger.error(f"[{route_name}][Coherence:Failed] {error}") raise HTTPException(status_code=500, detail=f"{route_name} failed: {str(error)}") + + # [/DEF:_handle_unexpected_git_route_error:Function] # [DEF:_resolve_repository_status:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve repository status for one dashboard with graceful NO_REPO semantics. # @PRE: `dashboard_id` is a valid integer. # @POST: Returns standard status payload or `NO_REPO` payload when repository path is absent. @@ -109,11 +127,13 @@ def _resolve_repository_status(dashboard_id: int) -> dict: ) return _build_no_repo_status_payload() raise + + # [/DEF:_resolve_repository_status:Function] # [DEF:_get_git_config_or_404:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve GitServerConfig by id or raise 404. # @PRE: db session is available. # @POST: Returns GitServerConfig model. @@ -122,11 +142,13 @@ def _get_git_config_or_404(db: Session, config_id: str) -> GitServerConfig: if not config: raise HTTPException(status_code=404, detail="Git configuration not found") return config + + # [/DEF:_get_git_config_or_404:Function] # [DEF:_find_dashboard_id_by_slug:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard numeric ID by slug in a specific environment. # @PRE: dashboard_slug is non-empty. # @POST: Returns dashboard ID or None when not found. 
@@ -135,8 +157,16 @@ def _find_dashboard_id_by_slug( dashboard_slug: str, ) -> Optional[int]: query_variants = [ - {"filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, - {"filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, + { + "filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, + { + "filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, ] for query in query_variants: @@ -149,11 +179,13 @@ def _find_dashboard_id_by_slug( except Exception: continue return None + + # [/DEF:_find_dashboard_id_by_slug:Function] # [DEF:_resolve_dashboard_id_from_ref:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard ID from slug-or-id reference for Git routes. # @PRE: dashboard_ref is provided; env_id is required for slug values. # @POST: Returns numeric dashboard ID or raises HTTPException. @@ -182,13 +214,17 @@ def _resolve_dashboard_id_from_ref( dashboard_id = _find_dashboard_id_by_slug(SupersetClient(env), normalized_ref) if dashboard_id is None: - raise HTTPException(status_code=404, detail=f"Dashboard slug '{normalized_ref}' not found") + raise HTTPException( + status_code=404, detail=f"Dashboard slug '{normalized_ref}' not found" + ) return dashboard_id + + # [/DEF:_resolve_dashboard_id_from_ref:Function] # [DEF:_find_dashboard_id_by_slug_async:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard numeric ID by slug asynchronously for hot-path Git routes. # @PRE: dashboard_slug is non-empty. # @POST: Returns dashboard ID or None when not found. 
@@ -197,8 +233,16 @@ async def _find_dashboard_id_by_slug_async( dashboard_slug: str, ) -> Optional[int]: query_variants = [ - {"filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, - {"filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], "page": 0, "page_size": 1}, + { + "filters": [{"col": "slug", "opr": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, + { + "filters": [{"col": "slug", "op": "eq", "value": dashboard_slug}], + "page": 0, + "page_size": 1, + }, ] for query in query_variants: @@ -211,11 +255,13 @@ async def _find_dashboard_id_by_slug_async( except Exception: continue return None + + # [/DEF:_find_dashboard_id_by_slug_async:Function] # [DEF:_resolve_dashboard_id_from_ref_async:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve dashboard ID asynchronously from slug-or-id reference for hot Git routes. # @PRE: dashboard_ref is provided; env_id is required for slug values. # @POST: Returns numeric dashboard ID or raises HTTPException. @@ -246,15 +292,19 @@ async def _resolve_dashboard_id_from_ref_async( try: dashboard_id = await _find_dashboard_id_by_slug_async(client, normalized_ref) if dashboard_id is None: - raise HTTPException(status_code=404, detail=f"Dashboard slug '{normalized_ref}' not found") + raise HTTPException( + status_code=404, detail=f"Dashboard slug '{normalized_ref}' not found" + ) return dashboard_id finally: await client.aclose() + + # [/DEF:_resolve_dashboard_id_from_ref_async:Function] # [DEF:_resolve_repo_key_from_ref:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve repository folder key with slug-first strategy and deterministic fallback. # @PRE: dashboard_id is resolved and valid. # @POST: Returns safe key to be used in local repository path. 
@@ -275,7 +325,9 @@ def _resolve_repo_key_from_ref( env = next((e for e in environments if e.id == env_id), None) if env: payload = SupersetClient(env).get_dashboard(dashboard_id) - dashboard_data = payload.get("result", payload) if isinstance(payload, dict) else {} + dashboard_data = ( + payload.get("result", payload) if isinstance(payload, dict) else {} + ) dashboard_slug = dashboard_data.get("slug") if dashboard_slug: return str(dashboard_slug) @@ -283,6 +335,8 @@ def _resolve_repo_key_from_ref( pass return f"dashboard-{dashboard_id}" + + # [/DEF:_resolve_repo_key_from_ref:Function] @@ -297,11 +351,13 @@ def _sanitize_optional_identity_value(value: Optional[str]) -> Optional[str]: if not normalized: return None return normalized + + # [/DEF:_sanitize_optional_identity_value:Function] # [DEF:_resolve_current_user_git_identity:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Resolve configured Git username/email from current user's profile preferences. # @PRE: `db` may be stubbed in tests; `current_user` may be absent for direct handler invocations. # @POST: Returns tuple(username, email) only when both values are configured. @@ -334,16 +390,22 @@ def _resolve_current_user_git_identity( if not preference: return None - git_username = _sanitize_optional_identity_value(getattr(preference, "git_username", None)) - git_email = _sanitize_optional_identity_value(getattr(preference, "git_email", None)) + git_username = _sanitize_optional_identity_value( + getattr(preference, "git_username", None) + ) + git_email = _sanitize_optional_identity_value( + getattr(preference, "git_email", None) + ) if not git_username or not git_email: return None return git_username, git_email + + # [/DEF:_resolve_current_user_git_identity:Function] # [DEF:_apply_git_identity_from_profile:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Apply user-scoped Git identity to repository-local config before write/pull operations. 
# @PRE: dashboard_id is resolved; db/current_user may be missing in direct test invocation context. # @POST: git_service.configure_identity is called only when identity and method are available. @@ -363,19 +425,20 @@ def _apply_git_identity_from_profile( git_username, git_email = identity configure_identity(dashboard_id, git_username, git_email) + + # [/DEF:_apply_git_identity_from_profile:Function] # [DEF:get_git_configs:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: List all configured Git servers. # @PRE: Database session `db` is available. # @POST: Returns a list of all GitServerConfig objects from the database. # @RETURN: List[GitServerConfigSchema] @router.get("/config", response_model=List[GitServerConfigSchema]) async def get_git_configs( - db: Session = Depends(get_db), - _ = Depends(has_permission("git_config", "READ")) + db: Session = Depends(get_db), _=Depends(has_permission("git_config", "READ")) ): with belief_scope("get_git_configs"): configs = db.query(GitServerConfig).all() @@ -385,10 +448,13 @@ async def get_git_configs( schema.pat = "********" result.append(schema) return result + + # [/DEF:get_git_configs:Function] + # [DEF:create_git_config:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Register a new Git server configuration. # @PRE: `config` contains valid GitServerConfigCreate data. # @POST: A new GitServerConfig record is created in the database. 
@@ -398,7 +464,7 @@ async def get_git_configs( async def create_git_config( config: GitServerConfigCreate, db: Session = Depends(get_db), - _ = Depends(has_permission("admin:settings", "WRITE")) + _=Depends(has_permission("admin:settings", "WRITE")), ): with belief_scope("create_git_config"): config_dict = config.dict(exclude={"config_id"}) @@ -407,10 +473,13 @@ async def create_git_config( db.commit() db.refresh(db_config) return db_config + + # [/DEF:create_git_config:Function] + # [DEF:update_git_config:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Update an existing Git server configuration. # @PRE: `config_id` corresponds to an existing configuration. # @POST: The configuration record is updated in the database. @@ -422,30 +491,35 @@ async def update_git_config( config_id: str, config_update: GitServerConfigUpdate, db: Session = Depends(get_db), - _ = Depends(has_permission("admin:settings", "WRITE")) + _=Depends(has_permission("admin:settings", "WRITE")), ): with belief_scope("update_git_config"): - db_config = db.query(GitServerConfig).filter(GitServerConfig.id == config_id).first() + db_config = ( + db.query(GitServerConfig).filter(GitServerConfig.id == config_id).first() + ) if not db_config: raise HTTPException(status_code=404, detail="Configuration not found") - + update_data = config_update.dict(exclude_unset=True) if update_data.get("pat") == "********": update_data.pop("pat") - + for key, value in update_data.items(): setattr(db_config, key, value) - + db.commit() db.refresh(db_config) - + result_schema = GitServerConfigSchema.from_orm(db_config) result_schema.pat = "********" return result_schema + + # [/DEF:update_git_config:Function] + # [DEF:delete_git_config:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Remove a Git server configuration. # @PRE: `config_id` corresponds to an existing configuration. # @POST: The configuration record is removed from the database. 
@@ -454,20 +528,25 @@ async def update_git_config( async def delete_git_config( config_id: str, db: Session = Depends(get_db), - _ = Depends(has_permission("admin:settings", "WRITE")) + _=Depends(has_permission("admin:settings", "WRITE")), ): with belief_scope("delete_git_config"): - db_config = db.query(GitServerConfig).filter(GitServerConfig.id == config_id).first() + db_config = ( + db.query(GitServerConfig).filter(GitServerConfig.id == config_id).first() + ) if not db_config: raise HTTPException(status_code=404, detail="Configuration not found") - + db.delete(db_config) db.commit() return {"status": "success", "message": "Configuration deleted"} + + # [/DEF:delete_git_config:Function] + # [DEF:test_git_config:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Validate connection to a Git server using provided credentials. # @PRE: `config` contains provider, url, and pat. # @POST: Returns success if the connection is validated via GitService. @@ -476,30 +555,42 @@ async def delete_git_config( async def test_git_config( config: GitServerConfigCreate, db: Session = Depends(get_db), - _ = Depends(has_permission("git_config", "READ")) + _=Depends(has_permission("git_config", "READ")), ): with belief_scope("test_git_config"): pat_to_use = config.pat if pat_to_use == "********": if config.config_id: - db_config = db.query(GitServerConfig).filter(GitServerConfig.id == config.config_id).first() + db_config = ( + db.query(GitServerConfig) + .filter(GitServerConfig.id == config.config_id) + .first() + ) if db_config: pat_to_use = db_config.pat else: - db_config = db.query(GitServerConfig).filter(GitServerConfig.url == config.url).first() + db_config = ( + db.query(GitServerConfig) + .filter(GitServerConfig.url == config.url) + .first() + ) if db_config: pat_to_use = db_config.pat - success = await git_service.test_connection(config.provider, config.url, pat_to_use) + success = await git_service.test_connection( + config.provider, config.url, pat_to_use + ) if 
success: return {"status": "success", "message": "Connection successful"} else: raise HTTPException(status_code=400, detail="Connection failed") + + # [/DEF:test_git_config:Function] # [DEF:list_gitea_repositories:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: List repositories in Gitea for a saved Gitea config. # @PRE: config_id exists and provider is GITEA. # @POST: Returns repositories visible to PAT user. @@ -507,12 +598,14 @@ async def test_git_config( async def list_gitea_repositories( config_id: str, db: Session = Depends(get_db), - _ = Depends(has_permission("git_config", "READ")) + _=Depends(has_permission("git_config", "READ")), ): with belief_scope("list_gitea_repositories"): config = _get_git_config_or_404(db, config_id) if config.provider != GitProvider.GITEA: - raise HTTPException(status_code=400, detail="This endpoint supports GITEA provider only") + raise HTTPException( + status_code=400, detail="This endpoint supports GITEA provider only" + ) repos = await git_service.list_gitea_repositories(config.url, config.pat) return [ GiteaRepoSchema( @@ -526,11 +619,13 @@ async def list_gitea_repositories( ) for repo in repos ] + + # [/DEF:list_gitea_repositories:Function] # [DEF:create_gitea_repository:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Create a repository in Gitea for a saved Gitea config. # @PRE: config_id exists and provider is GITEA. # @POST: Returns created repository payload. 
@@ -539,12 +634,14 @@ async def create_gitea_repository( config_id: str, request: GiteaRepoCreateRequest, db: Session = Depends(get_db), - _ = Depends(has_permission("admin:settings", "WRITE")) + _=Depends(has_permission("admin:settings", "WRITE")), ): with belief_scope("create_gitea_repository"): config = _get_git_config_or_404(db, config_id) if config.provider != GitProvider.GITEA: - raise HTTPException(status_code=400, detail="This endpoint supports GITEA provider only") + raise HTTPException( + status_code=400, detail="This endpoint supports GITEA provider only" + ) repo = await git_service.create_gitea_repository( server_url=config.url, pat=config.pat, @@ -563,11 +660,13 @@ async def create_gitea_repository( ssh_url=repo.get("ssh_url"), default_branch=repo.get("default_branch"), ) + + # [/DEF:create_gitea_repository:Function] # [DEF:create_remote_repository:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Create repository on remote Git server using selected provider config. # @PRE: config_id exists and PAT has creation permissions. # @POST: Returns normalized remote repository payload. 
@@ -576,7 +675,7 @@ async def create_remote_repository( config_id: str, request: RemoteRepoCreateRequest, db: Session = Depends(get_db), - _ = Depends(has_permission("admin:settings", "WRITE")) + _=Depends(has_permission("admin:settings", "WRITE")), ): with belief_scope("create_remote_repository"): config = _get_git_config_or_404(db, config_id) @@ -612,7 +711,9 @@ async def create_remote_repository( default_branch=request.default_branch, ) else: - raise HTTPException(status_code=501, detail=f"Provider {config.provider} is not supported") + raise HTTPException( + status_code=501, detail=f"Provider {config.provider} is not supported" + ) return RemoteRepoSchema( provider=config.provider, @@ -624,11 +725,13 @@ async def create_remote_repository( ssh_url=repo.get("ssh_url"), default_branch=repo.get("default_branch"), ) + + # [/DEF:create_remote_repository:Function] # [DEF:delete_gitea_repository:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Delete repository in Gitea for a saved Gitea config. # @PRE: config_id exists and provider is GITEA. # @POST: Target repository is deleted on Gitea. 
@@ -638,12 +741,14 @@ async def delete_gitea_repository( owner: str, repo_name: str, db: Session = Depends(get_db), - _ = Depends(has_permission("admin:settings", "WRITE")) + _=Depends(has_permission("admin:settings", "WRITE")), ): with belief_scope("delete_gitea_repository"): config = _get_git_config_or_404(db, config_id) if config.provider != GitProvider.GITEA: - raise HTTPException(status_code=400, detail="This endpoint supports GITEA provider only") + raise HTTPException( + status_code=400, detail="This endpoint supports GITEA provider only" + ) await git_service.delete_gitea_repository( server_url=config.url, pat=config.pat, @@ -651,8 +756,11 @@ async def delete_gitea_repository( repo_name=repo_name, ) return {"status": "success", "message": "Repository deleted"} + + # [/DEF:delete_gitea_repository:Function] + # [DEF:init_repository:Function] # @COMPLEXITY: 3 # @PURPOSE: Link a dashboard to a Git repository and perform initial clone/init. @@ -667,24 +775,44 @@ async def init_repository( env_id: Optional[str] = None, config_manager=Depends(get_config_manager), db: Session = Depends(get_db), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("init_repository"): - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) - repo_key = _resolve_repo_key_from_ref(dashboard_ref, dashboard_id, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) + repo_key = _resolve_repo_key_from_ref( + dashboard_ref, dashboard_id, config_manager, env_id + ) # 1. Get config - config = db.query(GitServerConfig).filter(GitServerConfig.id == init_data.config_id).first() + config = ( + db.query(GitServerConfig) + .filter(GitServerConfig.id == init_data.config_id) + .first() + ) if not config: raise HTTPException(status_code=404, detail="Git configuration not found") - + try: # 2. 
Perform Git clone/init - logger.info(f"[init_repository][Action] Initializing repo for dashboard {dashboard_id}") - git_service.init_repo(dashboard_id, init_data.remote_url, config.pat, repo_key=repo_key, default_branch=config.default_branch) - + logger.info( + f"[init_repository][Action] Initializing repo for dashboard {dashboard_id}" + ) + git_service.init_repo( + dashboard_id, + init_data.remote_url, + config.pat, + repo_key=repo_key, + default_branch=config.default_branch, + ) + # 3. Save to DB repo_path = git_service._get_repo_path(dashboard_id, repo_key=repo_key) - db_repo = db.query(GitRepository).filter(GitRepository.dashboard_id == dashboard_id).first() + db_repo = ( + db.query(GitRepository) + .filter(GitRepository.dashboard_id == dashboard_id) + .first() + ) if not db_repo: db_repo = GitRepository( dashboard_id=dashboard_id, @@ -699,20 +827,27 @@ async def init_repository( db_repo.remote_url = init_data.remote_url db_repo.local_path = repo_path db_repo.current_branch = "dev" - + db.commit() - logger.info(f"[init_repository][Coherence:OK] Repository initialized for dashboard {dashboard_id}") + logger.info( + f"[init_repository][Coherence:OK] Repository initialized for dashboard {dashboard_id}" + ) return {"status": "success", "message": "Repository initialized"} except Exception as e: db.rollback() - logger.error(f"[init_repository][Coherence:Failed] Failed to init repository: {e}") + logger.error( + f"[init_repository][Coherence:Failed] Failed to init repository: {e}" + ) if isinstance(e, HTTPException): raise _handle_unexpected_git_route_error("init_repository", e) + + # [/DEF:init_repository:Function] + # [DEF:get_repository_binding:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Return repository binding with provider metadata for selected dashboard. # @PRE: `dashboard_ref` resolves to a valid dashboard and repository is initialized. # @POST: Returns dashboard repository binding and linked provider. 
@@ -724,14 +859,22 @@ async def get_repository_binding( env_id: Optional[str] = None, config_manager=Depends(get_config_manager), db: Session = Depends(get_db), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_repository_binding"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) - db_repo = db.query(GitRepository).filter(GitRepository.dashboard_id == dashboard_id).first() + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) + db_repo = ( + db.query(GitRepository) + .filter(GitRepository.dashboard_id == dashboard_id) + .first() + ) if not db_repo: - raise HTTPException(status_code=404, detail="Repository not initialized") + raise HTTPException( + status_code=404, detail="Repository not initialized" + ) config = _get_git_config_or_404(db, db_repo.config_id) return RepositoryBindingSchema( dashboard_id=db_repo.dashboard_id, @@ -744,10 +887,13 @@ async def get_repository_binding( raise except Exception as e: _handle_unexpected_git_route_error("get_repository_binding", e) + + # [/DEF:get_repository_binding:Function] + # [DEF:delete_repository:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Delete local repository workspace and DB binding for selected dashboard. # @PRE: `dashboard_ref` resolves to a valid dashboard. # @POST: Repository files and binding record are removed when present. 
@@ -758,21 +904,26 @@ async def delete_repository( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("delete_repository"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) git_service.delete_repo(dashboard_id) return {"status": "success"} except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("delete_repository", e) + + # [/DEF:delete_repository:Function] + # [DEF:get_branches:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: List all branches for a dashboard's repository. # @PRE: Repository for `dashboard_ref` is initialized. # @POST: Returns a list of branches from the local repository. @@ -783,20 +934,25 @@ async def get_branches( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_branches"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) return git_service.list_branches(dashboard_id) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("get_branches", e) + + # [/DEF:get_branches:Function] + # [DEF:create_branch:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Create a new branch in the dashboard's repository. # @PRE: `dashboard_ref` repository exists and `branch_data` has name and from_branch. # @POST: A new branch is created in the local repository. 
@@ -810,22 +966,29 @@ async def create_branch( config_manager=Depends(get_config_manager), db: Session = Depends(get_db), current_user: User = Depends(get_current_user), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("create_branch"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) _apply_git_identity_from_profile(dashboard_id, db, current_user) - git_service.create_branch(dashboard_id, branch_data.name, branch_data.from_branch) + git_service.create_branch( + dashboard_id, branch_data.name, branch_data.from_branch + ) return {"status": "success"} except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("create_branch", e) + + # [/DEF:create_branch:Function] + # [DEF:checkout_branch:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Switch the dashboard's repository to a specific branch. # @PRE: `dashboard_ref` repository exists and branch `checkout_data.name` exists. # @POST: The local repository HEAD is moved to the specified branch. 
@@ -837,21 +1000,26 @@ async def checkout_branch( checkout_data: BranchCheckout, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("checkout_branch"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) git_service.checkout_branch(dashboard_id, checkout_data.name) return {"status": "success"} except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("checkout_branch", e) + + # [/DEF:checkout_branch:Function] + # [DEF:commit_changes:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Stage and commit changes in the dashboard's repository. # @PRE: `dashboard_ref` repository exists and `commit_data` has message and files. # @POST: Specified files are staged and a new commit is created. 
@@ -865,22 +1033,29 @@ async def commit_changes( config_manager=Depends(get_config_manager), db: Session = Depends(get_db), current_user: User = Depends(get_current_user), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("commit_changes"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) _apply_git_identity_from_profile(dashboard_id, db, current_user) - git_service.commit_changes(dashboard_id, commit_data.message, commit_data.files) + git_service.commit_changes( + dashboard_id, commit_data.message, commit_data.files + ) return {"status": "success"} except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("commit_changes", e) + + # [/DEF:commit_changes:Function] + # [DEF:push_changes:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Push local commits to the remote repository. # @PRE: `dashboard_ref` repository exists and has a remote configured. # @POST: Local commits are pushed to the remote repository. @@ -890,19 +1065,24 @@ async def push_changes( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("push_changes"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) git_service.push_changes(dashboard_id) return {"status": "success"} except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("push_changes", e) + + # [/DEF:push_changes:Function] + # [DEF:pull_changes:Function] # @COMPLEXITY: 3 # @PURPOSE: Pull changes from the remote repository. 
@@ -916,19 +1096,29 @@ async def pull_changes( config_manager=Depends(get_config_manager), db: Session = Depends(get_db), current_user: User = Depends(get_current_user), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("pull_changes"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) db_repo = None config_url = None config_provider = None try: - db_repo_candidate = db.query(GitRepository).filter(GitRepository.dashboard_id == dashboard_id).first() + db_repo_candidate = ( + db.query(GitRepository) + .filter(GitRepository.dashboard_id == dashboard_id) + .first() + ) if getattr(db_repo_candidate, "config_id", None): db_repo = db_repo_candidate - config_row = db.query(GitServerConfig).filter(GitServerConfig.id == db_repo.config_id).first() + config_row = ( + db.query(GitServerConfig) + .filter(GitServerConfig.id == db_repo.config_id) + .first() + ) if config_row: config_url = config_row.url config_provider = config_row.provider @@ -958,51 +1148,67 @@ async def pull_changes( raise except Exception as e: _handle_unexpected_git_route_error("pull_changes", e) + + # [/DEF:pull_changes:Function] + # [DEF:get_merge_status:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Return unfinished-merge status for repository (web-only recovery support). # @PRE: `dashboard_ref` resolves to a valid dashboard repository. # @POST: Returns merge status payload. 
-@router.get("/repositories/{dashboard_ref}/merge/status", response_model=MergeStatusSchema) +@router.get( + "/repositories/{dashboard_ref}/merge/status", response_model=MergeStatusSchema +) async def get_merge_status( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_merge_status"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) return git_service.get_merge_status(dashboard_id) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("get_merge_status", e) + + # [/DEF:get_merge_status:Function] # [DEF:get_merge_conflicts:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Return conflicted files with mine/theirs previews for web conflict resolver. # @PRE: `dashboard_ref` resolves to a valid dashboard repository. # @POST: Returns conflict file list. 
-@router.get("/repositories/{dashboard_ref}/merge/conflicts", response_model=List[MergeConflictFileSchema]) +@router.get( + "/repositories/{dashboard_ref}/merge/conflicts", + response_model=List[MergeConflictFileSchema], +) async def get_merge_conflicts( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_merge_conflicts"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) return git_service.get_merge_conflicts(dashboard_id) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("get_merge_conflicts", e) + + # [/DEF:get_merge_conflicts:Function] @@ -1017,11 +1223,13 @@ async def resolve_merge_conflicts( resolve_data: MergeResolveRequest, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("resolve_merge_conflicts"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) resolved_files = git_service.resolve_merge_conflicts( dashboard_id, [item.dict() for item in resolve_data.resolutions], @@ -1031,11 +1239,13 @@ async def resolve_merge_conflicts( raise except Exception as e: _handle_unexpected_git_route_error("resolve_merge_conflicts", e) + + # [/DEF:resolve_merge_conflicts:Function] # [DEF:abort_merge:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Abort unfinished merge from WebUI flow. # @PRE: `dashboard_ref` resolves to repository. # @POST: Merge operation is aborted or reports no active merge. 
@@ -1044,16 +1254,20 @@ async def abort_merge( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("abort_merge"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) return git_service.abort_merge(dashboard_id) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("abort_merge", e) + + # [/DEF:abort_merge:Function] @@ -1068,16 +1282,20 @@ async def continue_merge( continue_data: MergeContinueRequest, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("continue_merge"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) return git_service.continue_merge(dashboard_id, continue_data.message) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("continue_merge", e) + + # [/DEF:continue_merge:Function] @@ -1094,22 +1312,29 @@ async def sync_dashboard( env_id: Optional[str] = None, source_env_id: typing.Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("sync_dashboard"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) from src.plugins.git_plugin import GitPlugin + plugin = GitPlugin() - return await plugin.execute({ - "operation": "sync", - 
"dashboard_id": dashboard_id, - "source_env_id": source_env_id - }) + return await plugin.execute( + { + "operation": "sync", + "dashboard_id": dashboard_id, + "source_env_id": source_env_id, + } + ) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("sync_dashboard", e) + + # [/DEF:sync_dashboard:Function] @@ -1126,27 +1351,42 @@ async def promote_dashboard( config_manager=Depends(get_config_manager), db: Session = Depends(get_db), current_user: User = Depends(get_current_user), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("promote_dashboard"): - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) - db_repo = db.query(GitRepository).filter(GitRepository.dashboard_id == dashboard_id).first() + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) + db_repo = ( + db.query(GitRepository) + .filter(GitRepository.dashboard_id == dashboard_id) + .first() + ) if not db_repo: - raise HTTPException(status_code=404, detail=f"Repository for dashboard {dashboard_ref} is not initialized") + raise HTTPException( + status_code=404, + detail=f"Repository for dashboard {dashboard_ref} is not initialized", + ) config = _get_git_config_or_404(db, db_repo.config_id) from_branch = payload.from_branch.strip() to_branch = payload.to_branch.strip() if not from_branch or not to_branch: - raise HTTPException(status_code=400, detail="from_branch and to_branch are required") + raise HTTPException( + status_code=400, detail="from_branch and to_branch are required" + ) if from_branch == to_branch: - raise HTTPException(status_code=400, detail="from_branch and to_branch must be different") + raise HTTPException( + status_code=400, detail="from_branch and to_branch must be different" + ) mode = (payload.mode or "mr").strip().lower() if mode == "direct": reason = (payload.reason or "").strip() if not reason: - 
raise HTTPException(status_code=400, detail="Direct promote requires non-empty reason") + raise HTTPException( + status_code=400, detail="Direct promote requires non-empty reason" + ) logger.warning( "[promote_dashboard][PolicyViolation] Direct promote without MR by actor=unknown dashboard_ref=%s from=%s to=%s reason=%s", dashboard_ref, @@ -1203,7 +1443,10 @@ async def promote_dashboard( remove_source_branch=payload.remove_source_branch, ) else: - raise HTTPException(status_code=501, detail=f"Provider {config.provider} does not support promotion API") + raise HTTPException( + status_code=501, + detail=f"Provider {config.provider} does not support promotion API", + ) return PromoteResponse( mode="mr", @@ -1214,10 +1457,13 @@ async def promote_dashboard( reference_id=str(pr.get("id")) if pr.get("id") is not None else None, policy_violation=False, ) + + # [/DEF:promote_dashboard:Function] + # [DEF:get_environments:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: List all deployment environments. # @PRE: Config manager is accessible. # @POST: Returns a list of DeploymentEnvironmentSchema objects. @@ -1225,20 +1471,21 @@ async def promote_dashboard( @router.get("/environments", response_model=List[DeploymentEnvironmentSchema]) async def get_environments( config_manager=Depends(get_config_manager), - _ = Depends(has_permission("environments", "READ")) + _=Depends(has_permission("environments", "READ")), ): with belief_scope("get_environments"): envs = config_manager.get_environments() return [ DeploymentEnvironmentSchema( - id=e.id, - name=e.name, - superset_url=e.url, - is_active=True - ) for e in envs + id=e.id, name=e.name, superset_url=e.url, is_active=True + ) + for e in envs ] + + # [/DEF:get_environments:Function] + # [DEF:deploy_dashboard:Function] # @COMPLEXITY: 3 # @PURPOSE: Deploy dashboard from Git to a target environment. 
@@ -1252,26 +1499,34 @@ async def deploy_dashboard( deploy_data: DeployRequest, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("deploy_dashboard"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) from src.plugins.git_plugin import GitPlugin + plugin = GitPlugin() - return await plugin.execute({ - "operation": "deploy", - "dashboard_id": dashboard_id, - "environment_id": deploy_data.environment_id - }) + return await plugin.execute( + { + "operation": "deploy", + "dashboard_id": dashboard_id, + "environment_id": deploy_data.environment_id, + } + ) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("deploy_dashboard", e) + + # [/DEF:deploy_dashboard:Function] + # [DEF:get_history:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: View commit history for a dashboard's repository. # @PRE: `dashboard_ref` repository exists. # @POST: Returns a list of recent commits from the repository. 
@@ -1284,20 +1539,25 @@ async def get_history( limit: int = 50, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_history"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) return git_service.get_commit_history(dashboard_id, limit) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("get_history", e) + + # [/DEF:get_history:Function] + # [DEF:get_repository_status:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Get current Git status for a dashboard repository. # @PRE: `dashboard_ref` resolves to a valid dashboard. # @POST: Returns repository status; if repo is not initialized, returns `NO_REPO` payload. @@ -1308,21 +1568,25 @@ async def get_repository_status( dashboard_ref: str, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_repository_status"): try: - dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, config_manager, env_id) + dashboard_id = await _resolve_dashboard_id_from_ref_async( + dashboard_ref, config_manager, env_id + ) return _resolve_repository_status(dashboard_id) except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("get_repository_status", e) + + # [/DEF:get_repository_status:Function] # [DEF:get_repository_status_batch:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Get Git statuses for multiple dashboard repositories in one request. # @PRE: `request.dashboard_ids` is provided. # @POST: Returns `statuses` map where each key is dashboard ID and value is repository status payload. 
@@ -1330,8 +1594,7 @@ async def get_repository_status( # @RETURN: RepoStatusBatchResponse @router.post("/repositories/status/batch", response_model=RepoStatusBatchResponse) async def get_repository_status_batch( - request: RepoStatusBatchRequest, - _ = Depends(has_permission("plugin:git", "EXECUTE")) + request: RepoStatusBatchRequest, _=Depends(has_permission("plugin:git", "EXECUTE")) ): with belief_scope("get_repository_status_batch"): dashboard_ids = list(dict.fromkeys(request.dashboard_ids)) @@ -1363,10 +1626,13 @@ async def get_repository_status_batch( "sync_status": "ERROR", } return RepoStatusBatchResponse(statuses=statuses) + + # [/DEF:get_repository_status_batch:Function] + # [DEF:get_repository_diff:Function] -# @COMPLEXITY: 3 +# @COMPLEXITY: 2 # @PURPOSE: Get Git diff for a dashboard repository. # @PRE: `dashboard_ref` repository exists. # @POST: Returns the diff text for the specified file or all changes. @@ -1381,19 +1647,24 @@ async def get_repository_diff( staged: bool = False, env_id: Optional[str] = None, config_manager=Depends(get_config_manager), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("get_repository_diff"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) diff_text = git_service.get_diff(dashboard_id, file_path, staged) return diff_text except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("get_repository_diff", e) + + # [/DEF:get_repository_diff:Function] + # [DEF:generate_commit_message:Function] # @COMPLEXITY: 3 # @PURPOSE: Generate a suggested commit message using LLM. 
@@ -1403,51 +1674,58 @@ async def get_repository_diff( async def generate_commit_message( dashboard_ref: str, env_id: Optional[str] = None, - config_manager = Depends(get_config_manager), + config_manager=Depends(get_config_manager), db: Session = Depends(get_db), - _ = Depends(has_permission("plugin:git", "EXECUTE")) + _=Depends(has_permission("plugin:git", "EXECUTE")), ): with belief_scope("generate_commit_message"): try: - dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id) + dashboard_id = _resolve_dashboard_id_from_ref( + dashboard_ref, config_manager, env_id + ) # 1. Get Diff diff = git_service.get_diff(dashboard_id, staged=True) if not diff: diff = git_service.get_diff(dashboard_id, staged=False) - + if not diff: return {"message": "No changes detected"} # 2. Get History history_objs = git_service.get_commit_history(dashboard_id, limit=5) - history = [h.message for h in history_objs if hasattr(h, 'message')] + history = [h.message for h in history_objs if hasattr(h, "message")] # 3. 
Get LLM Client from ...services.llm_provider import LLMProviderService from ...plugins.llm_analysis.service import LLMClient from ...plugins.llm_analysis.models import LLMProviderType - + llm_service = LLMProviderService(db) providers = llm_service.get_all_providers() - llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm) + llm_settings = normalize_llm_settings( + config_manager.get_config().settings.llm + ) bound_provider_id = resolve_bound_provider_id(llm_settings, "git_commit") provider = next((p for p in providers if p.id == bound_provider_id), None) if not provider: provider = next((p for p in providers if p.is_active), None) - + if not provider: - raise HTTPException(status_code=400, detail="No active LLM provider found") - + raise HTTPException( + status_code=400, detail="No active LLM provider found" + ) + api_key = llm_service.get_decrypted_api_key(provider.id) client = LLMClient( provider_type=LLMProviderType(provider.provider_type), api_key=api_key, base_url=provider.base_url, - default_model=provider.default_model + default_model=provider.default_model, ) # 4. 
Generate Message from ...plugins.git.llm_extension import GitLLMExtension + extension = GitLLMExtension(client) git_prompt = llm_settings["prompts"].get( "git_commit_prompt", @@ -1458,12 +1736,14 @@ async def generate_commit_message( history, prompt_template=git_prompt, ) - + return {"message": message} except HTTPException: raise except Exception as e: _handle_unexpected_git_route_error("generate_commit_message", e) + + # [/DEF:generate_commit_message:Function] -# [/DEF:backend.src.api.routes.git:Module] +# [/DEF:GitApi:Module] diff --git a/backend/src/plugins/llm_analysis/__tests__/test_screenshot_service.py b/backend/src/plugins/llm_analysis/__tests__/test_screenshot_service.py index 3425ad9f..93c48373 100644 --- a/backend/src/plugins/llm_analysis/__tests__/test_screenshot_service.py +++ b/backend/src/plugins/llm_analysis/__tests__/test_screenshot_service.py @@ -1,4 +1,5 @@ -# [DEF:backend.src.plugins.llm_analysis.__tests__.test_screenshot_service:Module] +# [DEF:TestScreenshotService:Module] +# @RELATION: VERIFIES ->[src.plugins.llm_analysis.service.ScreenshotService] # @COMPLEXITY: 3 # @SEMANTICS: tests, screenshot-service, navigation, timeout-regression # @PURPOSE: Protect dashboard screenshot navigation from brittle networkidle waits. @@ -9,6 +10,7 @@ from src.plugins.llm_analysis.service import ScreenshotService # [DEF:test_iter_login_roots_includes_child_frames:Function] +# @RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Login discovery must search embedded auth frames, not only the main page. # @PRE: Page exposes child frames list. # @POST: Returned roots include page plus child frames in order. 
@@ -21,10 +23,13 @@ def test_iter_login_roots_includes_child_frames(): roots = service._iter_login_roots(fake_page) assert roots == [fake_page, frame_a, frame_b] + + # [/DEF:test_iter_login_roots_includes_child_frames:Function] # [DEF:test_response_looks_like_login_page_detects_login_markup:Function] +# @RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Direct login fallback must reject responses that render the login screen again. # @PRE: Response body contains stable login-page markers. # @POST: Helper returns True so caller treats fallback as failed authentication. @@ -45,10 +50,13 @@ def test_response_looks_like_login_page_detects_login_markup(): ) assert result is True + + # [/DEF:test_response_looks_like_login_page_detects_login_markup:Function] # [DEF:test_find_first_visible_locator_skips_hidden_first_match:Function] +# @RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Locator helper must not reject a selector collection just because its first element is hidden. # @PRE: First matched element is hidden and second matched element is visible. # @POST: Helper returns the second visible candidate. @@ -73,18 +81,23 @@ async def test_find_first_visible_locator_skips_hidden_first_match(): return self._elements[index] service = ScreenshotService(env=type("Env", (), {})()) - hidden_then_visible = _FakeLocator([ - _FakeElement(False, "hidden"), - _FakeElement(True, "visible"), - ]) + hidden_then_visible = _FakeLocator( + [ + _FakeElement(False, "hidden"), + _FakeElement(True, "visible"), + ] + ) result = await service._find_first_visible_locator([hidden_then_visible]) assert result.label == "visible" + + # [/DEF:test_find_first_visible_locator_skips_hidden_first_match:Function] # [DEF:test_submit_login_via_form_post_uses_browser_context_request:Function] +# @RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Fallback login must submit hidden fields and credentials through the context request cookie jar. 
# @PRE: Login DOM exposes csrf hidden field and request context returns authenticated HTML. # @POST: Helper returns True and request payload contains csrf_token plus credentials plus request options. @@ -122,15 +135,25 @@ async def test_submit_login_via_form_post_uses_browser_context_request(): def __init__(self): self.calls = [] - async def post(self, url, form=None, headers=None, timeout=None, fail_on_status_code=None, max_redirects=None): - self.calls.append({ - "url": url, - "form": dict(form or {}), - "headers": dict(headers or {}), - "timeout": timeout, - "fail_on_status_code": fail_on_status_code, - "max_redirects": max_redirects, - }) + async def post( + self, + url, + form=None, + headers=None, + timeout=None, + fail_on_status_code=None, + max_redirects=None, + ): + self.calls.append( + { + "url": url, + "form": dict(form or {}), + "headers": dict(headers or {}), + "timeout": timeout, + "fail_on_status_code": fail_on_status_code, + "max_redirects": max_redirects, + } + ) return _FakeResponse() class _FakeContext: @@ -144,39 +167,48 @@ async def test_submit_login_via_form_post_uses_browser_context_request(): def locator(self, selector): if selector == "input[type='hidden'][name]": - return _FakeLocator([ - _FakeInput("csrf_token", "csrf-123"), - _FakeInput("next", "/superset/welcome/"), - ]) + return _FakeLocator( + [ + _FakeInput("csrf_token", "csrf-123"), + _FakeInput("next", "/superset/welcome/"), + ] + ) return _FakeLocator([]) env = type("Env", (), {"username": "admin", "password": "secret"})() service = ScreenshotService(env=env) page = _FakePage() - result = await service._submit_login_via_form_post(page, "https://example.test/login/") + result = await service._submit_login_via_form_post( + page, "https://example.test/login/" + ) assert result is True - assert page.context.request.calls == [{ - "url": "https://example.test/login/", - "form": { - "csrf_token": "csrf-123", - "next": "/superset/welcome/", - "username": "admin", - "password": "secret", - 
}, - "headers": { - "Origin": "https://example.test", - "Referer": "https://example.test/login/", - }, - "timeout": 10000, - "fail_on_status_code": False, - "max_redirects": 0, - }] + assert page.context.request.calls == [ + { + "url": "https://example.test/login/", + "form": { + "csrf_token": "csrf-123", + "next": "/superset/welcome/", + "username": "admin", + "password": "secret", + }, + "headers": { + "Origin": "https://example.test", + "Referer": "https://example.test/login/", + }, + "timeout": 10000, + "fail_on_status_code": False, + "max_redirects": 0, + } + ] + + # [/DEF:test_submit_login_via_form_post_uses_browser_context_request:Function] # [DEF:test_submit_login_via_form_post_accepts_authenticated_redirect:Function] +# @RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Fallback login must treat non-login 302 redirect as success without waiting for redirect target. # @PRE: Request response is 302 with Location outside login path. # @POST: Helper returns True. @@ -212,7 +244,15 @@ async def test_submit_login_via_form_post_accepts_authenticated_redirect(): return "" class _FakeRequest: - async def post(self, url, form=None, headers=None, timeout=None, fail_on_status_code=None, max_redirects=None): + async def post( + self, + url, + form=None, + headers=None, + timeout=None, + fail_on_status_code=None, + max_redirects=None, + ): return _FakeResponse() class _FakeContext: @@ -232,13 +272,18 @@ async def test_submit_login_via_form_post_accepts_authenticated_redirect(): env = type("Env", (), {"username": "admin", "password": "secret"})() service = ScreenshotService(env=env) - result = await service._submit_login_via_form_post(_FakePage(), "https://example.test/login/") + result = await service._submit_login_via_form_post( + _FakePage(), "https://example.test/login/" + ) assert result is True + + # [/DEF:test_submit_login_via_form_post_accepts_authenticated_redirect:Function] # [DEF:test_submit_login_via_form_post_rejects_login_markup_response:Function] +# 
@RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Fallback login must fail when POST response still contains login form content. # @PRE: Login DOM exposes csrf hidden field and request response renders login markup. # @POST: Helper returns False. @@ -282,7 +327,15 @@ async def test_submit_login_via_form_post_rejects_login_markup_response(): """ class _FakeRequest: - async def post(self, url, form=None, headers=None, timeout=None, fail_on_status_code=None, max_redirects=None): + async def post( + self, + url, + form=None, + headers=None, + timeout=None, + fail_on_status_code=None, + max_redirects=None, + ): return _FakeResponse() class _FakeContext: @@ -302,13 +355,18 @@ async def test_submit_login_via_form_post_rejects_login_markup_response(): env = type("Env", (), {"username": "admin", "password": "secret"})() service = ScreenshotService(env=env) - result = await service._submit_login_via_form_post(_FakePage(), "https://example.test/login/") + result = await service._submit_login_via_form_post( + _FakePage(), "https://example.test/login/" + ) assert result is False + + # [/DEF:test_submit_login_via_form_post_rejects_login_markup_response:Function] # [DEF:test_goto_resilient_falls_back_from_domcontentloaded_to_load:Function] +# @RELATION: BINDS_TO ->[TestScreenshotService] # @PURPOSE: Pages with unstable primary wait must retry with fallback wait strategy. # @PRE: First page.goto call raises; second succeeds. # @POST: Helper returns second response and attempts both wait modes in order. 
@@ -340,5 +398,7 @@ async def test_goto_resilient_falls_back_from_domcontentloaded_to_load(): ("https://example.test/dashboard", "domcontentloaded", 1234), ("https://example.test/dashboard", "load", 1234), ] + + # [/DEF:test_goto_resilient_falls_back_from_domcontentloaded_to_load:Function] -# [/DEF:backend.src.plugins.llm_analysis.__tests__.test_screenshot_service:Module] +# [/DEF:TestScreenshotService:Module] diff --git a/backend/src/services/__tests__/test_health_service.py b/backend/src/services/__tests__/test_health_service.py index d05f1bc2..858591fa 100644 --- a/backend/src/services/__tests__/test_health_service.py +++ b/backend/src/services/__tests__/test_health_service.py @@ -7,6 +7,8 @@ from src.models.llm import ValidationRecord # [DEF:test_health_service:Module] # @COMPLEXITY: 3 # @PURPOSE: Unit tests for HealthService aggregation logic. +# @RELATION: VERIFIES ->[src.services.health_service.HealthService] + @pytest.mark.asyncio async def test_get_health_summary_aggregation(): @@ -15,9 +17,9 @@ async def test_get_health_summary_aggregation(): """ # Setup: Mock DB session db = MagicMock() - + now = datetime.utcnow() - + # Dashboard 1: Old FAIL, New PASS rec1_old = ValidationRecord( id="rec-old", @@ -26,7 +28,7 @@ async def test_get_health_summary_aggregation(): status="FAIL", timestamp=now - timedelta(hours=1), summary="Old failure", - issues=[] + issues=[], ) rec1_new = ValidationRecord( id="rec-new", @@ -35,9 +37,9 @@ async def test_get_health_summary_aggregation(): status="PASS", timestamp=now, summary="New pass", - issues=[] + issues=[], ) - + # Dashboard 2: Single WARN rec2 = ValidationRecord( id="rec-warn", @@ -46,28 +48,28 @@ async def test_get_health_summary_aggregation(): status="WARN", timestamp=now, summary="Warning", - issues=[] + issues=[], ) - + # Mock the query chain # subquery = self.db.query(...).filter(...).group_by(...).subquery() # query = self.db.query(ValidationRecord).join(subquery, ...).all() - + mock_query = db.query.return_value 
mock_query.filter.return_value = mock_query mock_query.group_by.return_value = mock_query mock_query.subquery.return_value = MagicMock() - + db.query.return_value.join.return_value.all.return_value = [rec1_new, rec2] - + service = HealthService(db) summary = await service.get_health_summary(environment_id="env_1") - + assert summary.pass_count == 1 assert summary.warn_count == 1 assert summary.fail_count == 0 assert len(summary.items) == 2 - + # Verify dash_1 has the latest status (PASS) dash_1_item = next(item for item in summary.items if item.dashboard_id == "dash_1") assert dash_1_item.status == "PASS" @@ -75,6 +77,7 @@ async def test_get_health_summary_aggregation(): assert dash_1_item.record_id == rec1_new.id assert dash_1_item.dashboard_slug == "dash_1" + @pytest.mark.asyncio async def test_get_health_summary_empty(): """ @@ -82,10 +85,10 @@ async def test_get_health_summary_empty(): """ db = MagicMock() db.query.return_value.join.return_value.all.return_value = [] - + service = HealthService(db) summary = await service.get_health_summary(environment_id="env_none") - + assert summary.pass_count == 0 assert len(summary.items) == 0 @@ -159,6 +162,8 @@ async def test_get_health_summary_reuses_dashboard_metadata_cache_across_service HealthService._dashboard_summary_cache.clear() +# [DEF:test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks:Function] +# @RELATION: BINDS_TO ->[test_health_service] def test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks(): db = MagicMock() config_manager = MagicMock() @@ -222,12 +227,17 @@ def test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks(): db.commit.assert_called_once() cleanup_instance.delete_task_with_logs.assert_any_call("task-1") cleanup_instance.delete_task_with_logs.assert_any_call("task-2") - cleanup_instance.delete_task_with_logs.call_count == 2 + assert cleanup_instance.delete_task_with_logs.call_count == 2 assert "task-1" not in task_manager.tasks assert 
"task-2" not in task_manager.tasks assert "task-3" in task_manager.tasks +# [/DEF:test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks:Function] + + +# [DEF:test_delete_validation_report_returns_false_for_unknown_record:Function] +# @RELATION: BINDS_TO ->[test_health_service] def test_delete_validation_report_returns_false_for_unknown_record(): db = MagicMock() db.query.return_value.filter.return_value.first.return_value = None @@ -237,6 +247,11 @@ def test_delete_validation_report_returns_false_for_unknown_record(): assert service.delete_validation_report("missing") is False +# [/DEF:test_delete_validation_report_returns_false_for_unknown_record:Function] + + +# [DEF:test_delete_validation_report_swallows_linked_task_cleanup_failure:Function] +# @RELATION: BINDS_TO ->[test_health_service] def test_delete_validation_report_swallows_linked_task_cleanup_failure(): db = MagicMock() config_manager = MagicMock() @@ -264,11 +279,14 @@ def test_delete_validation_report_swallows_linked_task_cleanup_failure(): db.query.side_effect = [first_query, peer_query] - with patch("src.services.health_service.TaskCleanupService") as cleanup_cls, patch( - "src.services.health_service.logger" - ) as mock_logger: + with ( + patch("src.services.health_service.TaskCleanupService") as cleanup_cls, + patch("src.services.health_service.logger") as mock_logger, + ): cleanup_instance = MagicMock() - cleanup_instance.delete_task_with_logs.side_effect = RuntimeError("cleanup exploded") + cleanup_instance.delete_task_with_logs.side_effect = RuntimeError( + "cleanup exploded" + ) cleanup_cls.return_value = cleanup_instance service = HealthService(db, config_manager=config_manager) @@ -282,4 +300,5 @@ def test_delete_validation_report_swallows_linked_task_cleanup_failure(): assert "task-1" not in task_manager.tasks +# [/DEF:test_delete_validation_report_swallows_linked_task_cleanup_failure:Function] # [/DEF:test_health_service:Module] diff --git 
a/backend/src/services/__tests__/test_llm_plugin_persistence.py b/backend/src/services/__tests__/test_llm_plugin_persistence.py index 76109823..0946130c 100644 --- a/backend/src/services/__tests__/test_llm_plugin_persistence.py +++ b/backend/src/services/__tests__/test_llm_plugin_persistence.py @@ -1,4 +1,5 @@ -# [DEF:backend.src.services.__tests__.test_llm_plugin_persistence:Module] +# [DEF:test_llm_plugin_persistence:Module] +# @RELATION: VERIFIES ->[src.plugins.llm_analysis.plugin.DashboardValidationPlugin] # @COMPLEXITY: 3 # @PURPOSE: Regression test for ValidationRecord persistence fields populated from task context. @@ -9,6 +10,7 @@ from src.plugins.llm_analysis import plugin as plugin_module # [DEF:_DummyLogger:Class] +# @RELATION: BINDS_TO ->[test_llm_plugin_persistence] # @PURPOSE: Minimal logger shim for TaskContext-like objects used in tests. class _DummyLogger: def with_source(self, _source: str): @@ -25,10 +27,13 @@ class _DummyLogger: def error(self, *_args, **_kwargs): return None + + # [/DEF:_DummyLogger:Class] # [DEF:_FakeDBSession:Class] +# @RELATION: BINDS_TO ->[test_llm_plugin_persistence] # @PURPOSE: Captures persisted records for assertion and mimics SQLAlchemy session methods used by plugin. class _FakeDBSession: def __init__(self): @@ -44,13 +49,18 @@ class _FakeDBSession: def close(self): self.closed = True + + # [/DEF:_FakeDBSession:Class] # [DEF:test_dashboard_validation_plugin_persists_task_and_environment_ids:Function] +# @RELATION: BINDS_TO ->[test_llm_plugin_persistence] # @PURPOSE: Ensure db ValidationRecord includes context.task_id and params.environment_id. 
@pytest.mark.asyncio -async def test_dashboard_validation_plugin_persists_task_and_environment_ids(tmp_path, monkeypatch): +async def test_dashboard_validation_plugin_persists_task_and_environment_ids( + tmp_path, monkeypatch +): fake_db = _FakeDBSession() env = types.SimpleNamespace(id="env-42") @@ -112,7 +122,9 @@ async def test_dashboard_validation_plugin_persists_task_and_environment_ids(tmp class _FakeSupersetClient: def __init__(self, _env): - self.network = types.SimpleNamespace(request=lambda **_kwargs: {"result": []}) + self.network = types.SimpleNamespace( + request=lambda **_kwargs: {"result": []} + ) monkeypatch.setattr(plugin_module, "SessionLocal", lambda: fake_db) monkeypatch.setattr(plugin_module, "LLMProviderService", _FakeProviderService) @@ -120,7 +132,9 @@ async def test_dashboard_validation_plugin_persists_task_and_environment_ids(tmp monkeypatch.setattr(plugin_module, "LLMClient", _FakeLLMClient) monkeypatch.setattr(plugin_module, "NotificationService", _FakeNotificationService) monkeypatch.setattr(plugin_module, "SupersetClient", _FakeSupersetClient) - monkeypatch.setattr("src.dependencies.get_config_manager", lambda: _FakeConfigManager()) + monkeypatch.setattr( + "src.dependencies.get_config_manager", lambda: _FakeConfigManager() + ) context = types.SimpleNamespace( task_id="task-999", @@ -144,7 +158,9 @@ async def test_dashboard_validation_plugin_persists_task_and_environment_ids(tmp assert fake_db.added is not None assert fake_db.added.task_id == "task-999" assert fake_db.added.environment_id == "env-42" + + # [/DEF:test_dashboard_validation_plugin_persists_task_and_environment_ids:Function] -# [/DEF:backend.src.services.__tests__.test_llm_plugin_persistence:Module] \ No newline at end of file +# [/DEF:test_llm_plugin_persistence:Module] diff --git a/backend/src/services/__tests__/test_resource_service.py b/backend/src/services/__tests__/test_resource_service.py index 2e100550..b8e69aba 100644 --- 
a/backend/src/services/__tests__/test_resource_service.py +++ b/backend/src/services/__tests__/test_resource_service.py @@ -1,10 +1,9 @@ -# [DEF:backend.src.services.__tests__.test_resource_service:Module] +# [DEF:TestResourceService:Module] # @COMPLEXITY: 3 # @SEMANTICS: resource-service, tests, dashboards, datasets, activity # @PURPOSE: Unit tests for ResourceService # @LAYER: Service -# @RELATION: TESTS -> backend.src.services.resource_service -# @RELATION: VERIFIES -> ResourceService +# @RELATION: VERIFIES ->[src.services.resource_service.ResourceService] # @INVARIANT: Resource summaries preserve task linkage and status projection behavior. import pytest @@ -13,25 +12,27 @@ from datetime import datetime, timezone # [DEF:test_get_dashboards_with_status:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @PURPOSE: Validate dashboard enrichment includes git/task status projections. # @TEST: get_dashboards_with_status returns dashboards with git and task status # @PRE: SupersetClient returns dashboard list # @POST: Each dashboard has git_status and last_task fields @pytest.mark.asyncio async def test_get_dashboards_with_status(): - with patch("src.services.resource_service.SupersetClient") as mock_client, \ - patch("src.services.resource_service.GitService"): - + with ( + patch("src.services.resource_service.SupersetClient") as mock_client, + patch("src.services.resource_service.GitService"), + ): from src.services.resource_service import ResourceService - + service = ResourceService() - + # Mock Superset response mock_client.return_value.get_dashboards_summary.return_value = [ {"id": 1, "title": "Dashboard 1", "slug": "dash-1"}, - {"id": 2, "title": "Dashboard 2", "slug": "dash-2"} + {"id": 2, "title": "Dashboard 2", "slug": "dash-2"}, ] - + # Mock tasks task_prod_old = MagicMock() task_prod_old.id = "task-123" @@ -62,7 +63,7 @@ async def test_get_dashboards_with_status(): env, [task_prod_old, task_prod_new, task_other_env], ) - + assert len(result) == 2 
assert result[0]["id"] == 1 assert "git_status" in result[0] @@ -76,35 +77,35 @@ async def test_get_dashboards_with_status(): # [DEF:test_get_datasets_with_status:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: get_datasets_with_status returns datasets with task status # @PRE: SupersetClient returns dataset list # @POST: Each dataset has last_task field @pytest.mark.asyncio async def test_get_datasets_with_status(): with patch("src.services.resource_service.SupersetClient") as mock_client: - from src.services.resource_service import ResourceService - + service = ResourceService() - + # Mock Superset response mock_client.return_value.get_datasets_summary.return_value = [ {"id": 1, "table_name": "users", "schema": "public", "database": "app"}, - {"id": 2, "table_name": "orders", "schema": "public", "database": "app"} + {"id": 2, "table_name": "orders", "schema": "public", "database": "app"}, ] - + # Mock tasks mock_task = MagicMock() mock_task.id = "task-456" mock_task.status = "RUNNING" mock_task.params = {"resource_id": "dataset-1"} mock_task.created_at = datetime.now() - + env = MagicMock() env.id = "prod" - + result = await service.get_datasets_with_status(env, [mock_task]) - + assert len(result) == 2 assert result[0]["table_name"] == "users" assert "last_task" in result[0] @@ -116,35 +117,36 @@ async def test_get_datasets_with_status(): # [DEF:test_get_activity_summary:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: get_activity_summary returns active count and recent tasks # @PRE: tasks list provided # @POST: Returns dict with active_count and recent_tasks def test_get_activity_summary(): from src.services.resource_service import ResourceService - + service = ResourceService() - + # Create mock tasks task1 = MagicMock() task1.id = "task-1" task1.status = "RUNNING" task1.params = {"resource_name": "Dashboard 1", "resource_type": "dashboard"} task1.created_at = datetime(2024, 1, 1, 10, 0, 0) - + task2 = MagicMock() task2.id = 
"task-2" task2.status = "SUCCESS" task2.params = {"resource_name": "Dataset 1", "resource_type": "dataset"} task2.created_at = datetime(2024, 1, 1, 9, 0, 0) - + task3 = MagicMock() task3.id = "task-3" task3.status = "WAITING_INPUT" task3.params = {"resource_name": "Dashboard 2", "resource_type": "dashboard"} task3.created_at = datetime(2024, 1, 1, 8, 0, 0) - + result = service.get_activity_summary([task1, task2, task3]) - + assert result["active_count"] == 2 # RUNNING + WAITING_INPUT assert len(result["recent_tasks"]) == 3 @@ -153,51 +155,52 @@ def test_get_activity_summary(): # [DEF:test_get_git_status_for_dashboard_no_repo:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: _get_git_status_for_dashboard returns None when no repo exists # @PRE: GitService returns None for repo # @POST: Returns None def test_get_git_status_for_dashboard_no_repo(): with patch("src.services.resource_service.GitService") as mock_git: - from src.services.resource_service import ResourceService - + service = ResourceService() mock_git.return_value.get_repo.return_value = None - + result = service._get_git_status_for_dashboard(123) - + assert result is not None - assert result['sync_status'] == 'NO_REPO' - assert result['has_repo'] is False + assert result["sync_status"] == "NO_REPO" + assert result["has_repo"] is False # [/DEF:test_get_git_status_for_dashboard_no_repo:Function] # [DEF:test_get_last_task_for_resource:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: _get_last_task_for_resource returns most recent task for resource # @PRE: tasks list with matching resource_id # @POST: Returns task summary with task_id and status def test_get_last_task_for_resource(): from src.services.resource_service import ResourceService - + service = ResourceService() - + # Create mock tasks task1 = MagicMock() task1.id = "task-old" task1.status = "SUCCESS" task1.params = {"resource_id": "dashboard-1"} task1.created_at = datetime(2024, 1, 1, 10, 0, 0) - + task2 = 
MagicMock() task2.id = "task-new" task2.status = "RUNNING" task2.params = {"resource_id": "dashboard-1"} task2.created_at = datetime(2024, 1, 1, 12, 0, 0) - + result = service._get_last_task_for_resource("dashboard-1", [task1, task2]) - + assert result is not None assert result["task_id"] == "task-new" # Most recent assert result["status"] == "RUNNING" @@ -207,27 +210,28 @@ def test_get_last_task_for_resource(): # [DEF:test_extract_resource_name_from_task:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: _extract_resource_name_from_task extracts name from params # @PRE: task has resource_name in params # @POST: Returns resource name or fallback def test_extract_resource_name_from_task(): from src.services.resource_service import ResourceService - + service = ResourceService() - + # Task with resource_name task = MagicMock() task.id = "task-123" task.params = {"resource_name": "My Dashboard"} - + result = service._extract_resource_name_from_task(task) assert result == "My Dashboard" - + # Task without resource_name task2 = MagicMock() task2.id = "task-456" task2.params = {} - + result2 = service._extract_resource_name_from_task(task2) assert "task-456" in result2 @@ -236,48 +240,56 @@ def test_extract_resource_name_from_task(): # [DEF:test_get_last_task_for_resource_empty_tasks:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: _get_last_task_for_resource returns None for empty tasks list # @PRE: tasks is empty list # @POST: Returns None def test_get_last_task_for_resource_empty_tasks(): from src.services.resource_service import ResourceService - + service = ResourceService() - + result = service._get_last_task_for_resource("dashboard-1", []) assert result is None + + # [/DEF:test_get_last_task_for_resource_empty_tasks:Function] # [DEF:test_get_last_task_for_resource_no_match:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: _get_last_task_for_resource returns None when no tasks match resource_id # @PRE: tasks list has 
no matching resource_id # @POST: Returns None def test_get_last_task_for_resource_no_match(): from src.services.resource_service import ResourceService - + service = ResourceService() - + task = MagicMock() task.id = "task-999" task.status = "SUCCESS" task.params = {"resource_id": "dashboard-99"} task.created_at = datetime(2024, 1, 1, 10, 0, 0) - + result = service._get_last_task_for_resource("dashboard-1", [task]) assert result is None + + # [/DEF:test_get_last_task_for_resource_no_match:Function] # [DEF:test_get_dashboards_with_status_handles_mixed_naive_and_aware_task_datetimes:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: get_dashboards_with_status handles mixed naive/aware datetimes without comparison errors. # @PRE: Task list includes both timezone-aware and timezone-naive timestamps. # @POST: Latest task is selected deterministically and no exception is raised. @pytest.mark.asyncio async def test_get_dashboards_with_status_handles_mixed_naive_and_aware_task_datetimes(): - with patch("src.services.resource_service.SupersetClient") as mock_client, \ - patch("src.services.resource_service.GitService"): - + with ( + patch("src.services.resource_service.SupersetClient") as mock_client, + patch("src.services.resource_service.GitService"), + ): from src.services.resource_service import ResourceService service = ResourceService() @@ -305,18 +317,22 @@ async def test_get_dashboards_with_status_handles_mixed_naive_and_aware_task_dat result = await service.get_dashboards_with_status(env, [task_naive, task_aware]) assert result[0]["last_task"]["task_id"] == "task-aware" + + # [/DEF:test_get_dashboards_with_status_handles_mixed_naive_and_aware_task_datetimes:Function] # [DEF:test_get_dashboards_with_status_prefers_latest_decisive_validation_status_over_newer_unknown:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: get_dashboards_with_status keeps latest task identity while falling back to older decisive validation status. 
# @PRE: Same dashboard has older WARN and newer UNKNOWN validation tasks. # @POST: Returned last_task points to newest task but preserves WARN as last meaningful validation state. @pytest.mark.anyio async def test_get_dashboards_with_status_prefers_latest_decisive_validation_status_over_newer_unknown(): - with patch("src.services.resource_service.SupersetClient") as mock_client, \ - patch("src.services.resource_service.GitService"): - + with ( + patch("src.services.resource_service.SupersetClient") as mock_client, + patch("src.services.resource_service.GitService"), + ): from src.services.resource_service import ResourceService service = ResourceService() @@ -343,23 +359,29 @@ async def test_get_dashboards_with_status_prefers_latest_decisive_validation_sta env = MagicMock() env.id = "prod" - result = await service.get_dashboards_with_status(env, [task_warn, task_unknown]) + result = await service.get_dashboards_with_status( + env, [task_warn, task_unknown] + ) assert result[0]["last_task"]["task_id"] == "task-unknown" assert result[0]["last_task"]["status"] == "RUNNING" assert result[0]["last_task"]["validation_status"] == "WARN" + + # [/DEF:test_get_dashboards_with_status_prefers_latest_decisive_validation_status_over_newer_unknown:Function] # [DEF:test_get_dashboards_with_status_falls_back_to_latest_unknown_without_decisive_history:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: get_dashboards_with_status still returns newest UNKNOWN when no decisive validation exists. # @PRE: Same dashboard has only UNKNOWN validation tasks. # @POST: Returned last_task keeps newest UNKNOWN task. 
@pytest.mark.anyio async def test_get_dashboards_with_status_falls_back_to_latest_unknown_without_decisive_history(): - with patch("src.services.resource_service.SupersetClient") as mock_client, \ - patch("src.services.resource_service.GitService"): - + with ( + patch("src.services.resource_service.SupersetClient") as mock_client, + patch("src.services.resource_service.GitService"), + ): from src.services.resource_service import ResourceService service = ResourceService() @@ -386,14 +408,19 @@ async def test_get_dashboards_with_status_falls_back_to_latest_unknown_without_d env = MagicMock() env.id = "prod" - result = await service.get_dashboards_with_status(env, [task_unknown_old, task_unknown_new]) + result = await service.get_dashboards_with_status( + env, [task_unknown_old, task_unknown_new] + ) assert result[0]["last_task"]["task_id"] == "task-unknown-new" assert result[0]["last_task"]["validation_status"] == "UNKNOWN" + + # [/DEF:test_get_dashboards_with_status_falls_back_to_latest_unknown_without_decisive_history:Function] # [DEF:test_get_last_task_for_resource_handles_mixed_naive_and_aware_created_at:Function] +# @RELATION: BINDS_TO ->[TestResourceService] # @TEST: _get_last_task_for_resource handles mixed naive/aware created_at values. # @PRE: Matching tasks include naive and aware created_at timestamps. # @POST: Latest task is returned without raising datetime comparison errors. 
@@ -414,11 +441,15 @@ def test_get_last_task_for_resource_handles_mixed_naive_and_aware_created_at(): task_aware.params = {"resource_id": "dashboard-1"} task_aware.created_at = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc) - result = service._get_last_task_for_resource("dashboard-1", [task_naive, task_aware]) + result = service._get_last_task_for_resource( + "dashboard-1", [task_naive, task_aware] + ) assert result is not None assert result["task_id"] == "task-new" + + # [/DEF:test_get_last_task_for_resource_handles_mixed_naive_and_aware_created_at:Function] -# [/DEF:backend.src.services.__tests__.test_resource_service:Module] +# [/DEF:TestResourceService:Module] diff --git a/backend/tests/core/test_mapping_service.py b/backend/tests/core/test_mapping_service.py index dd7f2102..921c1c5a 100644 --- a/backend/tests/core/test_mapping_service.py +++ b/backend/tests/core/test_mapping_service.py @@ -1,9 +1,9 @@ -# [DEF:backend.tests.core.test_mapping_service:Module] +# [DEF:TestMappingService:Module] # # @COMPLEXITY: 3 # @PURPOSE: Unit tests for the IdMappingService matching UUIDs to integer IDs. 
# @LAYER: Domain -# @RELATION: VERIFIES -> backend.src.core.mapping_service +# @RELATION: VERIFIES ->[src.core.mapping_service.IdMappingService] # import pytest from datetime import datetime, timezone @@ -21,16 +21,18 @@ if backend_dir not in sys.path: from src.models.mapping import Base, ResourceMapping, ResourceType from src.core.mapping_service import IdMappingService + @pytest.fixture def db_session(): # In-memory SQLite for testing - engine = create_engine('sqlite:///:memory:') + engine = create_engine("sqlite:///:memory:") Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() yield session session.close() + class MockSupersetClient: def __init__(self, resources): self.resources = resources @@ -38,16 +40,25 @@ class MockSupersetClient: def get_all_resources(self, endpoint, since_dttm=None): return self.resources.get(endpoint, []) + +# [DEF:test_sync_environment_upserts_correctly:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_sync_environment_upserts_correctly(db_session): service = IdMappingService(db_session) - mock_client = MockSupersetClient({ - "chart": [ - {"id": 42, "uuid": "123e4567-e89b-12d3-a456-426614174000", "slice_name": "Test Chart"} - ] - }) + mock_client = MockSupersetClient( + { + "chart": [ + { + "id": 42, + "uuid": "123e4567-e89b-12d3-a456-426614174000", + "slice_name": "Test Chart", + } + ] + } + ) service.sync_environment("test-env", mock_client) - + mapping = db_session.query(ResourceMapping).first() assert mapping is not None assert mapping.environment_id == "test-env" @@ -56,6 +67,12 @@ def test_sync_environment_upserts_correctly(db_session): assert mapping.remote_integer_id == "42" assert mapping.resource_name == "Test Chart" + +# [/DEF:test_sync_environment_upserts_correctly:Function] + + +# [DEF:test_get_remote_id_returns_integer:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_get_remote_id_returns_integer(db_session): service = IdMappingService(db_session) mapping 
= ResourceMapping( @@ -64,7 +81,7 @@ def test_get_remote_id_returns_integer(db_session): uuid="uuid-1", remote_integer_id="99", resource_name="Test DS", - last_synced_at=datetime.now(timezone.utc) + last_synced_at=datetime.now(timezone.utc), ) db_session.add(mapping) db_session.commit() @@ -72,80 +89,126 @@ def test_get_remote_id_returns_integer(db_session): result = service.get_remote_id("test-env", ResourceType.DATASET, "uuid-1") assert result == 99 + +# [/DEF:test_get_remote_id_returns_integer:Function] + + +# [DEF:test_get_remote_ids_batch_returns_dict:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_get_remote_ids_batch_returns_dict(db_session): service = IdMappingService(db_session) m1 = ResourceMapping( environment_id="test-env", resource_type=ResourceType.DASHBOARD, uuid="uuid-1", - remote_integer_id="11" + remote_integer_id="11", ) m2 = ResourceMapping( environment_id="test-env", resource_type=ResourceType.DASHBOARD, uuid="uuid-2", - remote_integer_id="22" + remote_integer_id="22", ) db_session.add_all([m1, m2]) db_session.commit() - result = service.get_remote_ids_batch("test-env", ResourceType.DASHBOARD, ["uuid-1", "uuid-2", "uuid-missing"]) - + result = service.get_remote_ids_batch( + "test-env", ResourceType.DASHBOARD, ["uuid-1", "uuid-2", "uuid-missing"] + ) + assert len(result) == 2 assert result["uuid-1"] == 11 assert result["uuid-2"] == 22 assert "uuid-missing" not in result + +# [/DEF:test_get_remote_ids_batch_returns_dict:Function] + + +# [DEF:test_sync_environment_updates_existing_mapping:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_sync_environment_updates_existing_mapping(db_session): """Verify that sync_environment updates an existing mapping (upsert UPDATE path).""" from src.models.mapping import ResourceMapping + # Pre-populate a mapping existing = ResourceMapping( environment_id="test-env", resource_type=ResourceType.CHART, uuid="123e4567-e89b-12d3-a456-426614174000", remote_integer_id="10", - 
resource_name="Old Name" + resource_name="Old Name", ) db_session.add(existing) db_session.commit() service = IdMappingService(db_session) - mock_client = MockSupersetClient({ - "chart": [ - {"id": 42, "uuid": "123e4567-e89b-12d3-a456-426614174000", "slice_name": "Updated Name"} - ] - }) + mock_client = MockSupersetClient( + { + "chart": [ + { + "id": 42, + "uuid": "123e4567-e89b-12d3-a456-426614174000", + "slice_name": "Updated Name", + } + ] + } + ) service.sync_environment("test-env", mock_client) - mapping = db_session.query(ResourceMapping).filter_by( - uuid="123e4567-e89b-12d3-a456-426614174000" - ).first() + mapping = ( + db_session.query(ResourceMapping) + .filter_by(uuid="123e4567-e89b-12d3-a456-426614174000") + .first() + ) assert mapping.remote_integer_id == "42" assert mapping.resource_name == "Updated Name" # Should still be only one record (updated, not duplicated) count = db_session.query(ResourceMapping).count() assert count == 1 + +# [/DEF:test_sync_environment_updates_existing_mapping:Function] + + +# [DEF:test_sync_environment_skips_resources_without_uuid:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_sync_environment_skips_resources_without_uuid(db_session): """Resources missing uuid or having id=None should be silently skipped.""" service = IdMappingService(db_session) - mock_client = MockSupersetClient({ - "chart": [ - {"id": 42, "slice_name": "No UUID"}, # Missing 'uuid' -> skipped - {"id": None, "uuid": "valid-uuid", "slice_name": "ID is None"}, # id=None -> skipped - {"id": None, "uuid": None, "slice_name": "Both None"}, # both None -> skipped - ] - }) + mock_client = MockSupersetClient( + { + "chart": [ + {"id": 42, "slice_name": "No UUID"}, # Missing 'uuid' -> skipped + { + "id": None, + "uuid": "valid-uuid", + "slice_name": "ID is None", + }, # id=None -> skipped + { + "id": None, + "uuid": None, + "slice_name": "Both None", + }, # both None -> skipped + ] + } + ) service.sync_environment("test-env", mock_client) count 
= db_session.query(ResourceMapping).count() assert count == 0 + +# [/DEF:test_sync_environment_skips_resources_without_uuid:Function] + + +# [DEF:test_sync_environment_handles_api_error_gracefully:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_sync_environment_handles_api_error_gracefully(db_session): """If one resource type fails, others should still sync.""" + class FailingClient: def get_all_resources(self, endpoint, since_dttm=None): if endpoint == "chart": @@ -162,12 +225,24 @@ def test_sync_environment_handles_api_error_gracefully(db_session): mapping = db_session.query(ResourceMapping).first() assert mapping.resource_type == ResourceType.DATASET + +# [/DEF:test_sync_environment_handles_api_error_gracefully:Function] + + +# [DEF:test_get_remote_id_returns_none_for_missing:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_get_remote_id_returns_none_for_missing(db_session): """get_remote_id should return None when no mapping exists.""" service = IdMappingService(db_session) result = service.get_remote_id("test-env", ResourceType.CHART, "nonexistent-uuid") assert result is None + +# [/DEF:test_get_remote_id_returns_none_for_missing:Function] + + +# [DEF:test_get_remote_ids_batch_returns_empty_for_empty_input:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_get_remote_ids_batch_returns_empty_for_empty_input(db_session): """get_remote_ids_batch should return {} for an empty list of UUIDs.""" service = IdMappingService(db_session) @@ -175,70 +250,90 @@ def test_get_remote_ids_batch_returns_empty_for_empty_input(db_session): assert result == {} +# [/DEF:test_get_remote_ids_batch_returns_empty_for_empty_input:Function] + + +# [DEF:test_mapping_service_alignment_with_test_data:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_mapping_service_alignment_with_test_data(db_session): """**@TEST_DATA**: Verifies that the service aligns with the resource_mapping_record contract.""" # Contract: {'environment_id': 
'prod-env-1', 'resource_type': 'chart', 'uuid': '123e4567-e89b-12d3-a456-426614174000', 'remote_integer_id': '42'} contract_data = { - 'environment_id': 'prod-env-1', - 'resource_type': ResourceType.CHART, - 'uuid': '123e4567-e89b-12d3-a456-426614174000', - 'remote_integer_id': '42' + "environment_id": "prod-env-1", + "resource_type": ResourceType.CHART, + "uuid": "123e4567-e89b-12d3-a456-426614174000", + "remote_integer_id": "42", } - + mapping = ResourceMapping(**contract_data) db_session.add(mapping) db_session.commit() service = IdMappingService(db_session) result = service.get_remote_id( - contract_data['environment_id'], - contract_data['resource_type'], - contract_data['uuid'] + contract_data["environment_id"], + contract_data["resource_type"], + contract_data["uuid"], ) - + assert result == 42 +# [/DEF:test_mapping_service_alignment_with_test_data:Function] + + +# [DEF:test_sync_environment_requires_existing_env:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_sync_environment_requires_existing_env(db_session): """**@PRE**: Verify behavior when environment_id is invalid/missing in DB. - Note: The current implementation doesn't strictly check for environment existencia in the DB + Note: The current implementation doesn't strictly check for environment existence in the DB before polling, but it should handle it gracefully or follow the contract. """ service = IdMappingService(db_session) mock_client = MockSupersetClient({"chart": []}) - - # Even if environment doesn't exist in a hypothetical 'environments' table, + + # Even if environment doesn't exist in a hypothetical 'environments' table, # the service should still complete or fail according to defined error handling. # In GRACE-Poly, @PRE is a hard requirement. If we don't have an Env model check, # we simulate the intent. 
- + service.sync_environment("non-existent-env", mock_client) # If no error raised, at least verify no mappings were created for other envs assert db_session.query(ResourceMapping).count() == 0 +# [/DEF:test_sync_environment_requires_existing_env:Function] + +# [DEF:test_sync_environment_deletes_stale_mappings:Function] +# @RELATION: BINDS_TO ->[TestMappingService] def test_sync_environment_deletes_stale_mappings(db_session): """Verify that mappings for resources deleted from the remote environment are removed from the local DB on the next sync cycle.""" service = IdMappingService(db_session) # First sync: 2 charts exist - client_v1 = MockSupersetClient({ - "chart": [ - {"id": 1, "uuid": "aaa", "slice_name": "Chart A"}, - {"id": 2, "uuid": "bbb", "slice_name": "Chart B"}, - ] - }) + client_v1 = MockSupersetClient( + { + "chart": [ + {"id": 1, "uuid": "aaa", "slice_name": "Chart A"}, + {"id": 2, "uuid": "bbb", "slice_name": "Chart B"}, + ] + } + ) service.sync_environment("env1", client_v1) - assert db_session.query(ResourceMapping).filter_by(environment_id="env1").count() == 2 + assert ( + db_session.query(ResourceMapping).filter_by(environment_id="env1").count() == 2 + ) # Second sync: user deleted Chart B from superset - client_v2 = MockSupersetClient({ - "chart": [ - {"id": 1, "uuid": "aaa", "slice_name": "Chart A"}, - ] - }) + client_v2 = MockSupersetClient( + { + "chart": [ + {"id": 1, "uuid": "aaa", "slice_name": "Chart A"}, + ] + } + ) service.sync_environment("env1", client_v2) remaining = db_session.query(ResourceMapping).filter_by(environment_id="env1").all() @@ -246,4 +341,5 @@ def test_sync_environment_deletes_stale_mappings(db_session): assert remaining[0].uuid == "aaa" -# [/DEF:backend.tests.core.test_mapping_service:Module] +# [/DEF:test_sync_environment_deletes_stale_mappings:Function] +# [/DEF:TestMappingService:Module] diff --git a/backend/tests/test_dashboards_api.py b/backend/tests/test_dashboards_api.py index 13636940..eb76149e 100644 --- 
a/backend/tests/test_dashboards_api.py +++ b/backend/tests/test_dashboards_api.py @@ -1,4 +1,5 @@ -# [DEF:backend.tests.test_dashboards_api:Module] +# [DEF:TestDashboardsApi:Module] +# @RELATION: VERIFIES ->[src.api.routes.dashboards] # @COMPLEXITY: 3 # @PURPOSE: Comprehensive contract-driven tests for Dashboard Hub API # @LAYER: Domain (Tests) @@ -8,8 +9,20 @@ from fastapi.testclient import TestClient from unittest.mock import MagicMock, patch, AsyncMock from datetime import datetime, timezone from src.app import app -from src.api.routes.dashboards import DashboardsResponse, DashboardDetailResponse, DashboardTaskHistoryResponse, DatabaseMappingsResponse -from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service +from src.api.routes.dashboards import ( + DashboardsResponse, + DashboardDetailResponse, + DashboardTaskHistoryResponse, + DatabaseMappingsResponse, +) +from src.dependencies import ( + get_current_user, + has_permission, + get_config_manager, + get_task_manager, + get_resource_service, + get_mapping_service, +) # Global mock user mock_user = MagicMock() @@ -19,55 +32,73 @@ admin_role = MagicMock() admin_role.name = "Admin" mock_user.roles.append(admin_role) + @pytest.fixture(autouse=True) def mock_deps(): config_manager = MagicMock() task_manager = MagicMock() resource_service = MagicMock() mapping_service = MagicMock() - + app.dependency_overrides[get_config_manager] = lambda: config_manager app.dependency_overrides[get_task_manager] = lambda: task_manager app.dependency_overrides[get_resource_service] = lambda: resource_service app.dependency_overrides[get_mapping_service] = lambda: mapping_service app.dependency_overrides[get_current_user] = lambda: mock_user - + # Overrides for specific permission checks - app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user - app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = 
lambda: mock_user - app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user + app.dependency_overrides[has_permission("plugin:migration", "READ")] = ( + lambda: mock_user + ) + app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = ( + lambda: mock_user + ) + app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = ( + lambda: mock_user + ) app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user app.dependency_overrides[has_permission("dashboards", "READ")] = lambda: mock_user - + yield { "config": config_manager, "task": task_manager, "resource": resource_service, - "mapping": mapping_service + "mapping": mapping_service, } app.dependency_overrides.clear() - + + client = TestClient(app) # --- 1. get_dashboards tests --- + +# [DEF:test_get_dashboards_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboards_success(mock_deps): """Uses @TEST_FIXTURE: dashboard_list_happy data.""" mock_env = MagicMock() mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["task"].get_all_tasks.return_value = [] - + # @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"} - mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ - {"id": 1, "title": "Main Revenue", "slug": "main-revenue", "git_status": {"branch": "main", "sync_status": "OK"}} - ]) + mock_deps["resource"].get_dashboards_with_status = AsyncMock( + return_value=[ + { + "id": 1, + "title": "Main Revenue", + "slug": "main-revenue", + "git_status": {"branch": "main", "sync_status": "OK"}, + } + ] + ) response = client.get("/api/dashboards?env_id=prod&page=1&page_size=10") - + assert response.status_code == 200 data = response.json() - + # exhaustive @POST assertions assert "dashboards" in data assert len(data["dashboards"]) == 1 # @TEST_FIXTURE: expected_count: 1 @@ -76,26 +107,40 @@ def test_get_dashboards_success(mock_deps): 
assert data["page"] == 1 assert data["page_size"] == 10 assert data["total_pages"] == 1 - + # schema validation DashboardsResponse(**data) + +# [/DEF:test_get_dashboards_success:Function] + + +# [DEF:test_get_dashboards_with_search:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboards_with_search(mock_deps): mock_env = MagicMock() mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["task"].get_all_tasks.return_value = [] - mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[ - {"id": 1, "title": "Sales Report", "slug": "sales"}, - {"id": 2, "title": "Marketing", "slug": "marketing"} - ]) - + mock_deps["resource"].get_dashboards_with_status = AsyncMock( + return_value=[ + {"id": 1, "title": "Sales Report", "slug": "sales"}, + {"id": 2, "title": "Marketing", "slug": "marketing"}, + ] + ) + response = client.get("/api/dashboards?env_id=prod&search=sales") assert response.status_code == 200 data = response.json() assert len(data["dashboards"]) == 1 assert data["dashboards"][0]["title"] == "Sales Report" + +# [/DEF:test_get_dashboards_with_search:Function] + + +# [DEF:test_get_dashboards_empty:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboards_empty(mock_deps): """@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}""" mock_env = MagicMock() @@ -103,7 +148,7 @@ def test_get_dashboards_empty(mock_deps): mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["task"].get_all_tasks.return_value = [] mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[]) - + response = client.get("/api/dashboards?env_id=empty_env") assert response.status_code == 200 data = response.json() @@ -112,6 +157,12 @@ def test_get_dashboards_empty(mock_deps): assert data["total_pages"] == 1 DashboardsResponse(**data) + +# [/DEF:test_get_dashboards_empty:Function] + + +# [DEF:test_get_dashboards_superset_failure:Function] +# 
@RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboards_superset_failure(mock_deps): """@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}""" mock_env = MagicMock() @@ -121,42 +172,62 @@ def test_get_dashboards_superset_failure(mock_deps): mock_deps["resource"].get_dashboards_with_status = AsyncMock( side_effect=Exception("Connection refused") ) - + response = client.get("/api/dashboards?env_id=bad_conn") assert response.status_code == 503 assert "Failed to fetch dashboards" in response.json()["detail"] + +# [/DEF:test_get_dashboards_superset_failure:Function] + + +# [DEF:test_get_dashboards_env_not_found:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboards_env_not_found(mock_deps): mock_deps["config"].get_environments.return_value = [] response = client.get("/api/dashboards?env_id=nonexistent") assert response.status_code == 404 assert "Environment not found" in response.json()["detail"] + +# [/DEF:test_get_dashboards_env_not_found:Function] + + +# [DEF:test_get_dashboards_invalid_pagination:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboards_invalid_pagination(mock_deps): mock_env = MagicMock() mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] - + # page < 1 assert client.get("/api/dashboards?env_id=prod&page=0").status_code == 400 assert client.get("/api/dashboards?env_id=prod&page=-1").status_code == 400 - + # page_size < 1 assert client.get("/api/dashboards?env_id=prod&page_size=0").status_code == 400 - + # page_size > 100 assert client.get("/api/dashboards?env_id=prod&page_size=101").status_code == 400 + # --- 2. 
get_database_mappings tests --- +# [/DEF:test_get_dashboards_invalid_pagination:Function] + + +# [DEF:test_get_database_mappings_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_database_mappings_success(mock_deps): - mock_s = MagicMock(); mock_s.id = "s" - mock_t = MagicMock(); mock_t.id = "t" + mock_s = MagicMock() + mock_s.id = "s" + mock_t = MagicMock() + mock_t.id = "t" mock_deps["config"].get_environments.return_value = [mock_s, mock_t] - - mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[ - {"source_db": "src", "target_db": "dst", "confidence": 0.9} - ]) + + mock_deps["mapping"].get_suggestions = AsyncMock( + return_value=[{"source_db": "src", "target_db": "dst", "confidence": 0.9}] + ) response = client.get("/api/dashboards/db-mappings?source_env_id=s&target_env_id=t") assert response.status_code == 200 data = response.json() @@ -164,23 +235,41 @@ def test_get_database_mappings_success(mock_deps): assert data["mappings"][0]["confidence"] == 0.9 DatabaseMappingsResponse(**data) + +# [/DEF:test_get_database_mappings_success:Function] + + +# [DEF:test_get_database_mappings_env_not_found:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_database_mappings_env_not_found(mock_deps): mock_deps["config"].get_environments.return_value = [] - response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t") + response = client.get( + "/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t" + ) assert response.status_code == 404 + # --- 3. 
get_dashboard_detail tests --- +# [/DEF:test_get_database_mappings_env_not_found:Function] + + +# [DEF:test_get_dashboard_detail_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_detail_success(mock_deps): with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls: mock_env = MagicMock() mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] - + mock_client = MagicMock() detail_payload = { - "id": 42, "title": "Detail", "charts": [], "datasets": [], - "chart_count": 0, "dataset_count": 0 + "id": 42, + "title": "Detail", + "charts": [], + "datasets": [], + "chart_count": 0, + "dataset_count": 0, } mock_client.get_dashboard_detail.return_value = detail_payload mock_client_cls.return_value = mock_client @@ -191,16 +280,36 @@ def test_get_dashboard_detail_success(mock_deps): assert data["id"] == 42 DashboardDetailResponse(**data) + +# [/DEF:test_get_dashboard_detail_success:Function] + + +# [DEF:test_get_dashboard_detail_env_not_found:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_detail_env_not_found(mock_deps): mock_deps["config"].get_environments.return_value = [] response = client.get("/api/dashboards/42?env_id=missing") assert response.status_code == 404 + # --- 4. 
get_dashboard_tasks_history tests --- +# [/DEF:test_get_dashboard_detail_env_not_found:Function] + + +# [DEF:test_get_dashboard_tasks_history_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_tasks_history_success(mock_deps): now = datetime.now(timezone.utc) - task1 = MagicMock(id="t1", plugin_id="superset-backup", status="SUCCESS", started_at=now, finished_at=None, params={"env": "prod", "dashboards": [42]}, result={}) + task1 = MagicMock( + id="t1", + plugin_id="superset-backup", + status="SUCCESS", + started_at=now, + finished_at=None, + params={"env": "prod", "dashboards": [42]}, + result={}, + ) mock_deps["task"].get_all_tasks.return_value = [task1] response = client.get("/api/dashboards/42/tasks?env_id=prod") @@ -210,20 +319,39 @@ def test_get_dashboard_tasks_history_success(mock_deps): assert len(data["items"]) == 1 DashboardTaskHistoryResponse(**data) + +# [/DEF:test_get_dashboard_tasks_history_success:Function] + + +# [DEF:test_get_dashboard_tasks_history_sorting:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_tasks_history_sorting(mock_deps): """@POST: Response contains sorted task history (newest first).""" from datetime import timedelta + now = datetime.now(timezone.utc) older = now - timedelta(hours=2) newest = now - - task_old = MagicMock(id="t-old", plugin_id="superset-backup", status="SUCCESS", - started_at=older, finished_at=None, - params={"env": "prod", "dashboards": [42]}, result={}) - task_new = MagicMock(id="t-new", plugin_id="superset-backup", status="RUNNING", - started_at=newest, finished_at=None, - params={"env": "prod", "dashboards": [42]}, result={}) - + + task_old = MagicMock( + id="t-old", + plugin_id="superset-backup", + status="SUCCESS", + started_at=older, + finished_at=None, + params={"env": "prod", "dashboards": [42]}, + result={}, + ) + task_new = MagicMock( + id="t-new", + plugin_id="superset-backup", + status="RUNNING", + started_at=newest, + finished_at=None, + 
params={"env": "prod", "dashboards": [42]}, + result={}, + ) + # Provide in wrong order to verify the endpoint sorts mock_deps["task"].get_all_tasks.return_value = [task_old, task_new] @@ -235,38 +363,67 @@ def test_get_dashboard_tasks_history_sorting(mock_deps): assert data["items"][0]["id"] == "t-new" assert data["items"][1]["id"] == "t-old" + # --- 5. get_dashboard_thumbnail tests --- +# [/DEF:test_get_dashboard_tasks_history_sorting:Function] + + +# [DEF:test_get_dashboard_thumbnail_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_thumbnail_success(mock_deps): with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls: - mock_env = MagicMock(); mock_env.id = "prod" + mock_env = MagicMock() + mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] mock_client = MagicMock() - mock_response = MagicMock(status_code=200, content=b"img", headers={"Content-Type": "image/png"}) - mock_client.network.request.side_effect = lambda method, endpoint, **kw: {"image_url": "url"} if method == "POST" else mock_response + mock_response = MagicMock( + status_code=200, content=b"img", headers={"Content-Type": "image/png"} + ) + mock_client.network.request.side_effect = ( + lambda method, endpoint, **kw: {"image_url": "url"} + if method == "POST" + else mock_response + ) mock_client_cls.return_value = mock_client response = client.get("/api/dashboards/42/thumbnail?env_id=prod") assert response.status_code == 200 assert response.content == b"img" + +# [/DEF:test_get_dashboard_thumbnail_success:Function] + + +# [DEF:test_get_dashboard_thumbnail_env_not_found:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_thumbnail_env_not_found(mock_deps): mock_deps["config"].get_environments.return_value = [] response = client.get("/api/dashboards/42/thumbnail?env_id=missing") assert response.status_code == 404 + +# [/DEF:test_get_dashboard_thumbnail_env_not_found:Function] + + +# 
[DEF:test_get_dashboard_thumbnail_202:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_get_dashboard_thumbnail_202(mock_deps): """@POST: Returns 202 when thumbnail is being prepared by Superset.""" with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls: - mock_env = MagicMock(); mock_env.id = "prod" + mock_env = MagicMock() + mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] mock_client = MagicMock() - + # POST cache_dashboard_screenshot returns image_url mock_client.network.request.side_effect = [ {"image_url": "/api/v1/dashboard/42/thumbnail/abc123/"}, # POST - MagicMock(status_code=202, json=lambda: {"message": "Thumbnail is being generated"}, - headers={"Content-Type": "application/json"}) # GET thumbnail -> 202 + MagicMock( + status_code=202, + json=lambda: {"message": "Thumbnail is being generated"}, + headers={"Content-Type": "application/json"}, + ), # GET thumbnail -> 202 ] mock_client_cls.return_value = mock_client @@ -274,93 +431,156 @@ def test_get_dashboard_thumbnail_202(mock_deps): assert response.status_code == 202 assert "Thumbnail is being generated" in response.json()["message"] + # --- 6. 
migrate_dashboards tests --- +# [/DEF:test_get_dashboard_thumbnail_202:Function] + + +# [DEF:test_migrate_dashboards_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_migrate_dashboards_success(mock_deps): - mock_s = MagicMock(); mock_s.id = "s" - mock_t = MagicMock(); mock_t.id = "t" + mock_s = MagicMock() + mock_s.id = "s" + mock_t = MagicMock() + mock_t.id = "t" mock_deps["config"].get_environments.return_value = [mock_s, mock_t] mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="task-123")) - response = client.post("/api/dashboards/migrate", json={ - "source_env_id": "s", "target_env_id": "t", "dashboard_ids": [1] - }) + response = client.post( + "/api/dashboards/migrate", + json={"source_env_id": "s", "target_env_id": "t", "dashboard_ids": [1]}, + ) assert response.status_code == 200 assert response.json()["task_id"] == "task-123" + +# [/DEF:test_migrate_dashboards_success:Function] + + +# [DEF:test_migrate_dashboards_pre_checks:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_migrate_dashboards_pre_checks(mock_deps): # Missing IDs - response = client.post("/api/dashboards/migrate", json={ - "source_env_id": "s", "target_env_id": "t", "dashboard_ids": [] - }) + response = client.post( + "/api/dashboards/migrate", + json={"source_env_id": "s", "target_env_id": "t", "dashboard_ids": []}, + ) assert response.status_code == 400 assert "At least one dashboard ID must be provided" in response.json()["detail"] + +# [/DEF:test_migrate_dashboards_pre_checks:Function] + + +# [DEF:test_migrate_dashboards_env_not_found:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_migrate_dashboards_env_not_found(mock_deps): """@PRE: source_env_id and target_env_id are valid environment IDs.""" mock_deps["config"].get_environments.return_value = [] - response = client.post("/api/dashboards/migrate", json={ - "source_env_id": "ghost", "target_env_id": "t", "dashboard_ids": [1] - }) + response = client.post( + 
"/api/dashboards/migrate", + json={"source_env_id": "ghost", "target_env_id": "t", "dashboard_ids": [1]}, + ) assert response.status_code == 404 assert "Source environment not found" in response.json()["detail"] + # --- 7. backup_dashboards tests --- +# [/DEF:test_migrate_dashboards_env_not_found:Function] + + +# [DEF:test_backup_dashboards_success:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_backup_dashboards_success(mock_deps): - mock_env = MagicMock(); mock_env.id = "prod" + mock_env = MagicMock() + mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="backup-123")) - response = client.post("/api/dashboards/backup", json={ - "env_id": "prod", "dashboard_ids": [1] - }) + response = client.post( + "/api/dashboards/backup", json={"env_id": "prod", "dashboard_ids": [1]} + ) assert response.status_code == 200 assert response.json()["task_id"] == "backup-123" + +# [/DEF:test_backup_dashboards_success:Function] + + +# [DEF:test_backup_dashboards_pre_checks:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_backup_dashboards_pre_checks(mock_deps): - response = client.post("/api/dashboards/backup", json={ - "env_id": "prod", "dashboard_ids": [] - }) + response = client.post( + "/api/dashboards/backup", json={"env_id": "prod", "dashboard_ids": []} + ) assert response.status_code == 400 + +# [/DEF:test_backup_dashboards_pre_checks:Function] + + +# [DEF:test_backup_dashboards_env_not_found:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_backup_dashboards_env_not_found(mock_deps): """@PRE: env_id is a valid environment ID.""" mock_deps["config"].get_environments.return_value = [] - response = client.post("/api/dashboards/backup", json={ - "env_id": "ghost", "dashboard_ids": [1] - }) + response = client.post( + "/api/dashboards/backup", json={"env_id": "ghost", "dashboard_ids": [1]} + ) assert response.status_code == 404 assert 
"Environment not found" in response.json()["detail"] + +# [/DEF:test_backup_dashboards_env_not_found:Function] + + +# [DEF:test_backup_dashboards_with_schedule:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_backup_dashboards_with_schedule(mock_deps): """@POST: If schedule is provided, a scheduled task is created.""" - mock_env = MagicMock(); mock_env.id = "prod" + mock_env = MagicMock() + mock_env.id = "prod" mock_deps["config"].get_environments.return_value = [mock_env] mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="sched-456")) - response = client.post("/api/dashboards/backup", json={ - "env_id": "prod", "dashboard_ids": [1], "schedule": "0 0 * * *" - }) + response = client.post( + "/api/dashboards/backup", + json={"env_id": "prod", "dashboard_ids": [1], "schedule": "0 0 * * *"}, + ) assert response.status_code == 200 assert response.json()["task_id"] == "sched-456" - + # Verify schedule was propagated to create_task call_kwargs = mock_deps["task"].create_task.call_args task_params = call_kwargs.kwargs.get("params") or call_kwargs[1].get("params", {}) assert task_params["schedule"] == "0 0 * * *" + # --- 8. 
Internal logic: _task_matches_dashboard --- +# [/DEF:test_backup_dashboards_with_schedule:Function] + from src.api.routes.dashboards import _task_matches_dashboard + +# [DEF:test_task_matches_dashboard_logic:Function] +# @RELATION: BINDS_TO ->[TestDashboardsApi] def test_task_matches_dashboard_logic(): - task = MagicMock(plugin_id="superset-backup", params={"dashboards": [42], "env": "prod"}) + task = MagicMock( + plugin_id="superset-backup", params={"dashboards": [42], "env": "prod"} + ) assert _task_matches_dashboard(task, 42, "prod") is True assert _task_matches_dashboard(task, 43, "prod") is False assert _task_matches_dashboard(task, 42, "dev") is False - llm_task = MagicMock(plugin_id="llm_dashboard_validation", params={"dashboard_id": 42, "environment_id": "prod"}) + llm_task = MagicMock( + plugin_id="llm_dashboard_validation", + params={"dashboard_id": 42, "environment_id": "prod"}, + ) assert _task_matches_dashboard(llm_task, 42, "prod") is True assert _task_matches_dashboard(llm_task, 42, None) is True -# [/DEF:backend.tests.test_dashboards_api:Module] + +# [/DEF:test_task_matches_dashboard_logic:Function] +# [/DEF:TestDashboardsApi:Module]