# [DEF:backend.src.api.routes.dashboards:Module]
#
# @TIER: CRITICAL
# @SEMANTICS: api, dashboards, resources, hub
# @PURPOSE: API endpoints for the Dashboard Hub - listing dashboards with Git and task status
# @LAYER: API
# @RELATION: DEPENDS_ON -> backend.src.dependencies
# @RELATION: DEPENDS_ON -> backend.src.services.resource_service
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
#
# @INVARIANT: All dashboard responses include git_status and last_task metadata
#
# @TEST_CONTRACT: DashboardsAPI -> {
#     required_fields: {env_id: string, page: integer, page_size: integer},
#     optional_fields: {search: string},
#     invariants: ["Pagination must be valid", "Environment must exist"]
# }
#
# @TEST_FIXTURE: dashboard_list_happy -> {
#     "env_id": "prod",
#     "expected_count": 1,
#     "dashboards": [{"id": 1, "title": "Main Revenue"}]
# }
#
# @TEST_EDGE: pagination_zero_page -> {"env_id": "prod", "page": 0, "status": 400}
# @TEST_EDGE: pagination_oversize -> {"env_id": "prod", "page_size": 101, "status": 400}
# @TEST_EDGE: missing_env -> {"env_id": "ghost", "status": 404}
# @TEST_EDGE: empty_dashboards -> {"env_id": "empty_env", "expected_total": 0}
# @TEST_EDGE: external_superset_failure -> {"env_id": "bad_conn", "status": 503}
#
# @TEST_INVARIANT: metadata_consistency -> verifies: [dashboard_list_happy, empty_dashboards]
#
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from fastapi.responses import JSONResponse
from typing import List, Optional, Dict, Any, Literal
import re
from urllib.parse import urlparse
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session

from ...dependencies import (
    get_config_manager,
    get_task_manager,
    get_resource_service,
    get_mapping_service,
    get_current_user,
    has_permission,
)
from ...core.database import get_db
from ...core.async_superset_client import AsyncSupersetClient
from ...core.logger import logger, belief_scope
from ...core.superset_client import SupersetClient
from ...core.superset_profile_lookup import SupersetAccountLookupAdapter
from ...core.utils.network import DashboardNotFoundError
from ...models.auth import User
from ...services.profile_service import ProfileService
from ...services.resource_service import ResourceService
# [/SECTION]

router = APIRouter(prefix="/api/dashboards", tags=["Dashboards"])


# [DEF:GitStatus:DataClass]
class GitStatus(BaseModel):
    """Git repository sync metadata attached to each dashboard row."""

    branch: Optional[str] = None
    # FIX: the alternation must be grouped. The previous pattern
    # "^OK|DIFF|NO_REPO|ERROR$" bound ^ only to "OK" and $ only to "ERROR",
    # so values such as "OKAY" or "XERROR" could pass validation.
    sync_status: Optional[str] = Field(None, pattern="^(OK|DIFF|NO_REPO|ERROR)$")
    has_repo: Optional[bool] = None
    has_changes_for_commit: Optional[bool] = None
# [/DEF:GitStatus:DataClass]


# [DEF:LastTask:DataClass]
class LastTask(BaseModel):
    """Most recent task (backup / LLM validation) associated with a dashboard."""

    task_id: Optional[str] = None
    # FIX: grouped alternation (see GitStatus.sync_status) — the unanchored
    # middle alternatives previously matched as substrings.
    status: Optional[str] = Field(
        None,
        pattern="^(PENDING|RUNNING|SUCCESS|FAILED|ERROR|AWAITING_INPUT|WAITING_INPUT|AWAITING_MAPPING)$",
    )
    validation_status: Optional[str] = Field(None, pattern="^(PASS|FAIL|WARN|UNKNOWN)$")
# [/DEF:LastTask:DataClass]


# [DEF:DashboardItem:DataClass]
class DashboardItem(BaseModel):
    """Single dashboard row in the hub listing, with Git and task metadata."""

    id: int
    title: str
    slug: Optional[str] = None
    url: Optional[str] = None
    last_modified: Optional[str] = None
    created_by: Optional[str] = None
    modified_by: Optional[str] = None
    owners: Optional[List[str]] = None
    git_status: Optional[GitStatus] = None
    last_task: Optional[LastTask] = None
# [/DEF:DashboardItem:DataClass]


# [DEF:EffectiveProfileFilter:DataClass]
class EffectiveProfileFilter(BaseModel):
    """Describes which profile-default filtering was actually applied to a listing."""

    applied: bool
    source_page: Literal["dashboards_main", "other"] = "dashboards_main"
    override_show_all: bool = False
    username: Optional[str] = None
    match_logic: Optional[
        Literal["owners_or_modified_by", "slug_only", "owners_or_modified_by+slug_only"]
    ] = None
# [/DEF:EffectiveProfileFilter:DataClass]


# [DEF:DashboardsResponse:DataClass]
class DashboardsResponse(BaseModel):
    """Paginated dashboards listing plus the effective profile-filter metadata."""

    dashboards: List[DashboardItem]
    total: int
    page: int
    page_size: int
    total_pages: int
    effective_profile_filter: Optional[EffectiveProfileFilter] = None
# [/DEF:DashboardsResponse:DataClass]
# [DEF:DashboardChartItem:DataClass]
class DashboardChartItem(BaseModel):
    # Chart summary embedded in the dashboard detail response.
    id: int
    title: str
    viz_type: Optional[str] = None
    dataset_id: Optional[int] = None
    last_modified: Optional[str] = None
    overview: Optional[str] = None
# [/DEF:DashboardChartItem:DataClass]


# [DEF:DashboardDatasetItem:DataClass]
class DashboardDatasetItem(BaseModel):
    # Dataset summary embedded in the dashboard detail response.
    id: int
    table_name: str
    # NOTE(review): field name "schema" shadows a pydantic BaseModel attribute in
    # pydantic v1 — presumably the project runs pydantic v2 where this is safe;
    # confirm before renaming anything.
    schema: Optional[str] = None
    database: str
    last_modified: Optional[str] = None
    overview: Optional[str] = None
# [/DEF:DashboardDatasetItem:DataClass]


# [DEF:DashboardDetailResponse:DataClass]
class DashboardDetailResponse(BaseModel):
    # Full dashboard detail payload for the overview page: core attributes
    # plus related charts/datasets and their counts.
    id: int
    title: str
    slug: Optional[str] = None
    url: Optional[str] = None
    description: Optional[str] = None
    last_modified: Optional[str] = None
    published: Optional[bool] = None
    charts: List[DashboardChartItem]
    datasets: List[DashboardDatasetItem]
    chart_count: int
    dataset_count: int
# [/DEF:DashboardDetailResponse:DataClass]


# [DEF:DashboardTaskHistoryItem:DataClass]
class DashboardTaskHistoryItem(BaseModel):
    # One historical task (backup or LLM validation) tied to a dashboard.
    id: str
    plugin_id: str
    status: str
    validation_status: Optional[str] = None
    started_at: Optional[str] = None
    finished_at: Optional[str] = None
    env_id: Optional[str] = None
    summary: Optional[str] = None
# [/DEF:DashboardTaskHistoryItem:DataClass]


# [DEF:DashboardTaskHistoryResponse:DataClass]
class DashboardTaskHistoryResponse(BaseModel):
    # Task history list for a single dashboard, newest first (see endpoint).
    dashboard_id: int
    items: List[DashboardTaskHistoryItem]
# [/DEF:DashboardTaskHistoryResponse:DataClass]


# [DEF:DatabaseMapping:DataClass]
class DatabaseMapping(BaseModel):
    # Suggested source->target database mapping with a confidence score.
    source_db: str
    target_db: str
    source_db_uuid: Optional[str] = None
    target_db_uuid: Optional[str] = None
    confidence: float
# [/DEF:DatabaseMapping:DataClass]


# [DEF:DatabaseMappingsResponse:DataClass]
class DatabaseMappingsResponse(BaseModel):
    mappings: List[DatabaseMapping]
# [/DEF:DatabaseMappingsResponse:DataClass]


# [DEF:_find_dashboard_id_by_slug:Function]
# @PURPOSE: Resolve dashboard numeric ID by slug using Superset list endpoint.
# @PRE: `dashboard_slug` is non-empty.
# @POST: Returns dashboard ID when found, otherwise None.
def _find_dashboard_id_by_slug(
    client: SupersetClient,
    dashboard_slug: str,
) -> Optional[int]:
    """Look up a dashboard's numeric ID by its slug via the Superset list API.

    Both filter-operator key spellings ("opr" and "op") are attempted, one
    request each, for compatibility across Superset versions. Any request
    failure is treated as a miss for that variant. Returns None on no match.
    """
    for operator_key in ("opr", "op"):
        query = {
            "filters": [{"col": "slug", operator_key: "eq", "value": dashboard_slug}],
            "page": 0,
            "page_size": 1,
        }
        try:
            _total, rows = client.get_dashboards_page(query=query)
        except Exception:
            # Best-effort: fall through to the next operator spelling.
            continue
        if not rows:
            continue
        candidate = rows[0].get("id")
        if candidate is not None:
            return int(candidate)
    return None
# [/DEF:_find_dashboard_id_by_slug:Function]


# [DEF:_resolve_dashboard_id_from_ref:Function]
# @PURPOSE: Resolve dashboard ID from slug-first reference with numeric fallback.
# @PRE: `dashboard_ref` is provided in route path.
# @POST: Returns a valid dashboard ID or raises HTTPException(404).
def _resolve_dashboard_id_from_ref(
    dashboard_ref: str,
    client: SupersetClient,
) -> int:
    """Resolve a path reference (slug or numeric id) to a dashboard ID.

    Slug matching is attempted first even when the ref is all digits; the
    numeric interpretation is only a fallback. Raises 404 when nothing
    resolves (including for an empty ref).
    """
    ref = str(dashboard_ref or "").strip()
    if ref:
        matched = _find_dashboard_id_by_slug(client, ref)
        if matched is not None:
            return matched
        if ref.isdigit():
            return int(ref)
    raise HTTPException(status_code=404, detail="Dashboard not found")
# [/DEF:_resolve_dashboard_id_from_ref:Function]


# [DEF:_find_dashboard_id_by_slug_async:Function]
# @PURPOSE: Resolve dashboard numeric ID by slug using async Superset list endpoint.
# @PRE: dashboard_slug is non-empty.
# @POST: Returns dashboard ID when found, otherwise None.
async def _find_dashboard_id_by_slug_async(
    client: AsyncSupersetClient,
    dashboard_slug: str,
) -> Optional[int]:
    """Async counterpart of _find_dashboard_id_by_slug.

    Tries the "opr" then "op" filter-key spellings, one request each;
    request failures count as misses. Returns None when no variant matches.
    """
    for operator_key in ("opr", "op"):
        query = {
            "filters": [{"col": "slug", operator_key: "eq", "value": dashboard_slug}],
            "page": 0,
            "page_size": 1,
        }
        try:
            _total, rows = await client.get_dashboards_page_async(query=query)
        except Exception:
            continue
        if not rows:
            continue
        candidate = rows[0].get("id")
        if candidate is not None:
            return int(candidate)
    return None
# [/DEF:_find_dashboard_id_by_slug_async:Function]


# [DEF:_resolve_dashboard_id_from_ref_async:Function]
# @PURPOSE: Resolve dashboard ID from slug-first reference using async Superset client.
# @PRE: dashboard_ref is provided in route path.
# @POST: Returns valid dashboard ID or raises HTTPException(404).
async def _resolve_dashboard_id_from_ref_async(
    dashboard_ref: str,
    client: AsyncSupersetClient,
) -> int:
    """Async slug-first resolution of a dashboard reference; 404 on failure."""
    ref = str(dashboard_ref or "").strip()
    if ref:
        matched = await _find_dashboard_id_by_slug_async(client, ref)
        if matched is not None:
            return matched
        if ref.isdigit():
            return int(ref)
    raise HTTPException(status_code=404, detail="Dashboard not found")
# [/DEF:_resolve_dashboard_id_from_ref_async:Function]


# [DEF:_normalize_filter_values:Function]
# @PURPOSE: Normalize query filter values to lower-cased non-empty tokens.
# @PRE: values may be None or list of strings.
# @POST: Returns trimmed normalized list preserving input order.
def _normalize_filter_values(values: Optional[List[str]]) -> List[str]: if not values: return [] normalized: List[str] = [] for value in values: token = str(value or "").strip().lower() if token: normalized.append(token) return normalized # [/DEF:_normalize_filter_values:Function] # [DEF:_dashboard_git_filter_value:Function] # @PURPOSE: Build comparable git status token for dashboards filtering. # @PRE: dashboard payload may contain git_status or None. # @POST: Returns one of ok|diff|no_repo|error|pending. def _dashboard_git_filter_value(dashboard: Dict[str, Any]) -> str: git_status = dashboard.get("git_status") or {} sync_status = str(git_status.get("sync_status") or "").strip().upper() has_repo = git_status.get("has_repo") if has_repo is False or sync_status == "NO_REPO": return "no_repo" if sync_status == "DIFF": return "diff" if sync_status == "OK": return "ok" if sync_status == "ERROR": return "error" return "pending" # [/DEF:_dashboard_git_filter_value:Function] # [DEF:_normalize_actor_alias_token:Function] # @PURPOSE: Normalize actor alias token to comparable trim+lower text. # @PRE: value can be scalar/None. # @POST: Returns normalized token or None. def _normalize_actor_alias_token(value: Any) -> Optional[str]: token = str(value or "").strip().lower() return token or None # [/DEF:_normalize_actor_alias_token:Function] # [DEF:_normalize_owner_display_token:Function] # @PURPOSE: Project owner payload value into stable display string for API response contracts. # @PRE: owner can be scalar, dict or None. # @POST: Returns trimmed non-empty owner display token or None. 
def _normalize_owner_display_token(owner: Any) -> Optional[str]: if owner is None: return None if isinstance(owner, dict): username = str(owner.get("username") or owner.get("user_name") or owner.get("name") or "").strip() full_name = str(owner.get("full_name") or "").strip() first_name = str(owner.get("first_name") or "").strip() last_name = str(owner.get("last_name") or "").strip() combined = " ".join(part for part in [first_name, last_name] if part).strip() email = str(owner.get("email") or "").strip() for candidate in [username, full_name, combined, email]: if candidate: return candidate return None normalized = str(owner).strip() return normalized or None # [/DEF:_normalize_owner_display_token:Function] # [DEF:_normalize_dashboard_owner_values:Function] # @PURPOSE: Normalize dashboard owners payload to optional list of display strings. # @PRE: owners payload can be None, scalar, or list with mixed values. # @POST: Returns deduplicated owner labels preserving order, or None when absent. def _normalize_dashboard_owner_values(owners: Any) -> Optional[List[str]]: if owners is None: return None raw_items: List[Any] if isinstance(owners, list): raw_items = owners else: raw_items = [owners] normalized: List[str] = [] for owner in raw_items: token = _normalize_owner_display_token(owner) if token and token not in normalized: normalized.append(token) return normalized # [/DEF:_normalize_dashboard_owner_values:Function] # [DEF:_project_dashboard_response_items:Function] # @PURPOSE: Project dashboard payloads to response-contract-safe shape. # @PRE: dashboards is a list of dict-like dashboard payloads. # @POST: Returned items satisfy DashboardItem owners=list[str]|None contract. 
def _project_dashboard_response_items(dashboards: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # Shallow-copy each dashboard payload and replace "owners" with the
    # normalized list[str]|None shape required by DashboardItem.
    projected: List[Dict[str, Any]] = []
    for dashboard in dashboards:
        projected_dashboard = dict(dashboard)
        projected_dashboard["owners"] = _normalize_dashboard_owner_values(
            projected_dashboard.get("owners")
        )
        projected.append(projected_dashboard)
    return projected
# [/DEF:_project_dashboard_response_items:Function]


# [DEF:_resolve_profile_actor_aliases:Function]
# @PURPOSE: Resolve stable actor aliases for profile filtering without per-dashboard detail fan-out.
# @PRE: bound username is available and env is valid.
# @POST: Returns at least normalized username; may include Superset display-name alias.
# @SIDE_EFFECT: Performs at most one Superset users-lookup request.
def _resolve_profile_actor_aliases(env: Any, bound_username: str) -> List[str]:
    # Empty/whitespace username short-circuits to no aliases at all.
    normalized_bound = _normalize_actor_alias_token(bound_username)
    if not normalized_bound:
        return []
    # The normalized username itself is always a valid alias; everything
    # below is best-effort enrichment and must never fail the caller.
    aliases: List[str] = [normalized_bound]
    try:
        client = SupersetClient(env)
        adapter = SupersetAccountLookupAdapter(
            network_client=client.network,
            environment_id=str(getattr(env, "id", "")),
        )
        # Single users-lookup request (page of up to 20 candidates).
        lookup_payload = adapter.get_users_page(
            search=normalized_bound,
            page_index=0,
            page_size=20,
            sort_column="username",
            sort_order="asc",
        )
        lookup_items = (
            lookup_payload.get("items", [])
            if isinstance(lookup_payload, dict)
            else []
        )
        # Prefer the exact username match; fall back to the first dict item.
        matched_item: Optional[Dict[str, Any]] = None
        for item in lookup_items:
            if not isinstance(item, dict):
                continue
            if _normalize_actor_alias_token(item.get("username")) == normalized_bound:
                matched_item = item
                break
        if matched_item is None:
            for item in lookup_items:
                if isinstance(item, dict):
                    matched_item = item
                    break
        # The display name (if any and distinct) becomes a second alias.
        display_alias = _normalize_actor_alias_token(
            (matched_item or {}).get("display_name")
        )
        if display_alias and display_alias not in aliases:
            aliases.append(display_alias)
        logger.reflect(
            "[REFLECT] Resolved profile actor aliases "
            f"(env={getattr(env, 'id', None)}, bound_username={normalized_bound!r}, "
            f"lookup_items={len(lookup_items)}, aliases={aliases!r})"
        )
    except Exception as alias_error:
        # Lookup failures are logged and swallowed: the bare username alias
        # collected above is still returned.
        logger.explore(
            "[EXPLORE] Failed to resolve profile actor aliases via Superset users lookup "
            f"(env={getattr(env, 'id', None)}, bound_username={normalized_bound!r}): {alias_error}"
        )
    return aliases
# [/DEF:_resolve_profile_actor_aliases:Function]


# [DEF:_matches_dashboard_actor_aliases:Function]
# @PURPOSE: Apply profile actor matching against multiple aliases (username + optional display name).
# @PRE: actor_aliases contains normalized non-empty tokens.
# @POST: Returns True when any alias matches owners OR modified_by.
def _matches_dashboard_actor_aliases(
    profile_service: ProfileService,
    actor_aliases: List[str],
    owners: Optional[Any],
    modified_by: Optional[str],
) -> bool:
    # OR across aliases: first alias that ProfileService accepts wins.
    for actor_alias in actor_aliases:
        if profile_service.matches_dashboard_actor(
            bound_username=actor_alias,
            owners=owners,
            modified_by=modified_by,
        ):
            return True
    return False
# [/DEF:_matches_dashboard_actor_aliases:Function]


# [DEF:get_dashboards:Function]
# @PURPOSE: Fetch list of dashboards from a specific environment with Git status and last task status
# @PRE: env_id must be a valid environment ID
# @PRE: page must be >= 1 if provided
# @PRE: page_size must be between 1 and 100 if provided
# @POST: Returns a list of dashboards with enhanced metadata and pagination info
# @POST: Response includes pagination metadata (page, page_size, total, total_pages)
# @POST: Response includes effective profile filter metadata for main dashboards page context
# @PARAM: env_id (str) - The environment ID to fetch dashboards from
# @PARAM: search (Optional[str]) - Filter by title/slug
# @PARAM: page (Optional[int]) - Page number (default: 1)
# @PARAM: page_size (Optional[int]) - Items per page (default: 10, max: 100)
# @RETURN: DashboardsResponse - List of dashboards with status metadata
# @RELATION: CALLS -> ResourceService.get_dashboards_with_status
@router.get("", response_model=DashboardsResponse)
async def get_dashboards(
    env_id: str,
    search: Optional[str] = None,
    page: int = 1,
    page_size: int = 10,
    page_context: Literal["dashboards_main", "other"] = Query(default="dashboards_main"),
    apply_profile_default: bool = Query(default=True),
    override_show_all: bool = Query(default=False),
    filter_title: Optional[List[str]] = Query(default=None),
    filter_git_status: Optional[List[str]] = Query(default=None),
    filter_llm_status: Optional[List[str]] = Query(default=None),
    filter_changed_on: Optional[List[str]] = Query(default=None),
    filter_actor: Optional[List[str]] = Query(default=None),
    config_manager=Depends(get_config_manager),
    task_manager=Depends(get_task_manager),
    resource_service=Depends(get_resource_service),
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """List dashboards for an environment with pagination, search, column
    filters and optional profile-default filtering (see header contract)."""
    with belief_scope(
        "get_dashboards",
        (
            f"env_id={env_id}, search={search}, page={page}, page_size={page_size}, "
            f"page_context={page_context}, apply_profile_default={apply_profile_default}, "
            f"override_show_all={override_show_all}"
        ),
    ):
        # --- Pagination and environment validation (400 / 404) ---
        if page < 1:
            logger.error(f"[get_dashboards][Coherence:Failed] Invalid page: {page}")
            raise HTTPException(status_code=400, detail="Page must be >= 1")
        if page_size < 1 or page_size > 100:
            logger.error(f"[get_dashboards][Coherence:Failed] Invalid page_size: {page_size}")
            raise HTTPException(status_code=400, detail="Page size must be between 1 and 100")
        environments = config_manager.get_environments()
        env = next((e for e in environments if e.id == env_id), None)
        if not env:
            logger.error(f"[get_dashboards][Coherence:Failed] Environment not found: {env_id}")
            raise HTTPException(status_code=404, detail="Environment not found")

        # --- Resolve profile-default filtering flags (best-effort) ---
        profile_service = ProfileService(db=db, config_manager=config_manager)
        bound_username: Optional[str] = None
        can_apply_profile_filter = False
        can_apply_slug_filter = False
        effective_profile_filter = EffectiveProfileFilter(
            applied=False,
            source_page=page_context,
            override_show_all=bool(override_show_all),
            username=None,
            match_logic=None,
        )
        try:
            profile_preference = profile_service.get_my_preference(current_user).preference
            normalized_username = str(
                getattr(profile_preference, "superset_username_normalized", None) or ""
            ).strip().lower()
            raw_username = str(
                getattr(profile_preference, "superset_username", None) or ""
            ).strip().lower()
            bound_username = normalized_username or raw_username or None
            # Actor filter requires the main page context, no explicit
            # "show all" override, an opted-in preference, and a username.
            can_apply_profile_filter = (
                page_context == "dashboards_main"
                and bool(apply_profile_default)
                and not bool(override_show_all)
                and bool(getattr(profile_preference, "show_only_my_dashboards", False))
                and bool(bound_username)
            )
            # Slug filter defaults to True when the preference attribute is absent.
            can_apply_slug_filter = (
                page_context == "dashboards_main"
                and bool(apply_profile_default)
                and not bool(override_show_all)
                and bool(getattr(profile_preference, "show_only_slug_dashboards", True))
            )
            profile_match_logic = None
            if can_apply_profile_filter and can_apply_slug_filter:
                profile_match_logic = "owners_or_modified_by+slug_only"
            elif can_apply_profile_filter:
                profile_match_logic = "owners_or_modified_by"
            elif can_apply_slug_filter:
                profile_match_logic = "slug_only"
            effective_profile_filter = EffectiveProfileFilter(
                applied=bool(can_apply_profile_filter or can_apply_slug_filter),
                source_page=page_context,
                override_show_all=bool(override_show_all),
                username=bound_username if can_apply_profile_filter else None,
                match_logic=profile_match_logic,
            )
        except Exception as profile_error:
            # Preference lookup failure never blocks the listing.
            logger.explore(
                f"[EXPLORE] Profile preference unavailable; continuing without profile-default filter: {profile_error}"
            )

        try:
            all_tasks = task_manager.get_all_tasks()
            # Normalize all column filters to lower-cased tokens.
            title_filters = _normalize_filter_values(filter_title)
            git_filters = _normalize_filter_values(filter_git_status)
            llm_filters = _normalize_filter_values(filter_llm_status)
            changed_on_filters = _normalize_filter_values(filter_changed_on)
            actor_filters = _normalize_filter_values(filter_actor)
            has_column_filters = any(
                (
                    title_filters,
                    git_filters,
                    llm_filters,
                    changed_on_filters,
                    actor_filters,
                )
            )
            # Any column or profile filter forces the full-scan path below,
            # since server-side paging cannot apply those filters.
            needs_full_scan = has_column_filters or bool(can_apply_profile_filter) or bool(can_apply_slug_filter)
            if isinstance(resource_service, ResourceService) and not needs_full_scan:
                # --- Fast path: server-side pagination ---
                try:
                    page_payload = await resource_service.get_dashboards_page_with_status(
                        env,
                        all_tasks,
                        page=page,
                        page_size=page_size,
                        search=search,
                        include_git_status=False,
                        require_slug=bool(can_apply_slug_filter),
                    )
                    paginated_dashboards = page_payload["dashboards"]
                    total = page_payload["total"]
                    total_pages = page_payload["total_pages"]
                except Exception as page_error:
                    # Compatibility fallback: fetch everything, then filter
                    # and paginate in memory.
                    logger.warning(
                        "[get_dashboards][Action] Page-based fetch failed; using compatibility fallback: %s",
                        page_error,
                    )
                    dashboards = await resource_service.get_dashboards_with_status(
                        env,
                        all_tasks,
                        include_git_status=False,
                        require_slug=bool(can_apply_slug_filter),
                    )
                    if search:
                        search_lower = search.lower()
                        dashboards = [
                            d for d in dashboards
                            if search_lower in d.get("title", "").lower()
                            or search_lower in d.get("slug", "").lower()
                        ]
                    total = len(dashboards)
                    total_pages = (total + page_size - 1) // page_size if total > 0 else 1
                    start_idx = (page - 1) * page_size
                    end_idx = start_idx + page_size
                    paginated_dashboards = dashboards[start_idx:end_idx]
            else:
                # --- Full-scan path: fetch all, filter in memory ---
                # Git status is only hydrated when a git filter needs it.
                dashboards = await resource_service.get_dashboards_with_status(
                    env,
                    all_tasks,
                    include_git_status=bool(git_filters),
                    require_slug=bool(can_apply_slug_filter),
                )
                if can_apply_profile_filter and bound_username:
                    actor_aliases = _resolve_profile_actor_aliases(env, bound_username)
                    if not actor_aliases:
                        actor_aliases = [bound_username]
                    logger.reason(
                        "[REASON] Applying profile actor filter "
                        f"(env={env_id}, bound_username={bound_username}, actor_aliases={actor_aliases!r}, "
                        f"dashboards_before={len(dashboards)})"
                    )
                    filtered_dashboards: List[Dict[str, Any]] = []
                    # Only the first few rows are logged in detail.
                    max_actor_samples = 15
                    for index, dashboard in enumerate(dashboards):
                        owners_value = dashboard.get("owners")
                        created_by_value = dashboard.get("created_by")
                        modified_by_value = dashboard.get("modified_by")
                        matches_actor = _matches_dashboard_actor_aliases(
                            profile_service=profile_service,
                            actor_aliases=actor_aliases,
                            owners=owners_value,
                            modified_by=modified_by_value,
                        )
                        if index < max_actor_samples:
                            logger.reflect(
                                "[REFLECT] Profile actor filter sample "
                                f"(env={env_id}, dashboard_id={dashboard.get('id')}, "
                                f"bound_username={bound_username!r}, actor_aliases={actor_aliases!r}, "
                                f"owners={owners_value!r}, created_by={created_by_value!r}, "
                                f"modified_by={modified_by_value!r}, matches={matches_actor})"
                            )
                        if matches_actor:
                            filtered_dashboards.append(dashboard)
                    logger.reflect(
                        "[REFLECT] Profile actor filter summary "
                        f"(env={env_id}, bound_username={bound_username!r}, "
                        f"dashboards_before={len(dashboards)}, dashboards_after={len(filtered_dashboards)})"
                    )
                    dashboards = filtered_dashboards
                if can_apply_slug_filter:
                    # Keep only dashboards that actually have a slug.
                    dashboards = [
                        dashboard
                        for dashboard in dashboards
                        if str(dashboard.get("slug") or "").strip()
                    ]
                if search:
                    search_lower = search.lower()
                    dashboards = [
                        d for d in dashboards
                        if search_lower in d.get("title", "").lower()
                        or search_lower in d.get("slug", "").lower()
                    ]

                def _matches_dashboard_filters(dashboard: Dict[str, Any]) -> bool:
                    # AND across column filters; each filter list is an
                    # OR over its normalized tokens.
                    title_value = str(dashboard.get("title") or "").strip().lower()
                    if title_filters and title_value not in title_filters:
                        return False
                    if git_filters:
                        git_value = _dashboard_git_filter_value(dashboard)
                        if git_value not in git_filters:
                            return False
                    llm_value = str(
                        ((dashboard.get("last_task") or {}).get("validation_status")) or "UNKNOWN"
                    ).strip().lower()
                    if llm_filters and llm_value not in llm_filters:
                        return False
                    # changed_on matches either the full timestamp or its
                    # date-only (first 10 chars) prefix.
                    changed_on_raw = str(dashboard.get("last_modified") or "").strip().lower()
                    changed_on_prefix = (
                        changed_on_raw[:10] if len(changed_on_raw) >= 10 else changed_on_raw
                    )
                    if (
                        changed_on_filters
                        and changed_on_raw not in changed_on_filters
                        and changed_on_prefix not in changed_on_filters
                    ):
                        return False
                    # Actor column compares against a comma-joined owners
                    # string; "-" stands in for "no owners".
                    owners = dashboard.get("owners") or []
                    if isinstance(owners, list):
                        actor_value = ", ".join(
                            str(item).strip() for item in owners if str(item).strip()
                        ).lower()
                    else:
                        actor_value = str(owners).strip().lower()
                    if not actor_value:
                        actor_value = "-"
                    if actor_filters and actor_value not in actor_filters:
                        return False
                    return True

                if has_column_filters:
                    dashboards = [d for d in dashboards if _matches_dashboard_filters(d)]
                total = len(dashboards)
                total_pages = (total + page_size - 1) // page_size if total > 0 else 1
                start_idx = (page - 1) * page_size
                end_idx = start_idx + page_size
                paginated_dashboards = dashboards[start_idx:end_idx]
            logger.info(
                f"[get_dashboards][Coherence:OK] Returning {len(paginated_dashboards)} dashboards "
                f"(page {page}/{total_pages}, total: {total}, profile_filter_applied={effective_profile_filter.applied})"
            )
            response_dashboards = _project_dashboard_response_items(paginated_dashboards)
            return DashboardsResponse(
                dashboards=response_dashboards,
                total=total,
                page=page,
                page_size=page_size,
                total_pages=total_pages,
                effective_profile_filter=effective_profile_filter,
            )
        except Exception as e:
            # Any upstream (Superset) failure surfaces as 503.
            logger.error(f"[get_dashboards][Coherence:Failed] Failed to fetch dashboards: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to fetch dashboards: {str(e)}")
# [/DEF:get_dashboards:Function]


# [DEF:get_database_mappings:Function]
# @PURPOSE: Get database mapping suggestions between source and target environments
# @PRE: User has permission plugin:migration:read
# @PRE: source_env_id and target_env_id are valid environment IDs
# @POST: Returns list of suggested database mappings with confidence scores
# @PARAM: source_env_id (str) - Source environment ID
# @PARAM: target_env_id (str) - Target environment ID
# @RETURN: DatabaseMappingsResponse - List of suggested mappings
# @RELATION: CALLS -> MappingService.get_suggestions
@router.get("/db-mappings", response_model=DatabaseMappingsResponse)
async def get_database_mappings(
    source_env_id: str,
    target_env_id: str,
    config_manager=Depends(get_config_manager),
    mapping_service=Depends(get_mapping_service),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """Suggest source->target database mappings with confidence scores."""
    with belief_scope("get_database_mappings", f"source={source_env_id}, target={target_env_id}"):
        # Validate environments exist
        environments = config_manager.get_environments()
        source_env = next((e for e in environments if e.id == source_env_id), None)
        target_env = next((e for e in environments if e.id == target_env_id), None)
        if not source_env:
            logger.error(f"[get_database_mappings][Coherence:Failed] Source environment not found: {source_env_id}")
            raise HTTPException(status_code=404, detail="Source environment not found")
        if not target_env:
            logger.error(f"[get_database_mappings][Coherence:Failed] Target environment not found: {target_env_id}")
            raise HTTPException(status_code=404, detail="Target environment not found")
        try:
            # Get mapping suggestions using MappingService
            suggestions = await mapping_service.get_suggestions(source_env_id, target_env_id)
            # Format suggestions as DatabaseMapping objects
            mappings = [
                DatabaseMapping(
                    source_db=s.get('source_db', ''),
                    target_db=s.get('target_db', ''),
                    source_db_uuid=s.get('source_db_uuid'),
                    target_db_uuid=s.get('target_db_uuid'),
                    confidence=s.get('confidence', 0.0)
                )
                for s in suggestions
            ]
            logger.info(f"[get_database_mappings][Coherence:OK] Returning {len(mappings)} database mapping suggestions")
            return DatabaseMappingsResponse(mappings=mappings)
        except Exception as e:
            logger.error(f"[get_database_mappings][Coherence:Failed] Failed to get database mappings: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to get database mappings: {str(e)}")
# [/DEF:get_database_mappings:Function]


# [DEF:get_dashboard_detail:Function]
# @PURPOSE: Fetch detailed dashboard info with related charts and datasets
# @PRE: env_id must be valid and dashboard ref (slug or id) must exist
# @POST: Returns dashboard detail payload for overview page
# @RELATION: CALLS -> SupersetClient.get_dashboard_detail
@router.get("/{dashboard_ref}", response_model=DashboardDetailResponse)
async def get_dashboard_detail(
    dashboard_ref: str,
    env_id: str,
    config_manager=Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """Return dashboard overview detail (charts + datasets) for one environment.

    The ref is resolved slug-first; unknown environments yield 404 and any
    upstream Superset failure surfaces as 503.
    """
    with belief_scope("get_dashboard_detail", f"dashboard_ref={dashboard_ref}, env_id={env_id}"):
        env = next(
            (e for e in config_manager.get_environments() if e.id == env_id),
            None,
        )
        if env is None:
            logger.error(f"[get_dashboard_detail][Coherence:Failed] Environment not found: {env_id}")
            raise HTTPException(status_code=404, detail="Environment not found")

        client = AsyncSupersetClient(env)
        try:
            dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, client)
            detail = await client.get_dashboard_detail_async(dashboard_id)
            logger.info(
                f"[get_dashboard_detail][Coherence:OK] Dashboard ref={dashboard_ref} resolved_id={dashboard_id}: "
                f"{detail.get('chart_count', 0)} charts, {detail.get('dataset_count', 0)} datasets"
            )
            return DashboardDetailResponse(**detail)
        except HTTPException:
            # Resolution failures (404) pass through untouched.
            raise
        except Exception as e:
            logger.error(f"[get_dashboard_detail][Coherence:Failed] Failed to fetch dashboard detail: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard detail: {str(e)}")
        finally:
            # Always release the async HTTP client.
            await client.aclose()
# [/DEF:get_dashboard_detail:Function]


# [DEF:_task_matches_dashboard:Function]
# @PURPOSE: Checks whether task params are tied to a specific dashboard and environment.
# @PRE: task-like object exposes plugin_id and params fields.
# @POST: Returns True only for supported task plugins tied to dashboard_id (+optional env_id).
def _task_matches_dashboard(task: Any, dashboard_id: int, env_id: Optional[str]) -> bool: plugin_id = getattr(task, "plugin_id", None) if plugin_id not in {"superset-backup", "llm_dashboard_validation"}: return False params = getattr(task, "params", {}) or {} dashboard_id_str = str(dashboard_id) if plugin_id == "llm_dashboard_validation": task_dashboard_id = params.get("dashboard_id") if str(task_dashboard_id) != dashboard_id_str: return False if env_id: task_env = params.get("environment_id") return str(task_env) == str(env_id) return True # superset-backup can pass dashboards as "dashboard_ids" or "dashboards" dashboard_ids = params.get("dashboard_ids") or params.get("dashboards") or [] normalized_ids = {str(item) for item in dashboard_ids} if dashboard_id_str not in normalized_ids: return False if env_id: task_env = params.get("environment_id") or params.get("env") return str(task_env) == str(env_id) return True # [/DEF:_task_matches_dashboard:Function] # [DEF:get_dashboard_tasks_history:Function] # @PURPOSE: Returns history of backup and LLM validation tasks for a dashboard. # @PRE: dashboard ref (slug or id) is valid. # @POST: Response contains sorted task history (newest first). 
@router.get("/{dashboard_ref}/tasks", response_model=DashboardTaskHistoryResponse)
async def get_dashboard_tasks_history(
    dashboard_ref: str,
    env_id: Optional[str] = None,
    limit: int = Query(20, ge=1, le=100),
    config_manager=Depends(get_config_manager),
    task_manager=Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "READ"))
):
    """Return backup / LLM-validation task history for one dashboard.

    ``dashboard_ref`` may be a numeric ID (used directly) or a slug, in which
    case ``env_id`` is required so the slug can be resolved against that
    environment's Superset instance. Matching tasks are sorted newest-first
    and truncated to ``limit``.

    Raises:
        HTTPException 400: slug reference given without ``env_id``.
        HTTPException 404: ``env_id`` does not match a configured environment.
    """
    with belief_scope("get_dashboard_tasks_history", f"dashboard_ref={dashboard_ref}, env_id={env_id}, limit={limit}"):
        dashboard_id: Optional[int] = None
        client: Optional[AsyncSupersetClient] = None
        try:
            if dashboard_ref.isdigit():
                # Numeric ref is already the dashboard ID — no client needed.
                dashboard_id = int(dashboard_ref)
            elif env_id:
                environments = config_manager.get_environments()
                env = next((e for e in environments if e.id == env_id), None)
                if not env:
                    logger.error(f"[get_dashboard_tasks_history][Coherence:Failed] Environment not found: {env_id}")
                    raise HTTPException(status_code=404, detail="Environment not found")
                # Slug refs require a live client to resolve against Superset.
                client = AsyncSupersetClient(env)
                dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, client)
            else:
                logger.error(
                    "[get_dashboard_tasks_history][Coherence:Failed] Non-numeric dashboard ref requires env_id"
                )
                raise HTTPException(
                    status_code=400,
                    detail="env_id is required when dashboard reference is a slug",
                )
            matching_tasks = []
            for task in task_manager.get_all_tasks():
                if _task_matches_dashboard(task, dashboard_id, env_id):
                    matching_tasks.append(task)

            def _sort_key(task_obj: Any) -> str:
                # Sorts on the stringified timestamp; ISO-8601 strings order
                # lexicographically in chronological order. finished_at is only
                # consulted when started_at is empty/missing.
                return (
                    str(getattr(task_obj, "started_at", "") or "")
                    or str(getattr(task_obj, "finished_at", "") or "")
                )

            matching_tasks.sort(key=_sort_key, reverse=True)
            selected = matching_tasks[:limit]
            items = []
            for task in selected:
                result = getattr(task, "result", None)
                summary = None
                validation_status = None
                if isinstance(result, dict):
                    # Surface the raw result status separately from the summary text.
                    raw_validation_status = result.get("status")
                    if raw_validation_status is not None:
                        validation_status = str(raw_validation_status)
                    summary = (
                        result.get("summary")
                        or result.get("status")
                        or result.get("message")
                    )
                params = getattr(task, "params", {}) or {}
                items.append(
                    DashboardTaskHistoryItem(
                        id=str(getattr(task, "id", "")),
                        plugin_id=str(getattr(task, "plugin_id", "")),
                        status=str(getattr(task, "status", "")),
                        validation_status=validation_status,
                        # Timestamps serialized to ISO strings; None when absent.
                        started_at=getattr(task, "started_at", None).isoformat() if getattr(task, "started_at", None) else None,
                        finished_at=getattr(task, "finished_at", None).isoformat() if getattr(task, "finished_at", None) else None,
                        # Validation tasks store env under 'environment_id', backups under 'env'.
                        env_id=str(params.get("environment_id") or params.get("env")) if (params.get("environment_id") or params.get("env")) else None,
                        summary=summary,
                    )
                )
            logger.info(f"[get_dashboard_tasks_history][Coherence:OK] Found {len(items)} tasks for dashboard_ref={dashboard_ref}, dashboard_id={dashboard_id}")
            return DashboardTaskHistoryResponse(dashboard_id=dashboard_id, items=items)
        finally:
            # Client is only opened for slug resolution; close it when present.
            if client is not None:
                await client.aclose()
# [/DEF:get_dashboard_tasks_history:Function]

# [DEF:get_dashboard_thumbnail:Function]
# @PURPOSE: Proxies Superset dashboard thumbnail with cache support.
# @PRE: env_id must exist.
# @POST: Returns image bytes or 202 when thumbnail is being prepared by Superset.
@router.get("/{dashboard_ref}/thumbnail")
async def get_dashboard_thumbnail(
    dashboard_ref: str,
    env_id: str,
    force: bool = Query(False),
    config_manager=Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """Proxy the Superset dashboard thumbnail image to the caller.

    Tries the newer ``cache_dashboard_screenshot`` endpoint first to obtain a
    digest, then falls back to the dashboard payload's ``thumbnail_url``.
    Returns the raw image bytes, or relays a 202 JSON body while Superset is
    still generating the thumbnail.

    Raises:
        HTTPException 404: unknown environment or dashboard/thumbnail.
        HTTPException 503: any other upstream failure.
    """
    with belief_scope("get_dashboard_thumbnail", f"dashboard_ref={dashboard_ref}, env_id={env_id}, force={force}"):
        environments = config_manager.get_environments()
        env = next((e for e in environments if e.id == env_id), None)
        if not env:
            logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Environment not found: {env_id}")
            raise HTTPException(status_code=404, detail="Environment not found")
        client = AsyncSupersetClient(env)
        try:
            dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, client)
            digest = None
            thumb_endpoint = None
            # Preferred flow (newer Superset): ask server to cache screenshot and return digest/image_url.
            try:
                screenshot_payload = await client.network.request(
                    method="POST",
                    endpoint=f"/dashboard/{dashboard_id}/cache_dashboard_screenshot/",
                    json={"force": force},
                )
                payload = screenshot_payload.get("result", screenshot_payload) if isinstance(screenshot_payload, dict) else {}
                image_url = payload.get("image_url", "") if isinstance(payload, dict) else ""
                if isinstance(image_url, str) and image_url:
                    # Extract the digest segment from .../dashboard/<id>/thumbnail/<digest>/
                    matched = re.search(r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", image_url)
                    if matched:
                        digest = matched.group(1)
            except DashboardNotFoundError:
                logger.warning(
                    "[get_dashboard_thumbnail][Fallback] cache_dashboard_screenshot endpoint unavailable, fallback to dashboard.thumbnail_url"
                )
            # Fallback flow (older Superset): read thumbnail_url from dashboard payload.
            if not digest:
                dashboard_payload = await client.network.request(
                    method="GET",
                    endpoint=f"/dashboard/{dashboard_id}",
                )
                dashboard_data = dashboard_payload.get("result", dashboard_payload) if isinstance(dashboard_payload, dict) else {}
                thumbnail_url = dashboard_data.get("thumbnail_url", "") if isinstance(dashboard_data, dict) else ""
                if isinstance(thumbnail_url, str) and thumbnail_url:
                    parsed = urlparse(thumbnail_url)
                    parsed_path = parsed.path or thumbnail_url
                    # Strip the API prefix — presumably client.network already
                    # targets /api/v1; TODO confirm against AsyncSupersetClient.
                    if parsed_path.startswith("/api/v1/"):
                        parsed_path = parsed_path[len("/api/v1"):]
                    thumb_endpoint = parsed_path
                    matched = re.search(r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", parsed_path)
                    if matched:
                        digest = matched.group(1)
            if not thumb_endpoint:
                # Last resort: canonical thumbnail path with digest (or 'latest').
                thumb_endpoint = f"/dashboard/{dashboard_id}/thumbnail/{digest or 'latest'}/"
            thumb_response = await client.network.request(
                method="GET",
                endpoint=thumb_endpoint,
                raw_response=True,
                allow_redirects=True,
            )
            if thumb_response.status_code == 202:
                # Superset is still rendering the screenshot; relay its 202 payload.
                payload_202: Dict[str, Any] = {}
                try:
                    payload_202 = thumb_response.json()
                except Exception:
                    payload_202 = {"message": "Thumbnail is being generated"}
                return JSONResponse(status_code=202, content=payload_202)
            content_type = thumb_response.headers.get("Content-Type", "image/png")
            return Response(content=thumb_response.content, media_type=content_type)
        except DashboardNotFoundError as e:
            logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Dashboard not found for thumbnail: {e}")
            raise HTTPException(status_code=404, detail="Dashboard thumbnail not found")
        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Failed to fetch dashboard thumbnail: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard thumbnail: {str(e)}")
        finally:
            # Always release the async HTTP client.
            await client.aclose()
# [/DEF:get_dashboard_thumbnail:Function]

# [DEF:MigrateRequest:DataClass]
class MigrateRequest(BaseModel):
    """Request body for bulk dashboard migration between environments."""
    source_env_id: str = Field(..., description="Source environment ID")
    target_env_id: str = Field(..., description="Target environment ID")
    dashboard_ids: List[int] = Field(..., description="List of dashboard IDs to migrate")
    db_mappings: Optional[Dict[str, str]] = Field(None, description="Database mappings for migration")
    replace_db_config: bool = Field(False, description="Replace database configuration")
# [/DEF:MigrateRequest:DataClass]


# [DEF:TaskResponse:DataClass]
class TaskResponse(BaseModel):
    """Minimal response carrying the ID of a newly created task."""
    task_id: str
# [/DEF:TaskResponse:DataClass]


# [DEF:migrate_dashboards:Function]
# @PURPOSE: Trigger bulk migration of dashboards from source to target environment
# @PRE: User has permission plugin:migration:execute
# @PRE: source_env_id and target_env_id are valid environment IDs
# @PRE: dashboard_ids is a non-empty list
# @POST: Returns task_id for tracking migration progress
# @POST: Task is created and queued for execution
# @PARAM: request (MigrateRequest) - Migration request with source, target, and dashboard IDs
# @RETURN: TaskResponse - Task ID for tracking
# @RELATION: DISPATCHES -> MigrationPlugin
# @RELATION: CALLS -> task_manager.create_task
@router.post("/migrate", response_model=TaskResponse)
async def migrate_dashboards(
    request: MigrateRequest,
    config_manager=Depends(get_config_manager),
    task_manager=Depends(get_task_manager),
    _ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
    """Create and queue a 'superset-migration' task for the given dashboards.

    Raises:
        HTTPException 400: empty ``dashboard_ids``.
        HTTPException 404: unknown source or target environment.
        HTTPException 503: task creation failed.
    """
    with belief_scope("migrate_dashboards", f"source={request.source_env_id}, target={request.target_env_id}, count={len(request.dashboard_ids)}"):
        # Validate request
        if not request.dashboard_ids:
            logger.error("[migrate_dashboards][Coherence:Failed] No dashboard IDs provided")
            raise HTTPException(status_code=400, detail="At least one dashboard ID must be provided")

        # Validate environments exist before queueing any work.
        environments = config_manager.get_environments()
        source_env = next((e for e in environments if e.id == request.source_env_id), None)
        target_env = next((e for e in environments if e.id == request.target_env_id), None)
        if not source_env:
            logger.error(f"[migrate_dashboards][Coherence:Failed] Source environment not found: {request.source_env_id}")
            raise HTTPException(status_code=404, detail="Source environment not found")
        if not target_env:
            logger.error(f"[migrate_dashboards][Coherence:Failed] Target environment not found: {request.target_env_id}")
            raise HTTPException(status_code=404, detail="Target environment not found")

        try:
            # Create migration task.
            # NOTE(review): key names ('selected_ids', 'db_mappings', ...) must match
            # what the superset-migration plugin reads — verify against the plugin.
            task_params = {
                'source_env_id': request.source_env_id,
                'target_env_id': request.target_env_id,
                'selected_ids': request.dashboard_ids,
                'replace_db_config': request.replace_db_config,
                'db_mappings': request.db_mappings or {}
            }
            task_obj = await task_manager.create_task(
                plugin_id='superset-migration',
                params=task_params
            )
            logger.info(f"[migrate_dashboards][Coherence:OK] Migration task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards")
            return TaskResponse(task_id=str(task_obj.id))
        except Exception as e:
            logger.error(f"[migrate_dashboards][Coherence:Failed] Failed to create migration task: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to create migration task: {str(e)}")
# [/DEF:migrate_dashboards:Function]


# [DEF:BackupRequest:DataClass]
class BackupRequest(BaseModel):
    """Request body for bulk dashboard backup (optionally cron-scheduled)."""
    env_id: str = Field(..., description="Environment ID")
    dashboard_ids: List[int] = Field(..., description="List of dashboard IDs to backup")
    schedule: Optional[str] = Field(None, description="Cron schedule for recurring backups (e.g., '0 0 * * *')")
# [/DEF:BackupRequest:DataClass]


# [DEF:backup_dashboards:Function]
# @PURPOSE: Trigger bulk backup of dashboards with optional cron schedule
# @PRE: User has permission plugin:backup:execute
# @PRE: env_id is a valid environment ID
# @PRE: dashboard_ids is a non-empty list
# @POST: Returns task_id for tracking backup progress
# @POST: Task is created and queued for execution
# @POST: If schedule is provided, a scheduled task is created
# @PARAM: request (BackupRequest) - Backup request with environment and dashboard
# IDs
# @RETURN: TaskResponse - Task ID for tracking
# @RELATION: DISPATCHES -> BackupPlugin
# @RELATION: CALLS -> task_manager.create_task
@router.post("/backup", response_model=TaskResponse)
async def backup_dashboards(
    request: BackupRequest,
    config_manager=Depends(get_config_manager),
    task_manager=Depends(get_task_manager),
    _ = Depends(has_permission("plugin:backup", "EXECUTE"))
):
    """Queue a bulk backup of the requested dashboards.

    Validates that at least one dashboard ID was supplied and that the
    environment exists, then dispatches a 'superset-backup' task and returns
    its ID for progress tracking. An optional cron ``schedule`` is forwarded
    to the task params untouched.

    Raises:
        HTTPException 400: empty ``dashboard_ids``.
        HTTPException 404: unknown environment.
        HTTPException 503: task creation failed.
    """
    scope_detail = f"env={request.env_id}, count={len(request.dashboard_ids)}, schedule={request.schedule}"
    with belief_scope("backup_dashboards", scope_detail):
        # Reject an empty selection up front — queuing a no-op task helps nobody.
        if not request.dashboard_ids:
            logger.error("[backup_dashboards][Coherence:Failed] No dashboard IDs provided")
            raise HTTPException(status_code=400, detail="At least one dashboard ID must be provided")

        # The environment must be configured before work is queued against it.
        if not any(candidate.id == request.env_id for candidate in config_manager.get_environments()):
            logger.error(f"[backup_dashboards][Coherence:Failed] Environment not found: {request.env_id}")
            raise HTTPException(status_code=404, detail="Environment not found")

        try:
            # Dispatch the backup plugin with params inlined at the call site.
            task_obj = await task_manager.create_task(
                plugin_id='superset-backup',
                params={
                    'env': request.env_id,
                    'dashboards': request.dashboard_ids,
                    'schedule': request.schedule,
                },
            )
            logger.info(f"[backup_dashboards][Coherence:OK] Backup task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards")
            return TaskResponse(task_id=str(task_obj.id))
        except Exception as e:
            logger.error(f"[backup_dashboards][Coherence:Failed] Failed to create backup task: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to create backup task: {str(e)}")
# [/DEF:backup_dashboards:Function]

# [/DEF:backend.src.api.routes.dashboards:Module]