# 319 lines · 15 KiB · Python
# [DEF:MigrationApi:Module]
|
|
# @COMPLEXITY: 5
|
|
# @SEMANTICS: api, migration, dashboards, sync, dry-run
|
|
# @PURPOSE: HTTP contract layer for migration orchestration, settings, dry-run, and mapping sync endpoints.
|
|
# @LAYER: Infra
|
|
# @RELATION: DEPENDS_ON ->[AppDependencies]
|
|
# @RELATION: DEPENDS_ON ->[backend.src.core.database]
|
|
# @RELATION: DEPENDS_ON ->[backend.src.core.superset_client.SupersetClient]
|
|
# @RELATION: DEPENDS_ON ->[backend.src.core.migration.dry_run_orchestrator.MigrationDryRunService]
|
|
# @RELATION: DEPENDS_ON ->[backend.src.core.mapping_service.IdMappingService]
|
|
# @RELATION: DEPENDS_ON ->[backend.src.models.dashboard]
|
|
# @RELATION: DEPENDS_ON ->[backend.src.models.mapping]
|
|
# @INVARIANT: Migration endpoints never execute with invalid environment references and always return explicit HTTP errors on guard failures.
|
|
# @PRE: Backend core services initialized and Database session available.
|
|
# @POST: Migration tasks are enqueued or dry-run results are computed and returned.
|
|
# @SIDE_EFFECT: Enqueues long-running tasks, potentially mutates ResourceMapping table, and performs remote Superset API calls.
|
|
# @DATA_CONTRACT: [DashboardSelection | QueryParams] -> [TaskResponse | DryRunResult | MappingSummary]
|
|
# @TEST_CONTRACT: [DashboardSelection + configured envs] -> [task_id | dry-run result | sync summary]
|
|
# @TEST_SCENARIO: [invalid_environment] -> [HTTP_400_or_404]
|
|
# @TEST_SCENARIO: [valid_execution] -> [success_payload_with_required_fields]
|
|
# @TEST_EDGE: [missing_field] ->[HTTP_400]
|
|
# @TEST_EDGE: [invalid_type] ->[validation_error]
|
|
# @TEST_EDGE: [external_fail] ->[HTTP_500]
|
|
# @TEST_INVARIANT: [EnvironmentValidationBeforeAction] -> VERIFIED_BY: [invalid_environment, valid_execution]
|
|
|
|
from fastapi import APIRouter, Depends, HTTPException, Query
|
|
from typing import List, Dict, Any, Optional
|
|
from sqlalchemy.orm import Session
|
|
from ...dependencies import get_config_manager, get_task_manager, has_permission
|
|
from ...core.database import get_db
|
|
from ...models.dashboard import DashboardMetadata, DashboardSelection
|
|
from ...core.superset_client import SupersetClient
|
|
from ...core.logger import logger, belief_scope
|
|
from ...core.migration.dry_run_orchestrator import MigrationDryRunService
|
|
from ...core.mapping_service import IdMappingService
|
|
from ...models.mapping import ResourceMapping
|
|
|
|
# Shared router for all migration endpoints; mounted under the /api prefix
# and grouped under the "migration" OpenAPI tag.
router = APIRouter(prefix="/api", tags=["migration"])
|
|
|
|
# [DEF:get_dashboards:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetch dashboard metadata from a requested environment for migration selection UI.
# @PRE: env_id is provided and exists in configured environments.
# @POST: Returns List[DashboardMetadata] for the resolved environment; emits HTTP_404 when environment is absent.
# @SIDE_EFFECT: Reads environment configuration and performs remote Superset metadata retrieval over network.
# @DATA_CONTRACT: Input[str env_id] -> Output[List[DashboardMetadata]]
@router.get("/environments/{env_id}/dashboards", response_model=List[DashboardMetadata])
async def get_dashboards(
    env_id: str,
    config_manager=Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
    """Return dashboard metadata for one configured environment.

    Raises:
        HTTPException: 404 when ``env_id`` matches no configured environment.
    """
    with belief_scope("get_dashboards", f"env_id={env_id}"):
        logger.reason(f"Fetching dashboards for environment: {env_id}")
        # Resolve the environment through an id-keyed map rather than a scan.
        env_by_id = {e.id: e for e in config_manager.get_environments()}
        environment = env_by_id.get(env_id)

        if environment is None:
            logger.explore(f"Environment {env_id} not found in configuration")
            raise HTTPException(status_code=404, detail="Environment not found")

        # Remote call: fetch the dashboard summaries from the Superset API.
        superset = SupersetClient(environment)
        summaries = superset.get_dashboards_summary()
        logger.reflect(f"Retrieved {len(summaries)} dashboards from {env_id}")
        return summaries
# [/DEF:get_dashboards:Function]
|
|
|
|
# [DEF:execute_migration:Function]
# @COMPLEXITY: 5
# @PURPOSE: Validate migration selection and enqueue asynchronous migration task execution.
# @PRE: DashboardSelection payload is valid and both source/target environments exist.
# @POST: Returns {"task_id": str, "message": str} when task creation succeeds; emits HTTP_400/HTTP_500 on failure.
# @SIDE_EFFECT: Reads configuration, writes task record through task manager, and writes operational logs.
# @DATA_CONTRACT: Input[DashboardSelection] -> Output[Dict[str, str]]
@router.post("/migration/execute")
async def execute_migration(
    selection: DashboardSelection,
    config_manager=Depends(get_config_manager),
    task_manager=Depends(get_task_manager),
    _ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
    """Validate the selection's environments and enqueue a migration task.

    Raises:
        HTTPException: 400 when either environment id is unknown; 500 when
            the task manager fails to create the task.
    """
    with belief_scope("execute_migration"):
        logger.reason(f"Initiating migration from {selection.source_env_id} to {selection.target_env_id}")

        # Guard: both environments must be configured before any work is enqueued.
        known_ids = {env.id for env in config_manager.get_environments()}
        if not {selection.source_env_id, selection.target_env_id} <= known_ids:
            logger.explore("Invalid environment selection", extra={"source": selection.source_env_id, "target": selection.target_env_id})
            raise HTTPException(status_code=400, detail="Invalid source or target environment")

        # Serialize the selection; the toggle flags are set explicitly so they
        # are guaranteed present in the task payload even if the serializer
        # were to omit them.
        task_params = dict(
            selection.dict(),
            replace_db_config=selection.replace_db_config,
            fix_cross_filters=selection.fix_cross_filters,
        )

        logger.reason(f"Creating migration task with {len(selection.selected_ids)} dashboards")

        try:
            task = await task_manager.create_task("superset-migration", task_params)
            logger.reflect(f"Migration task created: {task.id}")
            return {"task_id": task.id, "message": "Migration initiated"}
        except Exception as e:
            logger.explore(f"Task creation failed: {e}")
            raise HTTPException(status_code=500, detail=f"Failed to create migration task: {str(e)}")
# [/DEF:execute_migration:Function]
|
|
|
|
|
|
# [DEF:dry_run_migration:Function]
# @COMPLEXITY: 5
# @PURPOSE: Build pre-flight migration diff and risk summary without mutating target systems.
# @PRE: DashboardSelection is valid, source and target environments exist, differ, and selected_ids is non-empty.
# @POST: Returns deterministic dry-run payload; emits HTTP_400 for guard violations and HTTP_500 for orchestrator value errors.
# @SIDE_EFFECT: Reads local mappings from DB and fetches source/target metadata via Superset API.
# @DATA_CONTRACT: Input[DashboardSelection] -> Output[Dict[str, Any]]
@router.post("/migration/dry-run", response_model=Dict[str, Any])
async def dry_run_migration(
    selection: DashboardSelection,
    config_manager=Depends(get_config_manager),
    db: Session = Depends(get_db),
    _ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
    """Compute a dry-run migration analysis without mutating any target.

    Raises:
        HTTPException: 400 for unknown/identical environments or empty
            selection; 500 when the orchestrator raises ``ValueError``.
    """
    with belief_scope("dry_run_migration"):
        logger.reason(f"Starting dry run: {selection.source_env_id} -> {selection.target_env_id}")

        # Resolve both environments up front via an id-keyed map.
        envs = {env.id: env for env in config_manager.get_environments()}
        source_env = envs.get(selection.source_env_id)
        target_env = envs.get(selection.target_env_id)

        # Guard clauses: each precondition failure maps to an explicit 400.
        if source_env is None or target_env is None:
            logger.explore("Invalid environment selection for dry run")
            raise HTTPException(status_code=400, detail="Invalid source or target environment")

        if selection.source_env_id == selection.target_env_id:
            logger.explore("Source and target environments are identical")
            raise HTTPException(status_code=400, detail="Source and target environments must be different")

        if not selection.selected_ids:
            logger.explore("No dashboards selected for dry run")
            raise HTTPException(status_code=400, detail="No dashboards selected for dry run")

        orchestrator = MigrationDryRunService()
        # Clients are built before the try-block so constructor errors are not
        # misreported as orchestrator failures.
        source_client = SupersetClient(source_env)
        target_client = SupersetClient(target_env)

        try:
            analysis = orchestrator.run(
                selection=selection,
                source_client=source_client,
                target_client=target_client,
                db=db,
            )
            logger.reflect("Dry run analysis complete")
            return analysis
        except ValueError as exc:
            logger.explore(f"Dry run orchestrator failed: {exc}")
            raise HTTPException(status_code=500, detail=str(exc)) from exc
# [/DEF:dry_run_migration:Function]
|
|
|
|
# [DEF:get_migration_settings:Function]
# @COMPLEXITY: 3
# @PURPOSE: Read and return configured migration synchronization cron expression.
# @PRE: Configuration store is available and requester has READ permission.
# @POST: Returns {"cron": str} reflecting current persisted settings value.
# @SIDE_EFFECT: Reads configuration from config manager.
# @DATA_CONTRACT: Input[None] -> Output[Dict[str, str]]
@router.get("/migration/settings", response_model=Dict[str, str])
async def get_migration_settings(
    config_manager=Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """Return the currently persisted migration sync cron expression."""
    with belief_scope("get_migration_settings"):
        settings = config_manager.get_config().settings
        return {"cron": settings.migration_sync_cron}
# [/DEF:get_migration_settings:Function]
|
|
|
|
# [DEF:update_migration_settings:Function]
# @COMPLEXITY: 3
# @PURPOSE: Validate and persist migration synchronization cron expression update.
# @PRE: Payload includes a non-blank "cron" value and requester has WRITE permission.
# @POST: Returns {"cron": str, "status": "updated"} and persists updated cron value.
# @SIDE_EFFECT: Mutates configuration and writes persisted config through config manager.
# @DATA_CONTRACT: Input[Dict[str, str]] -> Output[Dict[str, str]]
@router.put("/migration/settings", response_model=Dict[str, str])
async def update_migration_settings(
    payload: Dict[str, str],
    config_manager=Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "WRITE"))
):
    """Persist a new migration sync cron expression.

    Raises:
        HTTPException: 400 when the "cron" key is missing or blank.
    """
    with belief_scope("update_migration_settings"):
        if "cron" not in payload:
            raise HTTPException(status_code=400, detail="Missing 'cron' field in payload")

        cron_expr = payload["cron"]

        # Fix: previously a blank/whitespace-only expression was persisted
        # as-is, silently breaking the sync schedule. Reject it explicitly.
        # (Full cron-syntax validation is left to the scheduler that consumes
        # this value — TODO confirm where that validation lives.)
        if not cron_expr.strip():
            raise HTTPException(status_code=400, detail="'cron' field must be a non-empty string")

        config = config_manager.get_config()
        config.settings.migration_sync_cron = cron_expr
        config_manager.save_config(config)

        return {"cron": cron_expr, "status": "updated"}
# [/DEF:update_migration_settings:Function]
|
|
|
|
# [DEF:get_resource_mappings:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetch synchronized resource mappings with optional filters and pagination for migration mappings view.
# @PRE: skip>=0, 1<=limit<=500, DB session is active, requester has READ permission.
# @POST: Returns {"items": [...], "total": int} where items reflect applied filters and pagination.
# @SIDE_EFFECT: Executes database read queries against ResourceMapping table.
# @DATA_CONTRACT: Input[QueryParams] -> Output[Dict[str, Any]]
@router.get("/migration/mappings-data", response_model=Dict[str, Any])
async def get_resource_mappings(
    skip: int = Query(0, ge=0),
    limit: int = Query(50, ge=1, le=500),
    search: Optional[str] = Query(None, description="Search by resource name or UUID"),
    env_id: Optional[str] = Query(None, description="Filter by environment ID"),
    resource_type: Optional[str] = Query(None, description="Filter by resource type"),
    db: Session = Depends(get_db),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """Return a filtered, paginated page of ResourceMapping rows plus a total count."""
    with belief_scope("get_resource_mappings"):
        query = db.query(ResourceMapping)

        # Apply optional filters; each narrows the base query independently.
        if env_id:
            query = query.filter(ResourceMapping.environment_id == env_id)
        if resource_type:
            # Type filter is normalized to upper case to match stored values.
            query = query.filter(ResourceMapping.resource_type == resource_type.upper())
        if search:
            pattern = f"%{search}%"
            query = query.filter(
                (ResourceMapping.resource_name.ilike(pattern))
                | (ResourceMapping.uuid.ilike(pattern))
            )

        # Total reflects the filtered set, independent of pagination.
        total = query.count()
        page = (
            query.order_by(ResourceMapping.resource_type, ResourceMapping.resource_name)
            .offset(skip)
            .limit(limit)
            .all()
        )

        items = [
            {
                "id": m.id,
                "environment_id": m.environment_id,
                "resource_type": m.resource_type.value if m.resource_type else None,
                "uuid": m.uuid,
                "remote_id": m.remote_integer_id,
                "resource_name": m.resource_name,
                "last_synced_at": m.last_synced_at.isoformat() if m.last_synced_at else None,
            }
            for m in page
        ]

        return {"items": items, "total": total}
# [/DEF:get_resource_mappings:Function]
|
|
|
|
# [DEF:trigger_sync_now:Function]
# @COMPLEXITY: 3
# @PURPOSE: Trigger immediate ID synchronization for every configured environment.
# @PRE: At least one environment is configured and requester has EXECUTE permission.
# @POST: Returns sync summary with synced/failed counts after attempting all environments.
# @SIDE_EFFECT: Upserts Environment rows, commits DB transaction, performs network sync calls, and writes logs.
# @DATA_CONTRACT: Input[None] -> Output[Dict[str, Any]]
@router.post("/migration/sync-now", response_model=Dict[str, Any])
async def trigger_sync_now(
    config_manager=Depends(get_config_manager),
    db: Session = Depends(get_db),
    _ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
    """Upsert configured environments into the DB and sync ID mappings for each.

    Per-environment sync failures are collected rather than aborting the run,
    so one bad environment does not block the others.

    Raises:
        HTTPException: 400 when no environments are configured.
    """
    with belief_scope("trigger_sync_now"):
        # Fix: removed a redundant function-local `from ...core.logger import
        # logger` — the module-level import already provides it.
        # Local import presumably avoids a circular dependency at module load
        # time — TODO confirm before hoisting to the top of the file.
        from ...models.mapping import Environment as EnvironmentModel

        config = config_manager.get_config()
        environments = config.environments

        if not environments:
            raise HTTPException(status_code=400, detail="No environments configured")

        # Ensure each environment exists in DB (upsert) to satisfy FK constraints
        for env in environments:
            existing = db.query(EnvironmentModel).filter_by(id=env.id).first()
            if not existing:
                db_env = EnvironmentModel(
                    id=env.id,
                    name=env.name,
                    url=env.url,
                    credentials_id=env.id,  # Use env.id as credentials reference
                )
                db.add(db_env)
                logger.info(f"[trigger_sync_now][Action] Created environment row for {env.id}")
            else:
                # Refresh mutable fields so the DB row tracks the config.
                existing.name = env.name
                existing.url = env.url
        db.commit()

        service = IdMappingService(db)
        results = {"synced": [], "failed": []}

        # Attempt every environment; record each outcome instead of failing fast.
        for env in environments:
            try:
                client = SupersetClient(env)
                service.sync_environment(env.id, client)
                results["synced"].append(env.id)
                logger.info(f"[trigger_sync_now][Action] Synced environment {env.id}")
            except Exception as e:
                results["failed"].append({"env_id": env.id, "error": str(e)})
                logger.error(f"[trigger_sync_now][Error] Failed to sync {env.id}: {e}")

        return {
            "status": "completed",
            "synced_count": len(results["synced"]),
            "failed_count": len(results["failed"]),
            "details": results
        }
# [/DEF:trigger_sync_now:Function]
|
|
|
|
# [/DEF:MigrationApi:Module]
|