Compare commits

...

3 Commits

Author SHA1 Message Date
8fa951fc93 dry run migration 2026-02-27 20:48:18 +03:00
149d230426 semantic protocol update 2026-02-27 20:48:06 +03:00
4c601fbe06 [
{
    "file": "backend/src/api/routes/__tests__/test_dashboards.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 9 previous findings remediated. @TEST_FIXTURE data aligned, all @TEST_EDGE scenarios covered, all @PRE negative tests present, all @SIDE_EFFECT assertions added. Full contract compliance."
  },
  {
    "file": "backend/src/api/routes/__tests__/test_datasets.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 6 previous findings remediated. Full @PRE boundary coverage including page_size>100, empty IDs, missing env. @SIDE_EFFECT assertions added. 503 error path tested."
  },
  {
    "file": "backend/src/core/auth/__tests__/test_auth.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 4 previous findings remediated. @SIDE_EFFECT last_login verified. Inactive user @PRE negative test added. Empty hash edge case covered. provision_adfs_user tested for both new and existing user paths."
  },
  {
    "file": "backend/src/services/__tests__/test_resource_service.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Both prior recommendations implemented. Full edge case coverage for _get_last_task_for_resource. No anti-patterns detected."
  },
  {
    "file": "backend/tests/test_resource_hubs.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Pagination boundary tests added. All @TEST_EDGE scenarios now covered. No anti-patterns detected."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_chat.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Contract scanning remains sound."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_confirmation.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Confirmation flow testing remains sound."
  }
]
2026-02-27 09:59:57 +03:00
30 changed files with 104455 additions and 58376 deletions

View File

@@ -17,7 +17,7 @@ description: Audit AI-generated unit tests. Your goal is to aggressively search
2. **The Logic Mirror (Echoing):**
- *Definition:* The test re-implements the exact same algorithmic logic found in the source code to calculate the `expected_result`. If the original logic is flawed, the test will falsely pass.
- *Rule:* Tests must assert against **static, predefined outcomes** (from `@TEST_` or explicit constants), NOT dynamically calculated outcomes using the same logic as the source.
- *Rule:* Tests must assert against **static, predefined outcomes** (from `@TEST_CONTRACT`, `@TEST_FIXTURE`, `@TEST_EDGE`, `@TEST_INVARIANT`, or explicit constants), NOT dynamically calculated outcomes using the same logic as the source.
3. **The "Happy Path" Illusion:**
- *Definition:* The test suite only checks successful executions but ignores the `@PRE` conditions (Negative Testing).
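To make the distinction concrete, here is a minimal sketch of the two assertion styles (the `apply_discount` function and its 0.9 multiplier are hypothetical, not code from this repository):

```
def apply_discount(price: float) -> float:
    return price * 0.9  # source logic under audit

def test_discount_echoing():
    # ANTI-PATTERN (Logic Mirror): re-derives the expectation with the
    # same formula as the source, so a wrong multiplier still passes.
    expected = 100.0 * 0.9
    assert apply_discount(100.0) == expected

def test_discount_static():
    # CORRECT: asserts a static, predefined outcome; 90.0 would come
    # from a @TEST_FIXTURE entry or an explicit constant.
    assert apply_discount(100.0) == 90.0
```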

1602 .ai/MODULE_MAP.md Normal file

File diff suppressed because it is too large

4523 .ai/PROJECT_MAP.md Normal file

File diff suppressed because it is too large

View File

@@ -3,76 +3,28 @@
# @SEMANTICS: Finance, ACID, Transfer, Ledger
# @PURPOSE: Core banking transaction processor with ACID guarantees.
# @LAYER: Domain (Core)
# @RELATION: DEPENDS_ON -> [DEF:Infra:PostgresDB]
# @RELATION: DEPENDS_ON -> [DEF:Infra:AuditLog]
# @RELATION: DEPENDS_ON -> [DEF:Infra:PostgresDB]
#
# @INVARIANT: Total system balance must remain constant (Double-Entry Bookkeeping).
# @INVARIANT: Negative transfers are strictly forbidden.
# @INVARIANT: No partial commit must occur under failure (ACID Atomicity).
# @TEST_CONTRACT: TransferInput ->
# {
# required_fields: {
# sender_id: str,
# receiver_id: str,
# amount: Decimal
# },
# invariants: [
# "amount > 0",
# "sender_id != receiver_id"
# ],
# constraints: [
# "sender must exist",
# "receiver must exist"
# ]
# }
# --- Test Specifications (The "What" and "Why", not the "Data") ---
# @TEST_CONTRACT: Input -> TransferInputDTO, Output -> TransferResultDTO
# @TEST_CONTRACT: TransferResult ->
# {
# required_fields: {
# tx_id: str,
# status: str,
# new_balance: Decimal
# },
# invariants: [
# "status == COMPLETED implies balance mutation occurred"
# ]
# }
# Happy Path
# @TEST_SCENARIO: sufficient_funds -> Returns COMPLETED, balances updated.
# @TEST_FIXTURE: sufficient_funds -> file:./__tests__/fixtures/transfers.json#happy_path
# @TEST_FIXTURE: sufficient_funds ->
# {
# sender_balance: 500.00,
# receiver_balance: 100.00,
# amount: 100.00
# }
# Edge Cases (CRITICAL)
# @TEST_SCENARIO: insufficient_funds -> Throws BusinessRuleViolation("INSUFFICIENT_FUNDS").
# @TEST_SCENARIO: negative_amount -> Throws BusinessRuleViolation("Transfer amount must be positive.").
# @TEST_SCENARIO: self_transfer -> Throws BusinessRuleViolation("Cannot transfer to self.").
# @TEST_SCENARIO: audit_failure -> Throws RuntimeError("TRANSACTION_ABORTED").
# @TEST_SCENARIO: concurrency_conflict -> Throws DBTransactionError.
# @TEST_EDGE: insufficient_funds ->
# {
# sender_balance: 50.00,
# receiver_balance: 100.00,
# amount: 100.00
# }
#
# @TEST_EDGE: negative_amount ->
# {
# sender_balance: 500.00,
# receiver_balance: 100.00,
# amount: -10.00
# }
#
# @TEST_EDGE: self_transfer ->
# {
# sender_id: "acc_A",
# receiver_id: "acc_A",
# amount: 10.00
# }
# @TEST_EDGE: audit_failure -> raises Exception
# @TEST_EDGE: concurrency_conflict -> special: concurrent_execution
# @TEST_INVARIANT: total_balance_constant -> verifies: [sufficient_funds, concurrency_conflict]
# @TEST_INVARIANT: no_partial_commit -> verifies: [audit_failure]
# @TEST_INVARIANT: negative_transfer_forbidden -> verifies: [negative_amount]
# Linking Tests to Invariants
# @TEST_INVARIANT: total_balance_constant -> VERIFIED_BY: [sufficient_funds, concurrency_conflict]
# @TEST_INVARIANT: negative_transfer_forbidden -> VERIFIED_BY: [negative_amount]
from decimal import Decimal
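For illustration, a test sketch built strictly from the tags above. The `processor` fixture and its `transfer`/`total_balance` API are assumptions made for this sketch; only the data and expected outcomes come from the `@TEST_FIXTURE`, `@TEST_EDGE`, and `@TEST_INVARIANT` entries:

```
from decimal import Decimal

import pytest

def test_negative_amount_rejected(processor):
    # @TEST_EDGE: negative_amount -> static input, static expected error
    with pytest.raises(Exception, match="Transfer amount must be positive."):
        processor.transfer(sender_id="acc_A", receiver_id="acc_B",
                           amount=Decimal("-10.00"))

def test_sufficient_funds_preserves_total(processor):
    # @TEST_FIXTURE: sufficient_funds -> sender 500.00, receiver 100.00, amount 100.00
    result = processor.transfer(sender_id="acc_A", receiver_id="acc_B",
                                amount=Decimal("100.00"))
    assert result.status == "COMPLETED"  # static outcome, not re-derived
    # @TEST_INVARIANT: total_balance_constant -> 500.00 + 100.00 must stay 600.00
    assert processor.total_balance() == Decimal("600.00")
```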

View File

@@ -5,6 +5,7 @@
#### I. THE LAW (AXIOMS)
1. Meaning is primary. Code is secondary.
2. Blindness is unacceptable. If a graph node (@RELATION) or a data schema is unknown, do not invent an implementation. Stop and request context.
2. The contract (@PRE/@POST) is the source of truth.
**3. UX is logic, not decoration. Interface states are part of the contract.**
4. The `[DEF]...[/DEF]` structure is inviolable.
@@ -47,11 +48,13 @@
@PRE: Input conditions.
@POST: Exit guarantees.
@SIDE_EFFECT: Mutations, IO.
@DATA_CONTRACT: Reference to a DTO/Pydantic model. Replaces manual @PARAM descriptions. Format: Input -> [Model], Output -> [Model].
**UX Tags (Svelte/Frontend):**
**@UX_STATE:** `[StateName] -> Visual behavior` (Idle, Loading, Error).
**@UX_FEEDBACK:** System reaction (Toast, Shake, Red Border).
**@UX_RECOVERY:** Mechanism for the user to recover from an error (Retry, Clear Input).
**@UX_REACTIVITY:** Explicit declaration of rune usage. Format: State: $state, Derived: $derived. No legacy export let.
**UX Testing Tags (for the Tester Agent):**
**@UX_TEST:** Test specification for a UX state.
@@ -63,41 +66,26 @@
#### V. ADAPTATION (TIERS)
Determined by the `@TIER` tag in the Header.
1. **CRITICAL** (Core/Security/**Complex UI**):
- Requirement: Full contract (including **all @UX tags**), Graph, Invariants, Strict Logs.
```
@TEST_CONTRACT: Mandatory description of the input/output data structure.
Format:
@TEST_CONTRACT: Name -> {
required_fields: {field: type},
optional_fields: {field: type},
invariants: [...]
}
### V. STRICTNESS LEVELS (TIERS)
The degree of control is set by the `@TIER` tag in the Header.
@TEST_FIXTURE: Canonical correct example (happy path).
Format:
@TEST_FIXTURE: fixture_name -> {INLINE_JSON | PATH#fragment}
**1. CRITICAL** (Core / Security / Complex UI)
- **Law:** Full GRACE. Graph, Invariants, Strict Log, all `@UX` tags.
- **Testing Dogma:** Tests are born from the contract. Bare code without data is blind.
- `@TEST_CONTRACT: InputType -> OutputType`. (Strict interface).
- `@TEST_SCENARIO: name -> Expected behavior`. (The essence of the test).
- `@TEST_FIXTURE: name -> file:PATH | INLINE_JSON`. (Happy-path data).
- `@TEST_EDGE: name -> Failure description`. (At least 3 boundaries).
- *Baseline set:* `missing_field`, `empty_response`, `invalid_type`, `external_fail`.
- `@TEST_INVARIANT: inv_name -> VERIFIED_BY: [scenario_1, ...]`. (Closing the logic loop).
- **Execution:** The Tester Agent must build its checks strictly from these tags.
@TEST_EDGE: Boundary cases (at least 3 for CRITICAL).
Format:
@TEST_EDGE: case_name -> {INLINE_JSON | special_case}
**2. STANDARD** (Business Logic / Forms)
- **Law:** The baseline. (`@PURPOSE`, `@UX_STATE`, Log, `@RELATION`).
- **Exception:** For complex forms, introduce `@TEST_SCENARIO` and `@TEST_INVARIANT`.
@TEST_INVARIANT: Mandatory. Links tests to invariants.
Format:
@TEST_INVARIANT: invariant_name -> verifies: [test_case_1, test_case_2]
Mandatory edge types for CRITICAL:
- missing_required_field
- empty_response
- invalid_type
- external_failure (exception)
```
- The Tester Agent **MUST** use @TEST_CONTRACT, @TEST_FIXTURE, and @TEST_EDGE when writing tests for CRITICAL modules.
2. **STANDARD** (BizLogic/**Forms**):
- Requirement: Basic contract (@PURPOSE, @UX_STATE), Logs, @RELATION.
- @TEST_INVARIANT, @TEST_CONTRACT: Recommended for Complex Forms.
3. **TRIVIAL** (DTO/**Atoms**):
- Requirement: Only the [DEF] anchors and @PURPOSE.
**3. TRIVIAL** (DTO / UI Atoms / Utilities)
- **Law:** The skeleton. Only the `[DEF]` anchor and `@PURPOSE`. No data or graphs required.
#### VI. LOGGING (THE DAO OF THE MOLECULE / MOLECULAR TOPOLOGY)
Goal: Tracing. Self-correction. Managing the Attention Matrix ("the chemistry of thinking").
@@ -129,10 +117,16 @@
**Immutable rule:** Every system log bears a `source` brand. For the Outer World (Svelte), inscribe it by hand in runes: `console.log("[ID][REFLECT] Msg")`.
#### VII. GENERATION ALGORITHM
1. ANALYSIS. Assess the TIER, the layer, and the UX requirements.
#### VIII. GENERATION ALGORITHM AND ESCAPING DEADLOCK
1. ANALYSIS. Assess the TIER, the layer, and the UX requirements. What is missing? Request `[NEED_CONTEXT: id]`.
2. SKELETON. Create the `[DEF]`, the Header, and the Contracts.
3. IMPLEMENTATION. Write logic that satisfies the Contract (and the UX states).
3. IMPLEMENTATION. Write logic that satisfies the Contract (and the UX states). Irrigate the path with `[REASON]` and `[REFLECT]` logs.
4. CLOSURE. Close every `[/DEF]`.
**DETECTIVE MODE (If the contract is violated):**
IF an error or a contradiction -> STOP.
1. Output `[COHERENCE_CHECK_FAILED]`.
2. Formulate a hypothesis: `[EXPLORE] Is the error in I/O, state, or a dependency?`
3. Request permission to change the contract or to inject debug logs.
IF an error or a contradiction -> STOP. Output `[COHERENCE_CHECK_FAILED]`.
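To make the tier rules concrete, a minimal hypothetical CRITICAL header following the format above (module name, contracts, and fixture path are invented for illustration):

```
# [DEF:backend.src.core.example_service:Module]
# @TIER: CRITICAL
# @PURPOSE: Hypothetical module used only to illustrate the header format.
# @RELATION: DEPENDS_ON -> [DEF:Infra:PostgresDB]
# @TEST_CONTRACT: Input -> ExampleInputDTO, Output -> ExampleResultDTO
# @TEST_SCENARIO: happy_path -> Returns OK, record persisted.
# @TEST_FIXTURE: happy_path -> file:./__tests__/fixtures/example.json#happy_path
# @TEST_EDGE: missing_required_field -> raises ValidationError
# @TEST_EDGE: empty_response -> returns empty list, no crash
# @TEST_EDGE: external_failure -> raises RuntimeError
# @TEST_INVARIANT: record_persisted -> VERIFIED_BY: [happy_path]
# [/DEF:backend.src.core.example_service:Module]
```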

File diff suppressed because it is too large

View File

@@ -10,6 +10,41 @@ from datetime import datetime, timezone
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
# Global mock user for get_current_user dependency overrides
mock_user = MagicMock()
mock_user.username = "testuser"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
@pytest.fixture(autouse=True)
def mock_deps():
config_manager = MagicMock()
task_manager = MagicMock()
resource_service = MagicMock()
mapping_service = MagicMock()
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
app.dependency_overrides[get_resource_service] = lambda: resource_service
app.dependency_overrides[get_mapping_service] = lambda: mapping_service
app.dependency_overrides[get_current_user] = lambda: mock_user
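# NOTE: keying overrides on has_permission(...) assumes the factory returns
# the same dependency object for identical (resource, action) arguments;
# otherwise these keys would not match the Depends instances in the routes.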
app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
yield {
"config": config_manager,
"task": task_manager,
"resource": resource_service,
"mapping": mapping_service
}
app.dependency_overrides.clear()
client = TestClient(app)
@@ -18,45 +53,35 @@ client = TestClient(app)
# @TEST: GET /api/dashboards returns 200 and valid schema
# @PRE: env_id exists
# @POST: Response matches DashboardsResponse schema
def test_get_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task_mgr.return_value.get_all_tasks.return_value = []
# Mock resource service response
async def mock_get_dashboards(env, tasks):
return [
{
"id": 1,
"title": "Sales Report",
"slug": "sales",
"git_status": {"branch": "main", "sync_status": "OK"},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
]
mock_service.return_value.get_dashboards_with_status = AsyncMock(
side_effect=mock_get_dashboards
)
# Mock permission
mock_perm.return_value = lambda: True
def test_get_dashboards_success(mock_deps):
"""Uses @TEST_FIXTURE: dashboard_list_happy data."""
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
response = client.get("/api/dashboards?env_id=prod")
assert response.status_code == 200
data = response.json()
assert "dashboards" in data
assert "total" in data
assert "page" in data
# @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"}
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
{
"id": 1,
"title": "Main Revenue",
"slug": "main-revenue",
"git_status": {"branch": "main", "sync_status": "OK"},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
])
response = client.get("/api/dashboards?env_id=prod")
assert response.status_code == 200
data = response.json()
# exhaustive @POST assertions
assert "dashboards" in data
assert len(data["dashboards"]) == 1
assert data["dashboards"][0]["title"] == "Main Revenue"
assert data["total"] == 1
assert "page" in data
DashboardsResponse(**data)
# [/DEF:test_get_dashboards_success:Function]
@@ -66,55 +91,81 @@ def test_get_dashboards_success():
# @TEST: GET /api/dashboards filters by search term
# @PRE: search parameter provided
# @POST: Only matching dashboards returned
def test_get_dashboards_with_search():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_task_mgr.return_value.get_all_tasks.return_value = []
async def mock_get_dashboards(env, tasks):
return [
{"id": 1, "title": "Sales Report", "slug": "sales"},
{"id": 2, "title": "Marketing Dashboard", "slug": "marketing"}
]
mock_service.return_value.get_dashboards_with_status = AsyncMock(
side_effect=mock_get_dashboards
)
mock_perm.return_value = lambda: True
def test_get_dashboards_with_search(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
response = client.get("/api/dashboards?env_id=prod&search=sales")
assert response.status_code == 200
data = response.json()
# Filtered by search term
async def mock_get_dashboards(env, tasks):
return [
{"id": 1, "title": "Sales Report", "slug": "sales"},
{"id": 2, "title": "Marketing Dashboard", "slug": "marketing"}
]
mock_deps["resource"].get_dashboards_with_status = AsyncMock(
side_effect=mock_get_dashboards
)
response = client.get("/api/dashboards?env_id=prod&search=sales")
assert response.status_code == 200
data = response.json()
# @POST: Filtered result count must match search
assert len(data["dashboards"]) == 1
assert data["dashboards"][0]["title"] == "Sales Report"
# [/DEF:test_get_dashboards_with_search:Function]
# [DEF:test_get_dashboards_empty:Function]
# @TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}
def test_get_dashboards_empty(mock_deps):
"""@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
mock_env = MagicMock()
mock_env.id = "empty_env"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[])
response = client.get("/api/dashboards?env_id=empty_env")
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert len(data["dashboards"]) == 0
assert data["total_pages"] == 1
DashboardsResponse(**data)
# [/DEF:test_get_dashboards_empty:Function]
# [DEF:test_get_dashboards_superset_failure:Function]
# @TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}
def test_get_dashboards_superset_failure(mock_deps):
"""@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
mock_env = MagicMock()
mock_env.id = "bad_conn"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(
side_effect=Exception("Connection refused")
)
response = client.get("/api/dashboards?env_id=bad_conn")
assert response.status_code == 503
assert "Failed to fetch dashboards" in response.json()["detail"]
# [/DEF:test_get_dashboards_superset_failure:Function]
# [DEF:test_get_dashboards_env_not_found:Function]
# @TEST: GET /api/dashboards returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
def test_get_dashboards_env_not_found():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
mock_config.return_value.get_environments.return_value = []
mock_perm.return_value = lambda: True
response = client.get("/api/dashboards?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
def test_get_dashboards_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_dashboards_env_not_found:Function]
@@ -124,40 +175,29 @@ def test_get_dashboards_env_not_found():
# @TEST: GET /api/dashboards returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error
def test_get_dashboards_invalid_pagination():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
def test_get_dashboards_invalid_pagination(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# Invalid page
response = client.get("/api/dashboards?env_id=prod&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_perm.return_value = lambda: True
# Invalid page
response = client.get("/api/dashboards?env_id=prod&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
# Invalid page_size
response = client.get("/api/dashboards?env_id=prod&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# Invalid page_size
response = client.get("/api/dashboards?env_id=prod&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# [/DEF:test_get_dashboards_invalid_pagination:Function]
# [DEF:test_get_dashboard_detail_success:Function]
# @TEST: GET /api/dashboards/{id} returns dashboard detail with charts and datasets
def test_get_dashboard_detail_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm, \
patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
def test_get_dashboard_detail_success(mock_deps):
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_perm.return_value = lambda: True
mock_deps["config"].get_environments.return_value = [mock_env]
mock_client = MagicMock()
mock_client.get_dashboard_detail.return_value = {
@@ -205,56 +245,46 @@ def test_get_dashboard_detail_success():
# [DEF:test_get_dashboard_detail_env_not_found:Function]
# @TEST: GET /api/dashboards/{id} returns 404 for missing environment
def test_get_dashboard_detail_env_not_found():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
mock_config.return_value.get_environments.return_value = []
mock_perm.return_value = lambda: True
def test_get_dashboard_detail_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/42?env_id=missing")
response = client.get("/api/dashboards/42?env_id=missing")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_dashboard_detail_env_not_found:Function]
# [DEF:test_migrate_dashboards_success:Function]
# @TEST: POST /api/dashboards/migrate creates migration task
# @PRE: Valid source_env_id, target_env_id, dashboard_ids
# @POST: Returns task_id
def test_migrate_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environments
mock_source = MagicMock()
mock_source.id = "source"
mock_target = MagicMock()
mock_target.id = "target"
mock_config.return_value.get_environments.return_value = [mock_source, mock_target]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-migrate-123"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
# @POST: Returns task_id and create_task was called
def test_migrate_dashboards_success(mock_deps):
mock_source = MagicMock()
mock_source.id = "source"
mock_target = MagicMock()
mock_target.id = "target"
mock_deps["config"].get_environments.return_value = [mock_source, mock_target]
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "source",
"target_env_id": "target",
"dashboard_ids": [1, 2, 3],
"db_mappings": {"old_db": "new_db"}
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
mock_task = MagicMock()
mock_task.id = "task-migrate-123"
mock_deps["task"].create_task = AsyncMock(return_value=mock_task)
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "source",
"target_env_id": "target",
"dashboard_ids": [1, 2, 3],
"db_mappings": {"old_db": "new_db"}
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# @POST/@SIDE_EFFECT: create_task was called
mock_deps["task"].create_task.assert_called_once()
# [/DEF:test_migrate_dashboards_success:Function]
@@ -264,154 +294,184 @@ def test_migrate_dashboards_success():
# @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids
# @PRE: dashboard_ids is empty
# @POST: Returns 400 error
def test_migrate_dashboards_no_ids():
with patch("src.api.routes.dashboards.has_permission") as mock_perm:
mock_perm.return_value = lambda: True
def test_migrate_dashboards_no_ids(mock_deps):
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "source",
"target_env_id": "target",
"dashboard_ids": []
}
)
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "source",
"target_env_id": "target",
"dashboard_ids": []
}
)
assert response.status_code == 400
assert "At least one dashboard ID must be provided" in response.json()["detail"]
assert response.status_code == 400
assert "At least one dashboard ID must be provided" in response.json()["detail"]
# [/DEF:test_migrate_dashboards_no_ids:Function]
# [DEF:test_migrate_dashboards_env_not_found:Function]
# @PRE: source_env_id and target_env_id are valid environment IDs
def test_migrate_dashboards_env_not_found(mock_deps):
"""@PRE: source_env_id and target_env_id are valid environment IDs."""
mock_deps["config"].get_environments.return_value = []
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "ghost",
"target_env_id": "t",
"dashboard_ids": [1]
}
)
assert response.status_code == 404
assert "Source environment not found" in response.json()["detail"]
# [/DEF:test_migrate_dashboards_env_not_found:Function]
# [DEF:test_backup_dashboards_success:Function]
# @TEST: POST /api/dashboards/backup creates backup task
# @PRE: Valid env_id, dashboard_ids
# @POST: Returns task_id
def test_backup_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-backup-456"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
# @POST: Returns task_id and create_task was called
def test_backup_dashboards_success(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
response = client.post(
"/api/dashboards/backup",
json={
"env_id": "prod",
"dashboard_ids": [1, 2, 3],
"schedule": "0 0 * * *"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
mock_task = MagicMock()
mock_task.id = "task-backup-456"
mock_deps["task"].create_task = AsyncMock(return_value=mock_task)
response = client.post(
"/api/dashboards/backup",
json={
"env_id": "prod",
"dashboard_ids": [1, 2, 3],
"schedule": "0 0 * * *"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# @POST/@SIDE_EFFECT: create_task was called
mock_deps["task"].create_task.assert_called_once()
# [/DEF:test_backup_dashboards_success:Function]
# [DEF:test_backup_dashboards_env_not_found:Function]
# @PRE: env_id is a valid environment ID
def test_backup_dashboards_env_not_found(mock_deps):
"""@PRE: env_id is a valid environment ID."""
mock_deps["config"].get_environments.return_value = []
response = client.post(
"/api/dashboards/backup",
json={
"env_id": "ghost",
"dashboard_ids": [1]
}
)
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_backup_dashboards_env_not_found:Function]
# [DEF:test_get_database_mappings_success:Function]
# @TEST: GET /api/dashboards/db-mappings returns mapping suggestions
# @PRE: Valid source_env_id, target_env_id
# @POST: Returns list of database mappings
def test_get_database_mappings_success():
with patch("src.api.routes.dashboards.get_mapping_service") as mock_service, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock mapping service
mock_service.return_value.get_suggestions = AsyncMock(return_value=[
{
"source_db": "old_sales",
"target_db": "new_sales",
"source_db_uuid": "uuid-1",
"target_db_uuid": "uuid-2",
"confidence": 0.95
}
])
# Mock permission
mock_perm.return_value = lambda: True
def test_get_database_mappings_success(mock_deps):
mock_source = MagicMock()
mock_source.id = "prod"
mock_target = MagicMock()
mock_target.id = "staging"
mock_deps["config"].get_environments.return_value = [mock_source, mock_target]
response = client.get("/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging")
assert response.status_code == 200
data = response.json()
assert "mappings" in data
mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[
{
"source_db": "old_sales",
"target_db": "new_sales",
"source_db_uuid": "uuid-1",
"target_db_uuid": "uuid-2",
"confidence": 0.95
}
])
response = client.get("/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging")
assert response.status_code == 200
data = response.json()
assert "mappings" in data
assert len(data["mappings"]) == 1
assert data["mappings"][0]["confidence"] == 0.95
# [/DEF:test_get_database_mappings_success:Function]
# [DEF:test_get_database_mappings_env_not_found:Function]
# @PRE: source_env_id and target_env_id are valid environment IDs
def test_get_database_mappings_env_not_found(mock_deps):
"""@PRE: source_env_id must be a valid environment."""
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t")
assert response.status_code == 404
# [/DEF:test_get_database_mappings_env_not_found:Function]
# [DEF:test_get_dashboard_tasks_history_filters_success:Function]
# @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
def test_get_dashboard_tasks_history_filters_success():
with patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
now = datetime.now(timezone.utc)
def test_get_dashboard_tasks_history_filters_success(mock_deps):
now = datetime.now(timezone.utc)
llm_task = MagicMock()
llm_task.id = "task-llm-1"
llm_task.plugin_id = "llm_dashboard_validation"
llm_task.status = "SUCCESS"
llm_task.started_at = now
llm_task.finished_at = now
llm_task.params = {"dashboard_id": "42", "environment_id": "prod"}
llm_task.result = {"summary": "LLM validation complete"}
llm_task = MagicMock()
llm_task.id = "task-llm-1"
llm_task.plugin_id = "llm_dashboard_validation"
llm_task.status = "SUCCESS"
llm_task.started_at = now
llm_task.finished_at = now
llm_task.params = {"dashboard_id": "42", "environment_id": "prod"}
llm_task.result = {"summary": "LLM validation complete"}
backup_task = MagicMock()
backup_task.id = "task-backup-1"
backup_task.plugin_id = "superset-backup"
backup_task.status = "RUNNING"
backup_task.started_at = now
backup_task.finished_at = None
backup_task.params = {"env": "prod", "dashboards": [42]}
backup_task.result = {}
backup_task = MagicMock()
backup_task.id = "task-backup-1"
backup_task.plugin_id = "superset-backup"
backup_task.status = "RUNNING"
backup_task.started_at = now
backup_task.finished_at = None
backup_task.params = {"env": "prod", "dashboards": [42]}
backup_task.result = {}
other_task = MagicMock()
other_task.id = "task-other"
other_task.plugin_id = "superset-backup"
other_task.status = "SUCCESS"
other_task.started_at = now
other_task.finished_at = now
other_task.params = {"env": "prod", "dashboards": [777]}
other_task.result = {}
other_task = MagicMock()
other_task.id = "task-other"
other_task.plugin_id = "superset-backup"
other_task.status = "SUCCESS"
other_task.started_at = now
other_task.finished_at = now
other_task.params = {"env": "prod", "dashboards": [777]}
other_task.result = {}
mock_task_mgr.return_value.get_all_tasks.return_value = [other_task, llm_task, backup_task]
mock_perm.return_value = lambda: True
mock_deps["task"].get_all_tasks.return_value = [other_task, llm_task, backup_task]
response = client.get("/api/dashboards/42/tasks?env_id=prod&limit=10")
response = client.get("/api/dashboards/42/tasks?env_id=prod&limit=10")
assert response.status_code == 200
data = response.json()
assert data["dashboard_id"] == 42
assert len(data["items"]) == 2
assert {item["plugin_id"] for item in data["items"]} == {"llm_dashboard_validation", "superset-backup"}
assert response.status_code == 200
data = response.json()
assert data["dashboard_id"] == 42
assert len(data["items"]) == 2
assert {item["plugin_id"] for item in data["items"]} == {"llm_dashboard_validation", "superset-backup"}
# [/DEF:test_get_dashboard_tasks_history_filters_success:Function]
# [DEF:test_get_dashboard_thumbnail_success:Function]
# @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
def test_get_dashboard_thumbnail_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm, \
patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
def test_get_dashboard_thumbnail_success(mock_deps):
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_perm.return_value = lambda: True
mock_deps["config"].get_environments.return_value = [mock_env]
mock_client = MagicMock()
mock_response = MagicMock()

View File

@@ -11,6 +11,41 @@ from unittest.mock import MagicMock, patch, AsyncMock
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.datasets import DatasetsResponse, DatasetDetailResponse
from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
# Global mock user for get_current_user dependency overrides
mock_user = MagicMock()
mock_user.username = "testuser"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
@pytest.fixture(autouse=True)
def mock_deps():
config_manager = MagicMock()
task_manager = MagicMock()
resource_service = MagicMock()
mapping_service = MagicMock()
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
app.dependency_overrides[get_resource_service] = lambda: resource_service
app.dependency_overrides[get_mapping_service] = lambda: mapping_service
app.dependency_overrides[get_current_user] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
yield {
"config": config_manager,
"task": task_manager,
"resource": resource_service,
"mapping": mapping_service
}
app.dependency_overrides.clear()
client = TestClient(app)
@@ -20,41 +55,34 @@ client = TestClient(app)
# @TEST: GET /api/datasets returns 200 and valid schema
# @PRE: env_id exists
# @POST: Response matches DatasetsResponse schema
def test_get_datasets_success():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.get_resource_service") as mock_service, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock resource service response
mock_service.return_value.get_datasets_with_status.return_value = AsyncMock()(
return_value=[
{
"id": 1,
"table_name": "sales_data",
"schema": "public",
"database": "sales_db",
"mapped_fields": {"total": 10, "mapped": 5},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
]
)
# Mock permission
mock_perm.return_value = lambda: True
def test_get_datasets_success(mock_deps):
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# Mock resource service response
mock_deps["resource"].get_datasets_with_status = AsyncMock(
return_value=[
{
"id": 1,
"table_name": "sales_data",
"schema": "public",
"database": "sales_db",
"mapped_fields": {"total": 10, "mapped": 5},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
]
)
response = client.get("/api/datasets?env_id=prod")
assert response.status_code == 200
data = response.json()
assert "datasets" in data
assert len(data["datasets"]) >= 0
# Validate against Pydantic model
DatasetsResponse(**data)
response = client.get("/api/datasets?env_id=prod")
assert response.status_code == 200
data = response.json()
assert "datasets" in data
assert len(data["datasets"]) >= 0
# Validate against Pydantic model
DatasetsResponse(**data)
# [/DEF:test_get_datasets_success:Function]
@@ -64,17 +92,13 @@ def test_get_datasets_success():
# @TEST: GET /api/datasets returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
def test_get_datasets_env_not_found():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
mock_config.return_value.get_environments.return_value = []
mock_perm.return_value = lambda: True
def test_get_datasets_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/datasets?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
response = client.get("/api/datasets?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_datasets_env_not_found:Function]
@@ -84,24 +108,25 @@ def test_get_datasets_env_not_found():
# @TEST: GET /api/datasets returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error
def test_get_datasets_invalid_pagination():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_perm.return_value = lambda: True
def test_get_datasets_invalid_pagination(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# Invalid page
response = client.get("/api/datasets?env_id=prod&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
# Invalid page_size
response = client.get("/api/datasets?env_id=prod&page_size=0")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# Invalid page
response = client.get("/api/datasets?env_id=prod&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
# Invalid page_size (too small)
response = client.get("/api/datasets?env_id=prod&page_size=0")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# @TEST_EDGE: page_size > 100 exceeds max
response = client.get("/api/datasets?env_id=prod&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# [/DEF:test_get_datasets_invalid_pagination:Function]
@@ -111,36 +136,31 @@ def test_get_datasets_invalid_pagination():
# @TEST: POST /api/datasets/map-columns creates mapping task
# @PRE: Valid env_id, dataset_ids, source_type
# @POST: Returns task_id
def test_map_columns_success():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-123"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
def test_map_columns_success(mock_deps):
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-123"
mock_deps["task"].create_task = AsyncMock(return_value=mock_task)
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [1, 2, 3],
"source_type": "postgresql"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [1, 2, 3],
"source_type": "postgresql"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# @POST/@SIDE_EFFECT: create_task was called
mock_deps["task"].create_task.assert_called_once()
# [/DEF:test_map_columns_success:Function]
@@ -150,21 +170,18 @@ def test_map_columns_success():
# @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type
# @PRE: source_type is not 'postgresql' or 'xlsx'
# @POST: Returns 400 error
def test_map_columns_invalid_source_type():
with patch("src.api.routes.datasets.has_permission") as mock_perm:
mock_perm.return_value = lambda: True
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [1],
"source_type": "invalid"
}
)
assert response.status_code == 400
assert "Source type must be 'postgresql' or 'xlsx'" in response.json()["detail"]
def test_map_columns_invalid_source_type(mock_deps):
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [1],
"source_type": "invalid"
}
)
assert response.status_code == 400
assert "Source type must be 'postgresql' or 'xlsx'" in response.json()["detail"]
# [/DEF:test_map_columns_invalid_source_type:Function]
@@ -174,39 +191,110 @@ def test_map_columns_invalid_source_type():
# @TEST: POST /api/datasets/generate-docs creates doc generation task
# @PRE: Valid env_id, dataset_ids, llm_provider
# @POST: Returns task_id
def test_generate_docs_success():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-456"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
def test_generate_docs_success(mock_deps):
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-456"
mock_deps["task"].create_task = AsyncMock(return_value=mock_task)
response = client.post(
"/api/datasets/generate-docs",
json={
"env_id": "prod",
"dataset_ids": [1],
"llm_provider": "openai"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
response = client.post(
"/api/datasets/generate-docs",
json={
"env_id": "prod",
"dataset_ids": [1],
"llm_provider": "openai"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# @POST/@SIDE_EFFECT: create_task was called
mock_deps["task"].create_task.assert_called_once()
# [/DEF:test_generate_docs_success:Function]
# [DEF:test_map_columns_empty_ids:Function]
# @TEST: POST /api/datasets/map-columns returns 400 for empty dataset_ids
# @PRE: dataset_ids is empty
# @POST: Returns 400 error
def test_map_columns_empty_ids(mock_deps):
"""@PRE: dataset_ids must be non-empty."""
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [],
"source_type": "postgresql"
}
)
assert response.status_code == 400
assert "At least one dataset ID must be provided" in response.json()["detail"]
# [/DEF:test_map_columns_empty_ids:Function]
# [DEF:test_generate_docs_empty_ids:Function]
# @TEST: POST /api/datasets/generate-docs returns 400 for empty dataset_ids
# @PRE: dataset_ids is empty
# @POST: Returns 400 error
def test_generate_docs_empty_ids(mock_deps):
"""@PRE: dataset_ids must be non-empty."""
response = client.post(
"/api/datasets/generate-docs",
json={
"env_id": "prod",
"dataset_ids": [],
"llm_provider": "openai"
}
)
assert response.status_code == 400
assert "At least one dataset ID must be provided" in response.json()["detail"]
# [/DEF:test_generate_docs_empty_ids:Function]
# [DEF:test_generate_docs_env_not_found:Function]
# @TEST: POST /api/datasets/generate-docs returns 404 for missing env
# @PRE: env_id does not exist
# @POST: Returns 404 error
def test_generate_docs_env_not_found(mock_deps):
"""@PRE: env_id must be a valid environment."""
mock_deps["config"].get_environments.return_value = []
response = client.post(
"/api/datasets/generate-docs",
json={
"env_id": "ghost",
"dataset_ids": [1],
"llm_provider": "openai"
}
)
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_generate_docs_env_not_found:Function]
# [DEF:test_get_datasets_superset_failure:Function]
# @TEST_EDGE: external_superset_failure -> {status: 503}
def test_get_datasets_superset_failure(mock_deps):
"""@TEST_EDGE: external_superset_failure -> {status: 503}"""
mock_env = MagicMock()
mock_env.id = "bad_conn"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_datasets_with_status = AsyncMock(
side_effect=Exception("Connection refused")
)
response = client.get("/api/datasets?env_id=bad_conn")
assert response.status_code == 503
assert "Failed to fetch datasets" in response.json()["detail"]
# [/DEF:test_get_datasets_superset_failure:Function]
# [/DEF:backend.src.api.routes.__tests__.test_datasets:Module]

View File

@@ -407,4 +407,104 @@ async def test_execute_migration_invalid_env_raises_400(_mock_env):
assert exc.value.status_code == 400
@pytest.mark.asyncio
async def test_dry_run_migration_returns_diff_and_risk(db_session):
# @TEST_EDGE: missing_target_datasource -> validates high risk item generation
# @TEST_EDGE: breaking_reference -> validates high risk on missing dataset link
from src.api.routes.migration import dry_run_migration
from src.models.dashboard import DashboardSelection
env_source = MagicMock()
env_source.id = "src"
env_source.name = "Source"
env_source.url = "http://source"
env_source.username = "admin"
env_source.password = "admin"
env_source.verify_ssl = False
env_source.timeout = 30
env_target = MagicMock()
env_target.id = "tgt"
env_target.name = "Target"
env_target.url = "http://target"
env_target.username = "admin"
env_target.password = "admin"
env_target.verify_ssl = False
env_target.timeout = 30
cm = _make_sync_config_manager([env_source, env_target])
selection = DashboardSelection(
selected_ids=[42],
source_env_id="src",
target_env_id="tgt",
replace_db_config=False,
fix_cross_filters=True,
)
with patch("src.api.routes.migration.SupersetClient") as MockClient, \
patch("src.api.routes.migration.MigrationDryRunService") as MockService:
source_client = MagicMock()
target_client = MagicMock()
MockClient.side_effect = [source_client, target_client]
service_instance = MagicMock()
service_payload = {
"generated_at": "2026-02-27T00:00:00+00:00",
"selection": selection.model_dump(),
"selected_dashboard_titles": ["Sales"],
"diff": {
"dashboards": {"create": [], "update": [{"uuid": "dash-1"}], "delete": []},
"charts": {"create": [{"uuid": "chart-1"}], "update": [], "delete": []},
"datasets": {"create": [{"uuid": "dataset-1"}], "update": [], "delete": []},
},
"summary": {
"dashboards": {"create": 0, "update": 1, "delete": 0},
"charts": {"create": 1, "update": 0, "delete": 0},
"datasets": {"create": 1, "update": 0, "delete": 0},
"selected_dashboards": 1,
},
"risk": {
"score": 75,
"level": "high",
"items": [
{"code": "missing_datasource"},
{"code": "breaking_reference"},
],
},
}
service_instance.run.return_value = service_payload
MockService.return_value = service_instance
result = await dry_run_migration(selection=selection, config_manager=cm, db=db_session, _=None)
assert result["summary"]["dashboards"]["update"] == 1
assert result["summary"]["charts"]["create"] == 1
assert result["summary"]["datasets"]["create"] == 1
assert result["risk"]["score"] > 0
assert any(item["code"] == "missing_datasource" for item in result["risk"]["items"])
assert any(item["code"] == "breaking_reference" for item in result["risk"]["items"])
@pytest.mark.asyncio
async def test_dry_run_migration_rejects_same_environment(db_session):
from src.api.routes.migration import dry_run_migration
from src.models.dashboard import DashboardSelection
env = MagicMock()
env.id = "same"
env.name = "Same"
env.url = "http://same"
env.username = "admin"
env.password = "admin"
env.verify_ssl = False
env.timeout = 30
cm = _make_sync_config_manager([env])
selection = DashboardSelection(selected_ids=[1], source_env_id="same", target_env_id="same")
with pytest.raises(HTTPException) as exc:
await dry_run_migration(selection=selection, config_manager=cm, db=db_session, _=None)
assert exc.value.status_code == 400
# [/DEF:backend.src.api.routes.__tests__.test_migration_routes:Module]

View File

@@ -1,6 +1,6 @@
# [DEF:backend.src.api.routes.dashboards:Module]
#
# @TIER: STANDARD
# @TIER: CRITICAL
# @SEMANTICS: api, dashboards, resources, hub
# @PURPOSE: API endpoints for the Dashboard Hub - listing dashboards with Git and task status
# @LAYER: API
@@ -9,6 +9,27 @@
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
#
# @INVARIANT: All dashboard responses include git_status and last_task metadata
#
# @TEST_CONTRACT: DashboardsAPI -> {
# required_fields: {env_id: string, page: integer, page_size: integer},
# optional_fields: {search: string},
# invariants: ["Pagination must be valid", "Environment must exist"]
# }
#
# @TEST_FIXTURE: dashboard_list_happy -> {
# "env_id": "prod",
# "expected_count": 1,
# "dashboards": [{"id": 1, "title": "Main Revenue"}]
# }
#
# @TEST_EDGE: pagination_zero_page -> {"env_id": "prod", "page": 0, "status": 400}
# @TEST_EDGE: pagination_oversize -> {"env_id": "prod", "page_size": 101, "status": 400}
# @TEST_EDGE: missing_env -> {"env_id": "ghost", "status": 404}
# @TEST_EDGE: empty_dashboards -> {"env_id": "empty_env", "expected_total": 0}
# @TEST_EDGE: external_superset_failure -> {"env_id": "bad_conn", "status": 503}
#
# @TEST_INVARIANT: metadata_consistency -> verifies: [dashboard_list_happy, empty_dashboards]
#
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException, Query, Response
@@ -219,10 +240,23 @@ async def get_dashboards(
async def get_database_mappings(
source_env_id: str,
target_env_id: str,
config_manager=Depends(get_config_manager),
mapping_service=Depends(get_mapping_service),
_ = Depends(has_permission("plugin:migration", "READ"))
):
with belief_scope("get_database_mappings", f"source={source_env_id}, target={target_env_id}"):
# Validate environments exist
environments = config_manager.get_environments()
source_env = next((e for e in environments if e.id == source_env_id), None)
target_env = next((e for e in environments if e.id == target_env_id), None)
if not source_env:
logger.error(f"[get_database_mappings][Coherence:Failed] Source environment not found: {source_env_id}")
raise HTTPException(status_code=404, detail="Source environment not found")
if not target_env:
logger.error(f"[get_database_mappings][Coherence:Failed] Target environment not found: {target_env_id}")
raise HTTPException(status_code=404, detail="Target environment not found")
try:
# Get mapping suggestions using MappingService
suggestions = await mapping_service.get_suggestions(source_env_id, target_env_id)

View File

@@ -14,6 +14,7 @@ from ...core.database import get_db
from ...models.dashboard import DashboardMetadata, DashboardSelection
from ...core.superset_client import SupersetClient
from ...core.logger import belief_scope
from ...core.migration.dry_run_orchestrator import MigrationDryRunService
from ...core.mapping_service import IdMappingService
from ...models.mapping import ResourceMapping
@@ -83,6 +84,44 @@ async def execute_migration(
raise HTTPException(status_code=500, detail=f"Failed to create migration task: {str(e)}")
# [/DEF:execute_migration:Function]
# [DEF:dry_run_migration:Function]
# @PURPOSE: Build pre-flight diff and risk summary without applying migration.
# @PRE: Selection and environments are valid.
# @POST: Returns deterministic JSON diff and risk scoring.
@router.post("/migration/dry-run", response_model=Dict[str, Any])
async def dry_run_migration(
selection: DashboardSelection,
config_manager=Depends(get_config_manager),
db: Session = Depends(get_db),
_ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
with belief_scope("dry_run_migration"):
environments = config_manager.get_environments()
env_map = {env.id: env for env in environments}
source_env = env_map.get(selection.source_env_id)
target_env = env_map.get(selection.target_env_id)
if not source_env or not target_env:
raise HTTPException(status_code=400, detail="Invalid source or target environment")
if selection.source_env_id == selection.target_env_id:
raise HTTPException(status_code=400, detail="Source and target environments must be different")
if not selection.selected_ids:
raise HTTPException(status_code=400, detail="No dashboards selected for dry run")
service = MigrationDryRunService()
source_client = SupersetClient(source_env)
target_client = SupersetClient(target_env)
try:
return service.run(
selection=selection,
source_client=source_client,
target_client=target_client,
db=db,
)
except ValueError as exc:
raise HTTPException(status_code=500, detail=str(exc)) from exc
# [/DEF:dry_run_migration:Function]
# [DEF:get_migration_settings:Function]
# @PURPOSE: Get current migration Cron string explicitly.
@router.get("/migration/settings", response_model=Dict[str, str])
@@ -221,4 +260,4 @@ async def trigger_sync_now(
}
# [/DEF:trigger_sync_now:Function]
# [/DEF:backend.src.api.routes.migration:Module]
# [/DEF:backend.src.api.routes.migration:Module]
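For reference, a sketch of exercising the new endpoint end-to-end. It assumes the router is mounted under `/api` like the dashboard routes and that auth/permission dependencies are overridden as in the test fixtures above; the payload mirrors the `DashboardSelection` used in `test_dry_run_migration_returns_diff_and_risk`:

```
from fastapi.testclient import TestClient
from src.app import app

client = TestClient(app)
resp = client.post("/api/migration/dry-run", json={
    "selected_ids": [42],
    "source_env_id": "src",
    "target_env_id": "tgt",
    "replace_db_config": False,
    "fix_cross_filters": True,
})
assert resp.status_code == 200
report = resp.json()
# Deterministic diff counts and risk scoring, per the @POST above
print(report["summary"], report["risk"]["level"])
```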

View File

@@ -14,6 +14,8 @@ import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from src.core.database import Base
# Import all models so they are registered with Base before create_all; both auth and mapping must be imported so Base knows about every table
from src.models import mapping, auth, task, report
from src.models.auth import User, Role, Permission, ADGroupMapping
from src.services.auth_service import AuthService
from src.core.auth.repository import AuthRepository
@@ -176,4 +178,94 @@ def test_ad_group_mapping(auth_repo):
assert retrieved_mapping.role_id == role.id
def test_authenticate_user_updates_last_login(auth_service, auth_repo):
"""@SIDE_EFFECT: authenticate_user updates last_login timestamp on success."""
user = User(
username="loginuser",
email="login@example.com",
password_hash=get_password_hash("mypassword"),
auth_source="LOCAL"
)
auth_repo.db.add(user)
auth_repo.db.commit()
assert user.last_login is None
authenticated = auth_service.authenticate_user("loginuser", "mypassword")
assert authenticated is not None
assert authenticated.last_login is not None
def test_authenticate_inactive_user(auth_service, auth_repo):
"""@PRE: User with is_active=False should not authenticate."""
user = User(
username="inactive_user",
email="inactive@example.com",
password_hash=get_password_hash("testpass"),
auth_source="LOCAL",
is_active=False
)
auth_repo.db.add(user)
auth_repo.db.commit()
result = auth_service.authenticate_user("inactive_user", "testpass")
assert result is None
def test_verify_password_empty_hash():
"""@PRE: verify_password with empty/None hash returns False."""
assert verify_password("anypassword", "") is False
assert verify_password("anypassword", None) is False
def test_provision_adfs_user_new(auth_service, auth_repo):
"""@POST: provision_adfs_user creates a new ADFS user with correct roles."""
# Set up a role and AD group mapping
role = Role(name="ADFS_Viewer", description="ADFS viewer role")
auth_repo.db.add(role)
auth_repo.db.commit()
mapping = ADGroupMapping(ad_group="DOMAIN\\Viewers", role_id=role.id)
auth_repo.db.add(mapping)
auth_repo.db.commit()
user_info = {
"upn": "newadfsuser@domain.com",
"email": "newadfsuser@domain.com",
"groups": ["DOMAIN\\Viewers"]
}
user = auth_service.provision_adfs_user(user_info)
assert user is not None
assert user.username == "newadfsuser@domain.com"
assert user.auth_source == "ADFS"
assert user.is_active is True
assert len(user.roles) == 1
assert user.roles[0].name == "ADFS_Viewer"
def test_provision_adfs_user_existing(auth_service, auth_repo):
"""@POST: provision_adfs_user updates roles for existing user."""
# Create existing user
existing = User(
username="existingadfs@domain.com",
email="existingadfs@domain.com",
auth_source="ADFS",
is_active=True
)
auth_repo.db.add(existing)
auth_repo.db.commit()
user_info = {
"upn": "existingadfs@domain.com",
"email": "existingadfs@domain.com",
"groups": []
}
user = auth_service.provision_adfs_user(user_info)
assert user is not None
assert user.username == "existingadfs@domain.com"
assert len(user.roles) == 0 # No matching group mappings
# [/DEF:test_auth:Module]

View File

@@ -0,0 +1,12 @@
# [DEF:backend.src.core.migration.__init__:Module]
# @TIER: TRIVIAL
# @SEMANTICS: migration, package, exports
# @PURPOSE: Namespace package for migration pre-flight orchestration components.
# @LAYER: Core
from .dry_run_orchestrator import MigrationDryRunService
from .archive_parser import MigrationArchiveParser
__all__ = ["MigrationDryRunService", "MigrationArchiveParser"]
# [/DEF:backend.src.core.migration.__init__:Module]
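# With these re-exports, consumers can import from the package root, e.g.:
#     from src.core.migration import MigrationDryRunService, MigrationArchiveParser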

View File

@@ -0,0 +1,139 @@
# [DEF:backend.src.core.migration.archive_parser:Module]
# @TIER: STANDARD
# @SEMANTICS: migration, zip, parser, yaml, metadata
# @PURPOSE: Parse Superset export ZIP archives into normalized object catalogs for diffing.
# @LAYER: Core
# @RELATION: DEPENDS_ON -> backend.src.core.logger
# @INVARIANT: Parsing is read-only and never mutates archive files.
import json
import tempfile
import zipfile
from pathlib import Path
from typing import Any, Dict, List, Optional
import yaml
from ..logger import logger, belief_scope
# [DEF:MigrationArchiveParser:Class]
# @PURPOSE: Extract normalized dashboards/charts/datasets metadata from ZIP archives.
class MigrationArchiveParser:
# [DEF:extract_objects_from_zip:Function]
# @PURPOSE: Extract object catalogs from Superset archive.
# @PRE: zip_path points to a valid readable ZIP.
# @POST: Returns object lists grouped by resource type.
# @RETURN: Dict[str, List[Dict[str, Any]]]
def extract_objects_from_zip(self, zip_path: str) -> Dict[str, List[Dict[str, Any]]]:
with belief_scope("MigrationArchiveParser.extract_objects_from_zip"):
result: Dict[str, List[Dict[str, Any]]] = {
"dashboards": [],
"charts": [],
"datasets": [],
}
with tempfile.TemporaryDirectory() as temp_dir_str:
temp_dir = Path(temp_dir_str)
with zipfile.ZipFile(zip_path, "r") as zip_file:
zip_file.extractall(temp_dir)
result["dashboards"] = self._collect_yaml_objects(temp_dir, "dashboards")
result["charts"] = self._collect_yaml_objects(temp_dir, "charts")
result["datasets"] = self._collect_yaml_objects(temp_dir, "datasets")
return result
# [/DEF:extract_objects_from_zip:Function]
# [DEF:_collect_yaml_objects:Function]
# @PURPOSE: Read and normalize YAML manifests for one object type.
# @PRE: object_type is one of dashboards/charts/datasets.
# @POST: Returns only valid normalized objects.
def _collect_yaml_objects(self, root_dir: Path, object_type: str) -> List[Dict[str, Any]]:
with belief_scope("MigrationArchiveParser._collect_yaml_objects"):
files = list(root_dir.glob(f"**/{object_type}/**/*.yaml")) + list(root_dir.glob(f"**/{object_type}/*.yaml"))
objects: List[Dict[str, Any]] = []
for file_path in set(files):
try:
with open(file_path, "r") as file_obj:
payload = yaml.safe_load(file_obj) or {}
normalized = self._normalize_object_payload(payload, object_type)
if normalized:
objects.append(normalized)
except Exception as exc:
logger.reflect(
"[MigrationArchiveParser._collect_yaml_objects][REFLECT] skip_invalid_yaml path=%s error=%s",
file_path,
exc,
)
return objects
# [/DEF:_collect_yaml_objects:Function]
# [DEF:_normalize_object_payload:Function]
# @PURPOSE: Convert raw YAML payload to stable diff signature shape.
# @PRE: payload is parsed YAML mapping.
# @POST: Returns normalized descriptor with `uuid`, `title`, and `signature`.
def _normalize_object_payload(self, payload: Dict[str, Any], object_type: str) -> Optional[Dict[str, Any]]:
with belief_scope("MigrationArchiveParser._normalize_object_payload"):
if not isinstance(payload, dict):
return None
uuid = payload.get("uuid")
if not uuid:
return None
if object_type == "dashboards":
title = payload.get("dashboard_title") or payload.get("title")
signature = {
"title": title,
"slug": payload.get("slug"),
"position_json": payload.get("position_json"),
"json_metadata": payload.get("json_metadata"),
"description": payload.get("description"),
"owners": payload.get("owners"),
}
return {
"uuid": str(uuid),
"title": title or f"Dashboard {uuid}",
"signature": json.dumps(signature, sort_keys=True, default=str),
"owners": payload.get("owners") or [],
}
if object_type == "charts":
title = payload.get("slice_name") or payload.get("name")
signature = {
"title": title,
"viz_type": payload.get("viz_type"),
"params": payload.get("params"),
"query_context": payload.get("query_context"),
"datasource_uuid": payload.get("datasource_uuid"),
"dataset_uuid": payload.get("dataset_uuid"),
}
return {
"uuid": str(uuid),
"title": title or f"Chart {uuid}",
"signature": json.dumps(signature, sort_keys=True, default=str),
"dataset_uuid": payload.get("datasource_uuid") or payload.get("dataset_uuid"),
}
if object_type == "datasets":
title = payload.get("table_name") or payload.get("name")
signature = {
"title": title,
"schema": payload.get("schema"),
"database_uuid": payload.get("database_uuid"),
"sql": payload.get("sql"),
"columns": payload.get("columns"),
"metrics": payload.get("metrics"),
}
return {
"uuid": str(uuid),
"title": title or f"Dataset {uuid}",
"signature": json.dumps(signature, sort_keys=True, default=str),
"database_uuid": payload.get("database_uuid"),
}
return None
# [/DEF:_normalize_object_payload:Function]
# [/DEF:MigrationArchiveParser:Class]
# [/DEF:backend.src.core.migration.archive_parser:Module]
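# A small self-check of the normalization contract above; the chart payload is
# invented, but the field selection mirrors _normalize_object_payload:
_example = MigrationArchiveParser()._normalize_object_payload(
    {"uuid": "c-1", "slice_name": "Revenue", "viz_type": "bar", "datasource_uuid": "d-1"},
    "charts",
)
assert _example["title"] == "Revenue"
assert _example["dataset_uuid"] == "d-1"
# `signature` is a sort_keys JSON dump of the compared fields, so two manifests
# diff as "equal" iff every signature field matches:
assert '"viz_type": "bar"' in _example["signature"]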

View File

@@ -0,0 +1,235 @@
# [DEF:backend.src.core.migration.dry_run_orchestrator:Module]
# @TIER: STANDARD
# @SEMANTICS: migration, dry_run, diff, risk, superset
# @PURPOSE: Compute pre-flight migration diff and risk scoring without apply.
# @LAYER: Core
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
# @RELATION: DEPENDS_ON -> backend.src.core.migration_engine
# @RELATION: DEPENDS_ON -> backend.src.core.migration.archive_parser
# @RELATION: DEPENDS_ON -> backend.src.core.migration.risk_assessor
# @INVARIANT: Dry run is informative only and must not mutate target environment.
from datetime import datetime, timezone
import json
from typing import Any, Dict, List
from sqlalchemy.orm import Session
from ...models.dashboard import DashboardSelection
from ...models.mapping import DatabaseMapping
from ..logger import logger, belief_scope
from .archive_parser import MigrationArchiveParser
from .risk_assessor import build_risks, score_risks
from ..migration_engine import MigrationEngine
from ..superset_client import SupersetClient
from ..utils.fileio import create_temp_file
# [DEF:MigrationDryRunService:Class]
# @PURPOSE: Build deterministic diff/risk payload for migration pre-flight.
class MigrationDryRunService:
# [DEF:__init__:Function]
# @PURPOSE: Wire parser dependency for archive object extraction.
# @PRE: parser can be omitted to use default implementation.
# @POST: Service is ready to calculate dry-run payload.
def __init__(self, parser: MigrationArchiveParser | None = None):
self.parser = parser or MigrationArchiveParser()
# [/DEF:__init__:Function]
# [DEF:run:Function]
# @PURPOSE: Execute full dry-run computation for selected dashboards.
# @PRE: source/target clients are authenticated and selection validated by caller.
# @POST: Returns JSON-serializable pre-flight payload with summary, diff and risk.
# @SIDE_EFFECT: Reads source export archives and target metadata via network.
def run(
self,
selection: DashboardSelection,
source_client: SupersetClient,
target_client: SupersetClient,
db: Session,
) -> Dict[str, Any]:
with belief_scope("MigrationDryRunService.run"):
logger.explore("[MigrationDryRunService.run][EXPLORE] starting dry-run pipeline")
engine = MigrationEngine()
db_mapping = self._load_db_mapping(db, selection) if selection.replace_db_config else {}
transformed = {"dashboards": {}, "charts": {}, "datasets": {}}
dashboards_preview = source_client.get_dashboards_summary()
selected_preview = {
item["id"]: item
for item in dashboards_preview
if item.get("id") in selection.selected_ids
}
for dashboard_id in selection.selected_ids:
exported_content, _ = source_client.export_dashboard(int(dashboard_id))
with create_temp_file(content=exported_content, suffix=".zip") as source_zip:
with create_temp_file(suffix=".zip") as transformed_zip:
success = engine.transform_zip(
str(source_zip),
str(transformed_zip),
db_mapping,
strip_databases=False,
target_env_id=selection.target_env_id,
fix_cross_filters=selection.fix_cross_filters,
)
if not success:
raise ValueError(f"Failed to transform export archive for dashboard {dashboard_id}")
extracted = self.parser.extract_objects_from_zip(str(transformed_zip))
self._accumulate_objects(transformed, extracted)
source_objects = {key: list(value.values()) for key, value in transformed.items()}
target_objects = self._build_target_signatures(target_client)
diff = {
"dashboards": self._build_object_diff(source_objects["dashboards"], target_objects["dashboards"]),
"charts": self._build_object_diff(source_objects["charts"], target_objects["charts"]),
"datasets": self._build_object_diff(source_objects["datasets"], target_objects["datasets"]),
}
risk = self._build_risks(source_objects, target_objects, diff, target_client)
summary = {
"dashboards": {action: len(diff["dashboards"][action]) for action in ("create", "update", "delete")},
"charts": {action: len(diff["charts"][action]) for action in ("create", "update", "delete")},
"datasets": {action: len(diff["datasets"][action]) for action in ("create", "update", "delete")},
"selected_dashboards": len(selection.selected_ids),
}
selected_titles = [
selected_preview[dash_id]["title"]
for dash_id in selection.selected_ids
if dash_id in selected_preview
]
logger.reason("[MigrationDryRunService.run][REASON] dry-run payload assembled")
return {
"generated_at": datetime.now(timezone.utc).isoformat(),
"selection": selection.model_dump(),
"selected_dashboard_titles": selected_titles,
"diff": diff,
"summary": summary,
"risk": score_risks(risk),
}
# [/DEF:run:Function]
# [DEF:_load_db_mapping:Function]
# @PURPOSE: Resolve UUID mapping for optional DB config replacement.
def _load_db_mapping(self, db: Session, selection: DashboardSelection) -> Dict[str, str]:
rows = db.query(DatabaseMapping).filter(
DatabaseMapping.source_env_id == selection.source_env_id,
DatabaseMapping.target_env_id == selection.target_env_id,
).all()
return {row.source_db_uuid: row.target_db_uuid for row in rows}
# [/DEF:_load_db_mapping:Function]
# [DEF:_accumulate_objects:Function]
# @PURPOSE: Merge extracted resources by UUID to avoid duplicates.
def _accumulate_objects(self, target: Dict[str, Dict[str, Dict[str, Any]]], source: Dict[str, List[Dict[str, Any]]]) -> None:
for object_type in ("dashboards", "charts", "datasets"):
for item in source.get(object_type, []):
uuid = item.get("uuid")
if uuid:
target[object_type][str(uuid)] = item
# [/DEF:_accumulate_objects:Function]
# [DEF:_index_by_uuid:Function]
# @PURPOSE: Build UUID-index map for normalized resources.
def _index_by_uuid(self, objects: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
indexed: Dict[str, Dict[str, Any]] = {}
for obj in objects:
uuid = obj.get("uuid")
if uuid:
indexed[str(uuid)] = obj
return indexed
# [/DEF:_index_by_uuid:Function]
# [DEF:_build_object_diff:Function]
# @PURPOSE: Compute create/update/delete buckets by UUID+signature.
def _build_object_diff(self, source_objects: List[Dict[str, Any]], target_objects: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
target_index = self._index_by_uuid(target_objects)
created: List[Dict[str, Any]] = []
updated: List[Dict[str, Any]] = []
deleted: List[Dict[str, Any]] = []
for source_obj in source_objects:
source_uuid = str(source_obj.get("uuid"))
target_obj = target_index.get(source_uuid)
if not target_obj:
created.append({"uuid": source_uuid, "title": source_obj.get("title")})
continue
if source_obj.get("signature") != target_obj.get("signature"):
updated.append({
"uuid": source_uuid,
"title": source_obj.get("title"),
"target_title": target_obj.get("title"),
})
return {"create": created, "update": updated, "delete": deleted}
# [/DEF:_build_object_diff:Function]
# [DEF:_build_target_signatures:Function]
# @PURPOSE: Pull target metadata and normalize it into comparable signatures.
def _build_target_signatures(self, client: SupersetClient) -> Dict[str, List[Dict[str, Any]]]:
_, dashboards = client.get_dashboards(query={
"columns": ["uuid", "dashboard_title", "slug", "position_json", "json_metadata", "description", "owners"],
})
_, datasets = client.get_datasets(query={
"columns": ["uuid", "table_name", "schema", "database_uuid", "sql", "columns", "metrics"],
})
_, charts = client.get_charts(query={
"columns": ["uuid", "slice_name", "viz_type", "params", "query_context", "datasource_uuid", "dataset_uuid"],
})
return {
"dashboards": [{
"uuid": str(item.get("uuid")),
"title": item.get("dashboard_title"),
"owners": item.get("owners") or [],
"signature": json.dumps({
"title": item.get("dashboard_title"),
"slug": item.get("slug"),
"position_json": item.get("position_json"),
"json_metadata": item.get("json_metadata"),
"description": item.get("description"),
"owners": item.get("owners"),
}, sort_keys=True, default=str),
} for item in dashboards if item.get("uuid")],
"datasets": [{
"uuid": str(item.get("uuid")),
"title": item.get("table_name"),
"database_uuid": item.get("database_uuid"),
"signature": json.dumps({
"title": item.get("table_name"),
"schema": item.get("schema"),
"database_uuid": item.get("database_uuid"),
"sql": item.get("sql"),
"columns": item.get("columns"),
"metrics": item.get("metrics"),
}, sort_keys=True, default=str),
} for item in datasets if item.get("uuid")],
"charts": [{
"uuid": str(item.get("uuid")),
"title": item.get("slice_name") or item.get("name"),
"dataset_uuid": item.get("datasource_uuid") or item.get("dataset_uuid"),
"signature": json.dumps({
"title": item.get("slice_name") or item.get("name"),
"viz_type": item.get("viz_type"),
"params": item.get("params"),
"query_context": item.get("query_context"),
"datasource_uuid": item.get("datasource_uuid"),
"dataset_uuid": item.get("dataset_uuid"),
}, sort_keys=True, default=str),
} for item in charts if item.get("uuid")],
}
# [/DEF:_build_target_signatures:Function]
# [DEF:_build_risks:Function]
# @PURPOSE: Build risk items for missing datasource, broken refs, overwrite, owner mismatch.
def _build_risks(
self,
source_objects: Dict[str, List[Dict[str, Any]]],
target_objects: Dict[str, List[Dict[str, Any]]],
diff: Dict[str, Dict[str, List[Dict[str, Any]]]],
target_client: SupersetClient,
) -> List[Dict[str, Any]]:
return build_risks(source_objects, target_objects, diff, target_client)
# [/DEF:_build_risks:Function]
# [/DEF:MigrationDryRunService:Class]
# [/DEF:backend.src.core.migration.dry_run_orchestrator:Module]
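# A quick sketch of the bucketing rule in _build_object_diff (UUIDs, titles,
# and signatures below are invented): source-only objects land in "create",
# UUID matches with differing signatures land in "update", and "delete" stays
# empty as implemented, since a dry run proposes no removals on the target.
_svc = MigrationDryRunService()
_buckets = _svc._build_object_diff(
    [{"uuid": "a", "title": "New", "signature": "s1"},
     {"uuid": "b", "title": "Changed", "signature": "s2"}],
    [{"uuid": "b", "title": "Old", "signature": "s-old"}],
)
assert [o["uuid"] for o in _buckets["create"]] == ["a"]
assert [o["uuid"] for o in _buckets["update"]] == ["b"]
assert _buckets["delete"] == []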

View File

@@ -0,0 +1,119 @@
# [DEF:backend.src.core.migration.risk_assessor:Module]
# @TIER: STANDARD
# @SEMANTICS: migration, dry_run, risk, scoring
# @PURPOSE: Risk evaluation helpers for migration pre-flight reporting.
# @LAYER: Core
# @RELATION: USED_BY -> backend.src.core.migration.dry_run_orchestrator
from typing import Any, Dict, List
from ..superset_client import SupersetClient
# [DEF:index_by_uuid:Function]
# @PURPOSE: Build UUID-index from normalized objects.
def index_by_uuid(objects: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
indexed: Dict[str, Dict[str, Any]] = {}
for obj in objects:
uuid = obj.get("uuid")
if uuid:
indexed[str(uuid)] = obj
return indexed
# [/DEF:index_by_uuid:Function]
# [DEF:extract_owner_identifiers:Function]
# @PURPOSE: Normalize owner payloads for stable comparison.
def extract_owner_identifiers(owners: Any) -> List[str]:
if not isinstance(owners, list):
return []
ids: List[str] = []
for owner in owners:
if isinstance(owner, dict):
if owner.get("username"):
ids.append(str(owner["username"]))
elif owner.get("id") is not None:
ids.append(str(owner["id"]))
elif owner is not None:
ids.append(str(owner))
return sorted(set(ids))
# [/DEF:extract_owner_identifiers:Function]
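# Sketch of the normalization above with invented payloads: dict owners prefer
# "username" over "id", scalars are stringified, and duplicates and ordering
# are canonicalized away.
assert extract_owner_identifiers([{"username": "alice"}, {"id": 7}, "bob", "bob"]) == ["7", "alice", "bob"]
assert extract_owner_identifiers("not-a-list") == []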
# [DEF:build_risks:Function]
# @PURPOSE: Build risk list from computed diffs and target catalog state.
def build_risks(
source_objects: Dict[str, List[Dict[str, Any]]],
target_objects: Dict[str, List[Dict[str, Any]]],
diff: Dict[str, Dict[str, List[Dict[str, Any]]]],
target_client: SupersetClient,
) -> List[Dict[str, Any]]:
risks: List[Dict[str, Any]] = []
for object_type in ("dashboards", "charts", "datasets"):
for item in diff[object_type]["update"]:
risks.append({
"code": "overwrite_existing",
"severity": "medium",
"object_type": object_type[:-1],
"object_uuid": item["uuid"],
"message": f"Object will be updated in target: {item.get('title') or item['uuid']}",
})
target_dataset_uuids = set(index_by_uuid(target_objects["datasets"]).keys())
_, target_databases = target_client.get_databases(query={"columns": ["uuid"]})
target_database_uuids = {str(item.get("uuid")) for item in target_databases if item.get("uuid")}
for dataset in source_objects["datasets"]:
db_uuid = dataset.get("database_uuid")
if db_uuid and str(db_uuid) not in target_database_uuids:
risks.append({
"code": "missing_datasource",
"severity": "high",
"object_type": "dataset",
"object_uuid": dataset.get("uuid"),
"message": f"Target datasource is missing for dataset {dataset.get('title') or dataset.get('uuid')}",
})
for chart in source_objects["charts"]:
ds_uuid = chart.get("dataset_uuid")
if ds_uuid and str(ds_uuid) not in target_dataset_uuids:
risks.append({
"code": "breaking_reference",
"severity": "high",
"object_type": "chart",
"object_uuid": chart.get("uuid"),
"message": f"Chart references dataset not found on target: {ds_uuid}",
})
source_dash = index_by_uuid(source_objects["dashboards"])
target_dash = index_by_uuid(target_objects["dashboards"])
for item in diff["dashboards"]["update"]:
source_obj = source_dash.get(item["uuid"])
target_obj = target_dash.get(item["uuid"])
if not source_obj or not target_obj:
continue
source_owners = extract_owner_identifiers(source_obj.get("owners"))
target_owners = extract_owner_identifiers(target_obj.get("owners"))
if source_owners and target_owners and source_owners != target_owners:
risks.append({
"code": "owner_mismatch",
"severity": "low",
"object_type": "dashboard",
"object_uuid": item["uuid"],
"message": f"Owner mismatch for dashboard {item.get('title') or item['uuid']}",
})
return risks
# [/DEF:build_risks:Function]
# [DEF:score_risks:Function]
# @PURPOSE: Aggregate risk list into score and level.
def score_risks(risk_items: List[Dict[str, Any]]) -> Dict[str, Any]:
weights = {"high": 25, "medium": 10, "low": 5}
score = min(100, sum(weights.get(item.get("severity", "low"), 5) for item in risk_items))
level = "low" if score < 25 else "medium" if score < 60 else "high"
return {"score": score, "level": level, "items": risk_items}
# [/DEF:score_risks:Function]
# [/DEF:backend.src.core.migration.risk_assessor:Module]
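# Worked scoring example with invented items: two "high" findings plus one
# "medium" sum to 25 + 25 + 10 = 60, which reaches the >= 60 boundary, so the
# aggregate level is "high"; an empty list scores 0 and stays "low".
_sample = [{"severity": "high"}, {"severity": "high"}, {"severity": "medium"}]
assert score_risks(_sample)["score"] == 60
assert score_risks(_sample)["level"] == "high"
assert score_risks([])["level"] == "low"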

View File

@@ -336,6 +336,25 @@ class SupersetClient:
}
# [/DEF:get_dashboard_detail:Function]
# [DEF:get_charts:Function]
# @PURPOSE: Fetches all charts with pagination support.
# @PARAM: query (Optional[Dict]) - Optional query params/columns/filters.
# @PRE: Client is authenticated.
# @POST: Returns total count and charts list.
# @RETURN: Tuple[int, List[Dict]]
def get_charts(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
with belief_scope("get_charts"):
validated_query = self._validate_query_params(query or {})
if "columns" not in validated_query:
validated_query["columns"] = ["id", "uuid", "slice_name", "viz_type"]
paginated_data = self._fetch_all_pages(
endpoint="/chart/",
pagination_options={"base_query": validated_query, "results_field": "result"},
)
return len(paginated_data), paginated_data
# [/DEF:get_charts:Function]
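# Usage sketch, assuming an authenticated client instance (the query columns
# are illustrative):
#     total, charts = client.get_charts(query={"columns": ["uuid", "slice_name"]})
#     # total == len(charts) once all pages are fetched; the default columns
#     # are applied only when the caller omits "columns".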
# [DEF:_extract_chart_ids_from_layout:Function]
# @PURPOSE: Traverses dashboard layout metadata and extracts chart IDs from common keys.
# @PRE: payload can be dict/list/scalar.

View File

@@ -53,6 +53,7 @@ from ..logger import logger, belief_scope, should_log_task_level
# @TEST_EDGE: create_task_invalid_plugin -> raises ValueError
# @TEST_EDGE: create_task_invalid_params -> raises ValueError
# @TEST_INVARIANT: lifecycle_management -> verifies: [valid_manager]
class TaskManager:
"""
Manages the lifecycle of tasks, including their creation, execution, and state tracking.
"""

View File

@@ -10,7 +10,7 @@
},
"changed_by_name": "Superset Admin",
"changed_on": "2026-02-10T13:39:35.945662",
"changed_on_delta_humanized": "15 days ago",
"changed_on_delta_humanized": "16 days ago",
"charts": [
"TA-0001-001 test_chart"
],
@@ -19,7 +19,7 @@
"id": 1,
"last_name": "Admin"
},
"created_on_delta_humanized": "15 days ago",
"created_on_delta_humanized": "16 days ago",
"css": null,
"dashboard_title": "TA-0001 Test dashboard",
"id": 13,
@@ -54,7 +54,7 @@
"last_name": "Admin"
},
"changed_on": "2026-02-10T13:38:26.175551",
"changed_on_humanized": "15 days ago",
"changed_on_humanized": "16 days ago",
"column_formats": {},
"columns": [
{
@@ -424,7 +424,7 @@
"last_name": "Admin"
},
"created_on": "2026-02-10T13:38:26.050436",
"created_on_humanized": "15 days ago",
"created_on_humanized": "16 days ago",
"database": {
"allow_multi_catalog": false,
"backend": "postgresql",

View File

@@ -145,7 +145,9 @@ def test_get_git_status_for_dashboard_no_repo():
result = service._get_git_status_for_dashboard(123)
assert result is None
assert result is not None
assert result['sync_status'] == 'NO_REPO'
assert result['has_repo'] is False
# [/DEF:test_get_git_status_for_dashboard_no_repo:Function]
@@ -212,4 +214,38 @@ def test_extract_resource_name_from_task():
# [/DEF:test_extract_resource_name_from_task:Function]
# [DEF:test_get_last_task_for_resource_empty_tasks:Function]
# @TEST: _get_last_task_for_resource returns None for empty tasks list
# @PRE: tasks is empty list
# @POST: Returns None
def test_get_last_task_for_resource_empty_tasks():
from src.services.resource_service import ResourceService
service = ResourceService()
result = service._get_last_task_for_resource("dashboard-1", [])
assert result is None
# [/DEF:test_get_last_task_for_resource_empty_tasks:Function]
# [DEF:test_get_last_task_for_resource_no_match:Function]
# @TEST: _get_last_task_for_resource returns None when no tasks match resource_id
# @PRE: tasks list has no matching resource_id
# @POST: Returns None
def test_get_last_task_for_resource_no_match():
from src.services.resource_service import ResourceService
service = ResourceService()
task = MagicMock()
task.id = "task-999"
task.status = "SUCCESS"
task.params = {"resource_id": "dashboard-99"}
task.created_at = datetime(2024, 1, 1, 10, 0, 0)
result = service._get_last_task_for_resource("dashboard-1", [task])
assert result is None
# [/DEF:test_get_last_task_for_resource_no_match:Function]
# [/DEF:backend.src.services.__tests__.test_resource_service:Module]

View File

@@ -0,0 +1,62 @@
# [DEF:backend.tests.core.migration.test_archive_parser:Module]
#
# @TIER: STANDARD
# @PURPOSE: Unit tests for MigrationArchiveParser ZIP extraction contract.
# @LAYER: Domain
# @RELATION: VERIFIES -> backend.src.core.migration.archive_parser
#
import os
import sys
import tempfile
import zipfile
from pathlib import Path
import yaml
backend_dir = str(Path(__file__).parent.parent.parent.parent.resolve())
if backend_dir not in sys.path:
sys.path.insert(0, backend_dir)
from src.core.migration.archive_parser import MigrationArchiveParser
def test_extract_objects_from_zip_collects_all_types():
parser = MigrationArchiveParser()
with tempfile.TemporaryDirectory() as td:
td_path = Path(td)
zip_path = td_path / "objects.zip"
src_dir = td_path / "src"
(src_dir / "dashboards").mkdir(parents=True)
(src_dir / "charts").mkdir(parents=True)
(src_dir / "datasets").mkdir(parents=True)
with open(src_dir / "dashboards" / "dash.yaml", "w") as file_obj:
yaml.dump({"uuid": "dash-u1", "dashboard_title": "D1", "json_metadata": "{}"}, file_obj)
with open(src_dir / "charts" / "chart.yaml", "w") as file_obj:
yaml.dump({"uuid": "chart-u1", "slice_name": "C1", "viz_type": "bar"}, file_obj)
with open(src_dir / "datasets" / "dataset.yaml", "w") as file_obj:
yaml.dump({"uuid": "ds-u1", "table_name": "orders", "database_uuid": "db-u1"}, file_obj)
with zipfile.ZipFile(zip_path, "w") as zip_obj:
for root, _, files in os.walk(src_dir):
for file_name in files:
file_path = Path(root) / file_name
zip_obj.write(file_path, file_path.relative_to(src_dir))
extracted = parser.extract_objects_from_zip(str(zip_path))
if len(extracted["dashboards"]) != 1:
raise AssertionError("dashboards extraction size mismatch")
if extracted["dashboards"][0]["uuid"] != "dash-u1":
raise AssertionError("dashboard uuid mismatch")
if len(extracted["charts"]) != 1:
raise AssertionError("charts extraction size mismatch")
if extracted["charts"][0]["uuid"] != "chart-u1":
raise AssertionError("chart uuid mismatch")
if len(extracted["datasets"]) != 1:
raise AssertionError("datasets extraction size mismatch")
if extracted["datasets"][0]["uuid"] != "ds-u1":
raise AssertionError("dataset uuid mismatch")
# [/DEF:backend.tests.core.migration.test_archive_parser:Module]

View File

@@ -0,0 +1,110 @@
# [DEF:backend.tests.core.migration.test_dry_run_orchestrator:Module]
#
# @TIER: STANDARD
# @PURPOSE: Unit tests for MigrationDryRunService diff and risk computation contracts.
# @LAYER: Domain
# @RELATION: VERIFIES -> backend.src.core.migration.dry_run_orchestrator
#
import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
backend_dir = str(Path(__file__).parent.parent.parent.parent.resolve())
if backend_dir not in sys.path:
sys.path.insert(0, backend_dir)
from src.core.migration.dry_run_orchestrator import MigrationDryRunService
from src.models.dashboard import DashboardSelection
from src.models.mapping import Base
def _load_fixture() -> dict:
fixture_path = Path(__file__).parents[2] / "fixtures" / "migration_dry_run_fixture.json"
return json.loads(fixture_path.read_text())
def _make_session():
engine = create_engine(
"sqlite:///:memory:",
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
return Session()
def test_migration_dry_run_service_builds_diff_and_risk():
# @TEST_CONTRACT: dry_run_result_contract -> {
# required_fields: {diff: object, summary: object, risk: object},
# invariants: ["risk.score >= 0", "summary.selected_dashboards == len(selection.selected_ids)"]
# }
# @TEST_FIXTURE: migration_dry_run_fixture -> backend/tests/fixtures/migration_dry_run_fixture.json
# @TEST_EDGE: missing_target_datasource -> fixture.transformed_zip_objects.datasets[0].database_uuid
# @TEST_EDGE: breaking_reference -> fixture.transformed_zip_objects.charts[0].dataset_uuid
fixture = _load_fixture()
db = _make_session()
selection = DashboardSelection(
selected_ids=[42],
source_env_id="src",
target_env_id="tgt",
replace_db_config=False,
fix_cross_filters=True,
)
source_client = MagicMock()
source_client.get_dashboards_summary.return_value = fixture["source_dashboard_summary"]
source_client.export_dashboard.return_value = (b"PK\x03\x04", "source.zip")
target_client = MagicMock()
target_client.get_dashboards.return_value = (
len(fixture["target"]["dashboards"]),
fixture["target"]["dashboards"],
)
target_client.get_datasets.return_value = (
len(fixture["target"]["datasets"]),
fixture["target"]["datasets"],
)
target_client.get_charts.return_value = (
len(fixture["target"]["charts"]),
fixture["target"]["charts"],
)
target_client.get_databases.return_value = (
len(fixture["target"]["databases"]),
fixture["target"]["databases"],
)
parser = MagicMock()
parser.extract_objects_from_zip.return_value = fixture["transformed_zip_objects"]
service = MigrationDryRunService(parser=parser)
with patch("src.core.migration.dry_run_orchestrator.MigrationEngine") as EngineMock:
engine = MagicMock()
engine.transform_zip.return_value = True
EngineMock.return_value = engine
result = service.run(selection, source_client, target_client, db)
if "summary" not in result:
raise AssertionError("summary is missing in dry-run payload")
if result["summary"]["selected_dashboards"] != 1:
raise AssertionError("selected_dashboards summary mismatch")
if result["summary"]["dashboards"]["update"] != 1:
raise AssertionError("dashboard update count mismatch")
if result["summary"]["charts"]["create"] != 1:
raise AssertionError("chart create count mismatch")
if result["summary"]["datasets"]["create"] != 1:
raise AssertionError("dataset create count mismatch")
risk_codes = {item["code"] for item in result["risk"]["items"]}
if "missing_datasource" not in risk_codes:
raise AssertionError("missing_datasource risk is not detected")
if "breaking_reference" not in risk_codes:
raise AssertionError("breaking_reference risk is not detected")
# [/DEF:backend.tests.core.migration.test_dry_run_orchestrator:Module]

View File

@@ -0,0 +1,58 @@
{
"source_dashboard_summary": [
{
"id": 42,
"title": "Sales"
}
],
"target": {
"dashboards": [
{
"uuid": "dash-1",
"dashboard_title": "Sales Old",
"slug": "sales-old",
"position_json": "{}",
"json_metadata": "{}",
"description": "",
"owners": [
{
"username": "owner-a"
}
]
}
],
"datasets": [],
"charts": [],
"databases": []
},
"transformed_zip_objects": {
"dashboards": [
{
"uuid": "dash-1",
"title": "Sales New",
"signature": "{\"title\":\"Sales New\"}",
"owners": [
{
"username": "owner-b"
}
]
}
],
"charts": [
{
"uuid": "chart-1",
"title": "Chart A",
"signature": "{\"title\":\"Chart A\"}",
"dataset_uuid": "dataset-404"
}
],
"datasets": [
{
"uuid": "dataset-1",
"title": "orders",
"signature": "{\"title\":\"orders\"}",
"database_uuid": "db-missing"
}
]
}
}

View File

@@ -1,73 +1,366 @@
# [DEF:backend.tests.test_dashboards_api:Module]
# @TIER: STANDARD
# @PURPOSE: Contract-driven tests for Dashboard Hub API
# @PURPOSE: Comprehensive contract-driven tests for Dashboard Hub API
# @LAYER: Domain (Tests)
# @SEMANTICS: tests, dashboards, api, contract
# @RELATION: TESTS -> backend.src.api.routes.dashboards
# @SEMANTICS: tests, dashboards, api, contract, remediation
import pytest
from fastapi.testclient import TestClient
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock, patch, AsyncMock
from datetime import datetime, timezone
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
from src.api.routes.dashboards import DashboardsResponse, DashboardDetailResponse, DashboardTaskHistoryResponse, DatabaseMappingsResponse
from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
# Global mock user
mock_user = MagicMock()
mock_user.username = "testuser"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
@pytest.fixture(autouse=True)
def mock_deps():
config_manager = MagicMock()
task_manager = MagicMock()
resource_service = MagicMock()
mapping_service = MagicMock()
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
app.dependency_overrides[get_resource_service] = lambda: resource_service
app.dependency_overrides[get_mapping_service] = lambda: mapping_service
app.dependency_overrides[get_current_user] = lambda: mock_user
# Overrides for specific permission checks
app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("dashboards", "READ")] = lambda: mock_user
yield {
"config": config_manager,
"task": task_manager,
"resource": resource_service,
"mapping": mapping_service
}
app.dependency_overrides.clear()
client = TestClient(app)
# [DEF:test_get_dashboards_success:Function]
# @TEST: GET /api/dashboards returns 200 and valid schema
# @PRE: env_id exists
# @POST: Response matches DashboardsResponse schema
def test_get_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# --- 1. get_dashboards tests ---
def test_get_dashboards_success(mock_deps):
"""Uses @TEST_FIXTURE: dashboard_list_happy data."""
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
# @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"}
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
{"id": 1, "title": "Main Revenue", "slug": "main-revenue", "git_status": {"branch": "main", "sync_status": "OK"}}
])
response = client.get("/api/dashboards?env_id=prod&page=1&page_size=10")
assert response.status_code == 200
data = response.json()
# exhaustive @POST assertions
assert "dashboards" in data
assert len(data["dashboards"]) == 1 # @TEST_FIXTURE: expected_count: 1
assert data["dashboards"][0]["title"] == "Main Revenue"
assert data["total"] == 1
assert data["page"] == 1
assert data["page_size"] == 10
assert data["total_pages"] == 1
# schema validation
DashboardsResponse(**data)
def test_get_dashboards_with_search(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
{"id": 1, "title": "Sales Report", "slug": "sales"},
{"id": 2, "title": "Marketing", "slug": "marketing"}
])
# Mock environment
response = client.get("/api/dashboards?env_id=prod&search=sales")
assert response.status_code == 200
data = response.json()
assert len(data["dashboards"]) == 1
assert data["dashboards"][0]["title"] == "Sales Report"
def test_get_dashboards_empty(mock_deps):
"""@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
mock_env = MagicMock()
mock_env.id = "empty_env"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[])
response = client.get("/api/dashboards?env_id=empty_env")
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert len(data["dashboards"]) == 0
assert data["total_pages"] == 1
DashboardsResponse(**data)
def test_get_dashboards_superset_failure(mock_deps):
"""@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
mock_env = MagicMock()
mock_env.id = "bad_conn"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(
side_effect=Exception("Connection refused")
)
response = client.get("/api/dashboards?env_id=bad_conn")
assert response.status_code == 503
assert "Failed to fetch dashboards" in response.json()["detail"]
def test_get_dashboards_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
def test_get_dashboards_invalid_pagination(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# page < 1
assert client.get("/api/dashboards?env_id=prod&page=0").status_code == 400
assert client.get("/api/dashboards?env_id=prod&page=-1").status_code == 400
# page_size < 1
assert client.get("/api/dashboards?env_id=prod&page_size=0").status_code == 400
# page_size > 100
assert client.get("/api/dashboards?env_id=prod&page_size=101").status_code == 400
# --- 2. get_database_mappings tests ---
def test_get_database_mappings_success(mock_deps):
mock_s = MagicMock(); mock_s.id = "s"
mock_t = MagicMock(); mock_t.id = "t"
mock_deps["config"].get_environments.return_value = [mock_s, mock_t]
mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[
{"source_db": "src", "target_db": "dst", "confidence": 0.9}
])
response = client.get("/api/dashboards/db-mappings?source_env_id=s&target_env_id=t")
assert response.status_code == 200
data = response.json()
assert len(data["mappings"]) == 1
assert data["mappings"][0]["confidence"] == 0.9
DatabaseMappingsResponse(**data)
def test_get_database_mappings_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t")
assert response.status_code == 404
# --- 3. get_dashboard_detail tests ---
def test_get_dashboard_detail_success(mock_deps):
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_deps["config"].get_environments.return_value = [mock_env]
# Mock resource service response
mock_service.return_value.get_dashboards_with_status.return_value = [
{
"id": 1,
"title": "Sales Report",
"slug": "sales",
"git_status": {"branch": "main", "sync_status": "OK"},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
]
# Mock permission
mock_perm.return_value = lambda: True
mock_client = MagicMock()
detail_payload = {
"id": 42, "title": "Detail", "charts": [], "datasets": [],
"chart_count": 0, "dataset_count": 0
}
mock_client.get_dashboard_detail.return_value = detail_payload
mock_client_cls.return_value = mock_client
response = client.get("/api/dashboards?env_id=prod")
response = client.get("/api/dashboards/42?env_id=prod")
assert response.status_code == 200
data = response.json()
assert "dashboards" in data
assert len(data["dashboards"]) == 1
assert data["dashboards"][0]["title"] == "Sales Report"
# Validate against Pydantic model
DashboardsResponse(**data)
assert data["id"] == 42
DashboardDetailResponse(**data)
# [/DEF:test_get_dashboards_success:Function]
def test_get_dashboard_detail_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/42?env_id=missing")
assert response.status_code == 404
# [DEF:test_get_dashboards_env_not_found:Function]
# @TEST: GET /api/dashboards returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
def test_get_dashboards_env_not_found():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# --- 4. get_dashboard_tasks_history tests ---
def test_get_dashboard_tasks_history_success(mock_deps):
now = datetime.now(timezone.utc)
task1 = MagicMock(id="t1", plugin_id="superset-backup", status="SUCCESS", started_at=now, finished_at=None, params={"env": "prod", "dashboards": [42]}, result={})
mock_deps["task"].get_all_tasks.return_value = [task1]
response = client.get("/api/dashboards/42/tasks?env_id=prod")
assert response.status_code == 200
data = response.json()
assert data["dashboard_id"] == 42
assert len(data["items"]) == 1
DashboardTaskHistoryResponse(**data)
def test_get_dashboard_tasks_history_sorting(mock_deps):
"""@POST: Response contains sorted task history (newest first)."""
from datetime import timedelta
now = datetime.now(timezone.utc)
older = now - timedelta(hours=2)
newest = now
task_old = MagicMock(id="t-old", plugin_id="superset-backup", status="SUCCESS",
started_at=older, finished_at=None,
params={"env": "prod", "dashboards": [42]}, result={})
task_new = MagicMock(id="t-new", plugin_id="superset-backup", status="RUNNING",
started_at=newest, finished_at=None,
params={"env": "prod", "dashboards": [42]}, result={})
# Provide tasks in the wrong order to verify that the endpoint sorts them
mock_deps["task"].get_all_tasks.return_value = [task_old, task_new]
response = client.get("/api/dashboards/42/tasks?env_id=prod")
assert response.status_code == 200
data = response.json()
assert len(data["items"]) == 2
# Newest first
assert data["items"][0]["id"] == "t-new"
assert data["items"][1]["id"] == "t-old"
# --- 5. get_dashboard_thumbnail tests ---
def test_get_dashboard_thumbnail_success(mock_deps):
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_client = MagicMock()
mock_response = MagicMock(status_code=200, content=b"img", headers={"Content-Type": "image/png"})
mock_client.network.request.side_effect = lambda method, endpoint, **kw: {"image_url": "url"} if method == "POST" else mock_response
mock_client_cls.return_value = mock_client
response = client.get("/api/dashboards/42/thumbnail?env_id=prod")
assert response.status_code == 200
assert response.content == b"img"
def test_get_dashboard_thumbnail_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/42/thumbnail?env_id=missing")
assert response.status_code == 404
def test_get_dashboard_thumbnail_202(mock_deps):
"""@POST: Returns 202 when thumbnail is being prepared by Superset."""
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_client = MagicMock()
mock_config.return_value.get_environments.return_value = []
mock_perm.return_value = lambda: True
# POST cache_dashboard_screenshot returns image_url
mock_client.network.request.side_effect = [
{"image_url": "/api/v1/dashboard/42/thumbnail/abc123/"}, # POST
MagicMock(status_code=202, json=lambda: {"message": "Thumbnail is being generated"},
headers={"Content-Type": "application/json"}) # GET thumbnail -> 202
]
mock_client_cls.return_value = mock_client
response = client.get("/api/dashboards?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
response = client.get("/api/dashboards/42/thumbnail?env_id=prod")
assert response.status_code == 202
assert "Thumbnail is being generated" in response.json()["message"]
# [/DEF:test_get_dashboards_env_not_found:Function]
# --- 6. migrate_dashboards tests ---
def test_migrate_dashboards_success(mock_deps):
mock_s = MagicMock(); mock_s.id = "s"
mock_t = MagicMock(); mock_t.id = "t"
mock_deps["config"].get_environments.return_value = [mock_s, mock_t]
mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="task-123"))
response = client.post("/api/dashboards/migrate", json={
"source_env_id": "s", "target_env_id": "t", "dashboard_ids": [1]
})
assert response.status_code == 200
assert response.json()["task_id"] == "task-123"
def test_migrate_dashboards_pre_checks(mock_deps):
# Missing IDs
response = client.post("/api/dashboards/migrate", json={
"source_env_id": "s", "target_env_id": "t", "dashboard_ids": []
})
assert response.status_code == 400
assert "At least one dashboard ID must be provided" in response.json()["detail"]
def test_migrate_dashboards_env_not_found(mock_deps):
"""@PRE: source_env_id and target_env_id are valid environment IDs."""
mock_deps["config"].get_environments.return_value = []
response = client.post("/api/dashboards/migrate", json={
"source_env_id": "ghost", "target_env_id": "t", "dashboard_ids": [1]
})
assert response.status_code == 404
assert "Source environment not found" in response.json()["detail"]
# --- 7. backup_dashboards tests ---
def test_backup_dashboards_success(mock_deps):
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="backup-123"))
response = client.post("/api/dashboards/backup", json={
"env_id": "prod", "dashboard_ids": [1]
})
assert response.status_code == 200
assert response.json()["task_id"] == "backup-123"
def test_backup_dashboards_pre_checks(mock_deps):
response = client.post("/api/dashboards/backup", json={
"env_id": "prod", "dashboard_ids": []
})
assert response.status_code == 400
def test_backup_dashboards_env_not_found(mock_deps):
"""@PRE: env_id is a valid environment ID."""
mock_deps["config"].get_environments.return_value = []
response = client.post("/api/dashboards/backup", json={
"env_id": "ghost", "dashboard_ids": [1]
})
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
def test_backup_dashboards_with_schedule(mock_deps):
"""@POST: If schedule is provided, a scheduled task is created."""
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="sched-456"))
response = client.post("/api/dashboards/backup", json={
"env_id": "prod", "dashboard_ids": [1], "schedule": "0 0 * * *"
})
assert response.status_code == 200
assert response.json()["task_id"] == "sched-456"
# Verify schedule was propagated to create_task
call_kwargs = mock_deps["task"].create_task.call_args
task_params = call_kwargs.kwargs.get("params") or call_kwargs[1].get("params", {})
assert task_params["schedule"] == "0 0 * * *"
# --- 8. Internal logic: _task_matches_dashboard ---
from src.api.routes.dashboards import _task_matches_dashboard
def test_task_matches_dashboard_logic():
task = MagicMock(plugin_id="superset-backup", params={"dashboards": [42], "env": "prod"})
assert _task_matches_dashboard(task, 42, "prod") is True
assert _task_matches_dashboard(task, 43, "prod") is False
assert _task_matches_dashboard(task, 42, "dev") is False
llm_task = MagicMock(plugin_id="llm_dashboard_validation", params={"dashboard_id": 42, "environment_id": "prod"})
assert _task_matches_dashboard(llm_task, 42, "prod") is True
assert _task_matches_dashboard(llm_task, 42, None) is True
# [/DEF:backend.tests.test_dashboards_api:Module]

View File

@@ -1,6 +1,6 @@
import pytest
from fastapi.testclient import TestClient
from unittest.mock import MagicMock
from unittest.mock import MagicMock, AsyncMock
from src.app import app
from src.dependencies import get_config_manager, get_task_manager, get_resource_service, has_permission
@@ -27,10 +27,10 @@ def mock_deps():
task_manager.get_all_tasks.return_value = []
# Mock dashboards
resource_service.get_dashboards_with_status.return_value = [
resource_service.get_dashboards_with_status = AsyncMock(return_value=[
{"id": 1, "title": "Sales", "slug": "sales", "git_status": {"branch": "main", "sync_status": "OK"}, "last_task": None},
{"id": 2, "title": "Marketing", "slug": "mkt", "git_status": None, "last_task": {"task_id": "t1", "status": "SUCCESS"}}
]
])
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
@@ -39,6 +39,10 @@ def mock_deps():
# Bypass permission check
mock_user = MagicMock()
mock_user.username = "testadmin"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
# Override both get_current_user and has_permission
from src.dependencies import get_current_user
@@ -85,9 +89,9 @@ def test_get_dashboards_search(mock_deps):
# @TEST: Negative - Service failure returns 503
def test_get_datasets_success(mock_deps):
mock_deps["resource"].get_datasets_with_status.return_value = [
mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
{"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None}
]
])
response = client.get("/api/datasets?env_id=env1")
assert response.status_code == 200
@@ -102,10 +106,10 @@ def test_get_datasets_not_found(mock_deps):
assert response.status_code == 404
def test_get_datasets_search(mock_deps):
mock_deps["resource"].get_datasets_with_status.return_value = [
mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
{"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None},
{"id": 2, "table_name": "users", "schema": "public", "database": "db1", "mapped_fields": {"total": 5, "mapped": 5}, "last_task": None}
]
])
response = client.get("/api/datasets?env_id=env1&search=orders")
assert response.status_code == 200
@@ -114,10 +118,39 @@ def test_get_datasets_search(mock_deps):
assert data["datasets"][0]["table_name"] == "orders"
def test_get_datasets_service_failure(mock_deps):
mock_deps["resource"].get_datasets_with_status.side_effect = Exception("Superset down")
mock_deps["resource"].get_datasets_with_status = AsyncMock(side_effect=Exception("Superset down"))
response = client.get("/api/datasets?env_id=env1")
assert response.status_code == 503
assert "Failed to fetch datasets" in response.json()["detail"]
# [/DEF:test_datasets_api:Test]
# [DEF:test_pagination_boundaries:Test]
# @PURPOSE: Verify pagination validation for GET endpoints
# @TEST: page<1 and page_size>100 return 400
def test_get_dashboards_pagination_zero_page(mock_deps):
"""@TEST_EDGE: pagination_zero_page -> {page:0, status:400}"""
response = client.get("/api/dashboards?env_id=env1&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
def test_get_dashboards_pagination_oversize(mock_deps):
"""@TEST_EDGE: pagination_oversize -> {page_size:101, status:400}"""
response = client.get("/api/dashboards?env_id=env1&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
def test_get_datasets_pagination_zero_page(mock_deps):
"""@TEST_EDGE: pagination_zero_page on datasets"""
response = client.get("/api/datasets?env_id=env1&page=0")
assert response.status_code == 400
def test_get_datasets_pagination_oversize(mock_deps):
"""@TEST_EDGE: pagination_oversize on datasets"""
response = client.get("/api/datasets?env_id=env1&page_size=101")
assert response.status_code == 400
# [/DEF:test_pagination_boundaries:Test]

View File

@@ -41,7 +41,7 @@ describe('AssistantChatPanel integration contract', () => {
const source = fs.readFileSync(COMPONENT_PATH, 'utf-8');
expect(source).toContain('<!-- [DEF' + ':AssistantChatPanel:Component] -->');
expect(source).toContain('@TIER: STANDARD');
expect(source).toContain('@TIER: CRITICAL');
expect(source).toContain('@UX_STATE: LoadingHistory');
expect(source).toContain('@UX_STATE: Sending');
expect(source).toContain('@UX_STATE: Error');

View File

@@ -13,6 +13,12 @@ vi.mock('$lib/api/assistant', () => ({
sendAssistantMessage: vi.fn()
}));
vi.mock('$lib/api', () => ({
api: {
getLlmStatus: vi.fn(() => Promise.resolve({ configured: true }))
}
}));
vi.mock('$lib/toasts', () => ({
addToast: vi.fn()
}));
@@ -49,15 +55,16 @@ vi.mock('$lib/i18n', () => ({
describe('AssistantChatPanel confirmation functional tests', () => {
const mockMessage = {
id: 'msg-123',
message_id: 'msg-123',
role: 'assistant',
text: 'Confirm migration?',
created_at: new Date().toISOString(),
confirmation: {
id: 'conf-123',
type: 'migration_execute',
status: 'pending'
}
conversation_id: 'conv-1',
confirmation_id: 'conf-123',
actions: [
{ type: 'confirm', label: 'Confirm' },
{ type: 'cancel', label: 'Cancel' }
]
};
beforeEach(() => {
@@ -66,20 +73,16 @@ describe('AssistantChatPanel confirmation functional tests', () => {
it('renders action buttons and triggers confirm API call', async () => {
// Mock getAssistantHistory to return our message
api.getAssistantHistory.mockResolvedValue({
api.getAssistantHistory.mockImplementation(async () => ({
items: [mockMessage],
total: 1,
has_next: false
});
}));
render(AssistantChatPanel);
// Wait for message to render
await waitFor(() => {
expect(screen.getByText('Confirm migration?')).toBeTruthy();
});
const confirmBtn = screen.getByText('Confirm');
const confirmBtn = await screen.findByText('Confirm', {}, { timeout: 3000 });
expect(confirmBtn).toBeTruthy();
await fireEvent.click(confirmBtn);
@@ -88,40 +91,38 @@ describe('AssistantChatPanel confirmation functional tests', () => {
});
it('triggers cancel API call when cancel button is clicked', async () => {
api.getAssistantHistory.mockResolvedValue({
api.getAssistantHistory.mockImplementation(async () => ({
items: [mockMessage],
total: 1,
has_next: false
});
}));
render(AssistantChatPanel);
await waitFor(() => {
expect(screen.getByText('Cancel')).toBeTruthy();
});
const cancelBtn = screen.getByText('Cancel');
const cancelBtn = await screen.findByText('Cancel', {}, { timeout: 3000 });
await fireEvent.click(cancelBtn);
expect(api.cancelAssistantOperation).toHaveBeenCalledWith('conf-123');
});
it('shows toast error when action fails', async () => {
api.getAssistantHistory.mockResolvedValue({
api.getAssistantHistory.mockImplementation(async () => ({
items: [mockMessage],
total: 1,
has_next: false
}));
api.confirmAssistantOperation.mockImplementation(async () => {
throw new Error('Network error');
});
api.confirmAssistantOperation.mockRejectedValue(new Error('Network error'));
render(AssistantChatPanel);
await waitFor(() => screen.getByText('Confirm'));
await fireEvent.click(screen.getByText('Confirm'));
const confirmBtn = await screen.findByText('Confirm', {}, { timeout: 3000 });
await fireEvent.click(confirmBtn);
await waitFor(() => {
// The component appends a failed message to the chat
expect(screen.getAllByText(/Network error/)).toBeTruthy();
});
}, { timeout: 3000 });
});
});

View File

@@ -24,6 +24,7 @@
import type {
DashboardMetadata,
DashboardSelection,
MigrationDryRunResult,
} from "../../types/dashboard";
import { t } from "$lib/i18n";
import { Button, Card, PageHeader } from "$lib/ui";
@@ -44,6 +45,8 @@
let mappings: any[] = [];
let suggestions: any[] = [];
let fetchingDbs = false;
let dryRunLoading = false;
let dryRunResult: MigrationDryRunResult | null = null;
// UI State for Modals
let showLogViewer = false;
@@ -253,6 +256,7 @@
error = "";
try {
dryRunResult = null;
const selection: DashboardSelection = {
selected_ids: selectedDashboardIds,
source_env_id: sourceEnvId,
@@ -296,6 +300,57 @@
}
}
// [/DEF:startMigration:Function]
// [DEF:startDryRun:Function]
/**
* @purpose Builds pre-flight diff and risk summary without applying migration.
* @pre source/target environments and selected dashboards are valid.
* @post dryRunResult is populated with the backend response.
* @UX_STATE: Idle -> Dry Run button is enabled when selection is valid.
* @UX_STATE: Loading -> Dry Run button shows "Dry Run..." and stays disabled.
* @UX_STATE: Error -> error banner is displayed and dryRunResult resets to null.
* @UX_FEEDBACK: User sees summary cards + risk block + JSON details after success.
* @UX_RECOVERY: User can adjust selection and press Dry Run again.
*/
async function startDryRun() {
if (!sourceEnvId || !targetEnvId) {
error =
$t.migration?.select_both_envs ||
"Please select both source and target environments.";
return;
}
if (sourceEnvId === targetEnvId) {
error =
$t.migration?.different_envs ||
"Source and target environments must be different.";
return;
}
if (selectedDashboardIds.length === 0) {
error =
$t.migration?.select_dashboards ||
"Please select at least one dashboard to migrate.";
return;
}
error = "";
dryRunLoading = true;
try {
const selection: DashboardSelection = {
selected_ids: selectedDashboardIds,
source_env_id: sourceEnvId,
target_env_id: targetEnvId,
replace_db_config: replaceDb,
fix_cross_filters: fixCrossFilters,
};
dryRunResult = await api.postApi("/migration/dry-run", selection);
} catch (e) {
error = e.message;
dryRunResult = null;
} finally {
dryRunLoading = false;
}
}
// [/DEF:startDryRun:Function]
</script>
<!-- [SECTION: TEMPLATE] -->
@@ -417,15 +472,70 @@
</div>
{/if}
-<Button
-on:click={startMigration}
-disabled={!sourceEnvId ||
-!targetEnvId ||
-sourceEnvId === targetEnvId ||
-selectedDashboardIds.length === 0}
->
-{$t.migration?.start }
-</Button>
+<div class="flex items-center gap-3">
+<Button
+variant="secondary"
+on:click={startDryRun}
+disabled={!sourceEnvId ||
+!targetEnvId ||
+sourceEnvId === targetEnvId ||
+selectedDashboardIds.length === 0 ||
+dryRunLoading}
+>
+{dryRunLoading ? "Dry Run..." : "Dry Run"}
+</Button>
+<Button
+on:click={startMigration}
+disabled={!sourceEnvId ||
+!targetEnvId ||
+sourceEnvId === targetEnvId ||
+selectedDashboardIds.length === 0}
+>
+{$t.migration?.start || "Apply"}
+</Button>
+</div>
+{#if dryRunResult}
+<div class="mt-6 rounded-md border border-slate-200 bg-slate-50 p-4 space-y-3">
+<h3 class="text-base font-semibold">Pre-flight Diff</h3>
+<div class="grid grid-cols-1 md:grid-cols-3 gap-3 text-sm">
+<div class="rounded border border-slate-200 bg-white p-3">
+<div class="font-medium mb-1">Dashboards</div>
+<div>create: {dryRunResult.summary.dashboards.create}</div>
+<div>update: {dryRunResult.summary.dashboards.update}</div>
+<div>delete: {dryRunResult.summary.dashboards.delete}</div>
+</div>
+<div class="rounded border border-slate-200 bg-white p-3">
+<div class="font-medium mb-1">Charts</div>
+<div>create: {dryRunResult.summary.charts.create}</div>
+<div>update: {dryRunResult.summary.charts.update}</div>
+<div>delete: {dryRunResult.summary.charts.delete}</div>
+</div>
+<div class="rounded border border-slate-200 bg-white p-3">
+<div class="font-medium mb-1">Datasets</div>
+<div>create: {dryRunResult.summary.datasets.create}</div>
+<div>update: {dryRunResult.summary.datasets.update}</div>
+<div>delete: {dryRunResult.summary.datasets.delete}</div>
+</div>
+</div>
+<div class="rounded border border-slate-200 bg-white p-3 text-sm">
+<div class="font-medium mb-1">Risk</div>
+<div>
+score: {dryRunResult.risk.score}, level: {dryRunResult.risk.level}
+</div>
+<div class="mt-1">
+issues: {dryRunResult.risk.items.length}
+</div>
+</div>
+<details class="rounded border border-slate-200 bg-white p-3">
+<summary class="cursor-pointer font-medium">Diff JSON</summary>
+<pre class="mt-2 max-h-72 overflow-auto text-xs">{JSON.stringify(dryRunResult, null, 2)}</pre>
+</details>
+</div>
+{/if}
{/if}
</div>
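One observation on the added component code: startDryRun and both buttons' disabled bindings repeat the same three-part validity check (both environments chosen, environments differ, at least one dashboard selected). A sketch of how that predicate could be factored out; validateSelection is a hypothetical helper, not part of this change:

// Hypothetical extraction of the guard logic; not in the diff above.
interface SelectionState {
  sourceEnvId: string;
  targetEnvId: string;
  selectedDashboardIds: string[];
}

// Returns an error message for invalid selections, or null when valid,
// mirroring the three guard clauses in startDryRun.
function validateSelection(s: SelectionState): string | null {
  if (!s.sourceEnvId || !s.targetEnvId)
    return "Please select both source and target environments.";
  if (s.sourceEnvId === s.targetEnvId)
    return "Source and target environments must be different.";
  if (s.selectedDashboardIds.length === 0)
    return "Please select at least one dashboard to migrate.";
  return null;
}

The buttons could then share disabled={validateSelection(...) !== null} instead of duplicating the four-line expression, with dryRunLoading as the Dry Run button's only extra condition.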

View File

@@ -16,5 +16,48 @@ export interface DashboardSelection {
source_env_id: string;
target_env_id: string;
replace_db_config?: boolean;
+fix_cross_filters?: boolean;
}
-// [/DEF:DashboardTypes:Module]
+export interface DiffObjectRef {
+uuid: string;
+title?: string;
+target_title?: string;
+}
+export interface DiffBucket {
+create: DiffObjectRef[];
+update: DiffObjectRef[];
+delete: DiffObjectRef[];
+}
+export interface DryRunRiskItem {
+code: string;
+severity: "low" | "medium" | "high";
+object_type: string;
+object_uuid: string;
+message: string;
+}
+export interface MigrationDryRunResult {
+generated_at: string;
+selection: DashboardSelection;
+selected_dashboard_titles: string[];
+diff: {
+dashboards: DiffBucket;
+charts: DiffBucket;
+datasets: DiffBucket;
+};
+summary: {
+dashboards: Record<"create" | "update" | "delete", number>;
+charts: Record<"create" | "update" | "delete", number>;
+datasets: Record<"create" | "update" | "delete", number>;
+selected_dashboards: number;
+};
+risk: {
+score: number;
+level: "low" | "medium" | "high";
+items: DryRunRiskItem[];
+};
+}
+// [/DEF:DashboardTypes:Module]
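To make the new contract concrete, an illustrative value conforming to MigrationDryRunResult; the IDs, titles, risk code, and the assumption that each summary count mirrors the length of the corresponding diff bucket are mine, not taken from the backend (that diff is suppressed below):

import type { DiffBucket, MigrationDryRunResult } from "./dashboard"; // path assumed

const empty: DiffBucket = { create: [], update: [], delete: [] };

const example: MigrationDryRunResult = {
  generated_at: "2026-02-27T17:48:00Z",
  selection: {
    selected_ids: ["dash-1"],
    source_env_id: "env-dev",
    target_env_id: "env-prod",
    replace_db_config: true,
    fix_cross_filters: false,
  },
  selected_dashboard_titles: ["Sales Overview"],
  diff: {
    dashboards: { ...empty, update: [{ uuid: "u-1", title: "Sales Overview" }] },
    charts: empty,
    datasets: { ...empty, create: [{ uuid: "u-2", title: "orders" }] },
  },
  summary: {
    dashboards: { create: 0, update: 1, delete: 0 }, // assumed to equal bucket lengths
    charts: { create: 0, update: 0, delete: 0 },
    datasets: { create: 1, update: 0, delete: 0 },
    selected_dashboards: 1,
  },
  risk: {
    score: 12, // scoring scale is not visible in this diff
    level: "low",
    items: [
      {
        code: "dataset_create", // illustrative code value
        severity: "low",
        object_type: "dataset",
        object_uuid: "u-2",
        message: "Dataset will be created on the target environment.",
      },
    ],
  },
};

This is the shape the summary cards and risk block in the migration page read from.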

File diff suppressed because it is too large.