Compare commits: master...6d64124e88 (17 commits)

| SHA1 |
|---|
| 6d64124e88 |
| 3094a2b58b |
| ad6a7eb755 |
| 78f1e6803f |
| 3b22133d7a |
| 8728756a3f |
| 5f44435a4b |
| 43b9fe640d |
| ed3d5f3039 |
| 38bda6a714 |
| 18bdde0a81 |
| 023bacde39 |
| e916cb1f17 |
| c957207bce |
| f4416c3ebb |
| 9cae07a3b4 |
| 493a73827a |
@@ -2,12 +2,12 @@

> High-level module structure for AI Context. Generated automatically.

**Generated:** 2026-03-16T10:03:28.287790
**Generated:** 2026-03-16T22:51:06.491000

## Summary

- **Total Modules:** 105
- **Total Entities:** 3313
- **Total Entities:** 3358

## Module Hierarchy

@@ -20,27 +20,34 @@

**Key Entities:**

- 📦 **backend.delete_running_tasks** (Module) `[TRIVIAL]`
- 📦 **DeleteRunningTasksUtil** (Module) `[TRIVIAL]`
- Script to delete tasks with RUNNING status from the database...

**Dependencies:**

- 🔗 DEPENDS_ON -> TaskRecord
- 🔗 DEPENDS_ON -> TasksSessionLocal

### 📁 `src/`

- 🏗️ **Layers:** API, Core, UI (API)
- 📊 **Tiers:** CRITICAL: 2, STANDARD: 6, TRIVIAL: 18
- 📊 **Tiers:** CRITICAL: 2, STANDARD: 6, TRIVIAL: 20
- 📄 **Files:** 3
- 📦 **Entities:** 26
- 📦 **Entities:** 28

**Key Entities:**

- 📦 **AppDependencies** (Module)
- Manages creation and provision of shared application depende...
- 📦 **AppModule** (Module) `[CRITICAL]`
- The main entry point for the FastAPI application. It initial...
- 📦 **backend.src.dependencies** (Module)
- Manages creation and provision of shared application depende...
- 📦 **SrcRoot** (Module) `[TRIVIAL]`
- Canonical backend package root for application, scripts, and...

**Dependencies:**

- 🔗 DEPENDS_ON -> AppDependencies
- 🔗 DEPENDS_ON -> backend.src.api.routes
- 🔗 DEPENDS_ON -> backend.src.dependencies

### 📁 `api/`

@@ -51,18 +58,25 @@

**Key Entities:**

- 📦 **backend.src.api.auth** (Module)
- 📦 **AuthApi** (Module)
- Authentication API endpoints.

**Dependencies:**

- 🔗 DEPENDS_ON -> AuthRepository:Class
- 🔗 DEPENDS_ON -> get_current_user

### 📁 `routes/`

- 🏗️ **Layers:** API, Infra, UI (API), UI/API
- 📊 **Tiers:** CRITICAL: 7, STANDARD: 184, TRIVIAL: 111
- 📊 **Tiers:** CRITICAL: 7, STANDARD: 191, TRIVIAL: 107
- 📄 **Files:** 21
- 📦 **Entities:** 302
- 📦 **Entities:** 305

**Key Entities:**

- ℂ **ApprovalRequest** (Class) `[TRIVIAL]`
- Schema for approval request payload.
- ℂ **AssistantAction** (Class) `[TRIVIAL]`
- UI action descriptor returned with assistant responses.
- ℂ **AssistantMessageRequest** (Class) `[TRIVIAL]`
@@ -81,46 +95,39 @@
- Schema for staging and committing changes.
- ℂ **CommitSchema** (Class) `[TRIVIAL]`
- Schema for representing Git commit details.
- ℂ **ConfirmationRecord** (Class)
- In-memory confirmation token model for risky operation dispa...

**Dependencies:**

- 🔗 DEPENDS_ON -> AppDependencies
- 🔗 DEPENDS_ON -> backend.src.core.config_manager.ConfigManager
- 🔗 DEPENDS_ON -> backend.src.core.config_models
- 🔗 DEPENDS_ON -> backend.src.core.database
- 🔗 DEPENDS_ON -> backend.src.core.database.get_db
- 🔗 DEPENDS_ON -> backend.src.core.mapping_service.IdMappingService

### 📁 `__tests__/`

- 🏗️ **Layers:** API, Domain, Domain (Tests), Tests, UI (API Tests), Unknown
- 📊 **Tiers:** STANDARD: 16, TRIVIAL: 275
- 📊 **Tiers:** STANDARD: 16, TRIVIAL: 265
- 📄 **Files:** 18
- 📦 **Entities:** 291
- 📦 **Entities:** 281

**Key Entities:**

- ℂ **_FakeConfigManager** (Class) `[TRIVIAL]`
- Provide deterministic environment aliases required by intent...
- ℂ **_FakeConfigManager** (Class) `[TRIVIAL]`
- Environment config fixture with dev/prod aliases for parser ...
- ℂ **_FakeDb** (Class) `[TRIVIAL]`
- In-memory session substitute for assistant route persistence...
- ℂ **_FakeDb** (Class) `[TRIVIAL]`
- In-memory fake database implementing subset of Session inter...
- ℂ **_FakeQuery** (Class) `[TRIVIAL]`
- Minimal chainable query object for fake DB interactions.
- ℂ **_FakeQuery** (Class) `[TRIVIAL]`
- Minimal chainable query object for fake SQLAlchemy-like DB b...
- ℂ **_FakeTask** (Class) `[TRIVIAL]`
- Lightweight task model used for assistant authz tests.
- ℂ **_FakeTask** (Class) `[TRIVIAL]`
- Lightweight task stub used by assistant API tests.
- ℂ **_FakeTaskManager** (Class) `[TRIVIAL]`
- Minimal task manager for deterministic operation creation an...
- ℂ **_FakeTaskManager** (Class) `[TRIVIAL]`
- Minimal async-compatible TaskManager fixture for determinist...

**Dependencies:**

@@ -130,9 +137,9 @@
### 📁 `core/`

- 🏗️ **Layers:** Core, Domain, Infra
- 📊 **Tiers:** CRITICAL: 9, STANDARD: 66, TRIVIAL: 134
- 📊 **Tiers:** CRITICAL: 9, STANDARD: 49, TRIVIAL: 153
- 📄 **Files:** 14
- 📦 **Entities:** 209
- 📦 **Entities:** 211

**Key Entities:**

@@ -159,11 +166,11 @@

**Dependencies:**

- 🔗 DEPENDS_ON -> AppConfig
- 🔗 DEPENDS_ON -> AppConfigRecord
- 🔗 DEPENDS_ON -> SessionLocal
- 🔗 DEPENDS_ON -> backend.src.core.auth.config
- 🔗 DEPENDS_ON -> backend.src.core.config_models.AppConfig
- 🔗 DEPENDS_ON -> backend.src.core.config_models.Environment
- 🔗 DEPENDS_ON -> backend.src.core.database.SessionLocal
- 🔗 DEPENDS_ON -> backend.src.core.logger

### 📁 `__tests__/`

@@ -186,16 +193,18 @@
### 📁 `auth/`

- 🏗️ **Layers:** Core, Domain
- 📊 **Tiers:** CRITICAL: 17, STANDARD: 2, TRIVIAL: 10
- 📊 **Tiers:** CRITICAL: 6, STANDARD: 2, TRIVIAL: 20
- 📄 **Files:** 7
- 📦 **Entities:** 29
- 📦 **Entities:** 28

**Key Entities:**

- ℂ **AuthConfig** (Class) `[CRITICAL]`
- Holds authentication-related settings.
- ℂ **AuthRepository** (Class) `[CRITICAL]`
- Encapsulates database operations for authentication-related ...
- Initialize repository with database session.
- 📦 **AuthRepository** (Module) `[CRITICAL]`
- Data access layer for authentication and user preference ent...
- 📦 **backend.src.core.auth.config** (Module) `[CRITICAL]`
- Centralized configuration for authentication and authorizati...
- 📦 **backend.src.core.auth.jwt** (Module)
@@ -204,18 +213,16 @@
- Audit logging for security-related events.
- 📦 **backend.src.core.auth.oauth** (Module) `[CRITICAL]`
- ADFS OIDC configuration and client using Authlib.
- 📦 **backend.src.core.auth.repository** (Module) `[CRITICAL]`
- Data access layer for authentication and user preference ent...
- 📦 **backend.src.core.auth.security** (Module) `[CRITICAL]`
- Utility for password hashing and verification using Passlib.

**Dependencies:**

- 🔗 DEPENDS_ON -> Permission:Class
- 🔗 DEPENDS_ON -> Role:Class
- 🔗 DEPENDS_ON -> User:Class
- 🔗 DEPENDS_ON -> UserDashboardPreference:Class
- 🔗 DEPENDS_ON -> authlib
- 🔗 DEPENDS_ON -> backend.src.core.logger.belief_scope
- 🔗 DEPENDS_ON -> backend.src.models.auth
- 🔗 DEPENDS_ON -> backend.src.models.profile
- 🔗 DEPENDS_ON -> jose

### 📁 `__tests__/`

@@ -304,10 +311,10 @@
**Dependencies:**

- 🔗 DEPENDS_ON -> Environment
- 🔗 DEPENDS_ON -> PluginLoader:Class
- 🔗 DEPENDS_ON -> TaskLogPersistenceService:Class
- 🔗 DEPENDS_ON -> TaskLogRecord
- 🔗 DEPENDS_ON -> TaskLogger, USED_BY -> plugins
- 🔗 DEPENDS_ON -> TaskManager, CALLS -> TaskManager._add_log
- 🔗 DEPENDS_ON -> TaskManagerModels

### 📁 `__tests__/`

@@ -326,7 +333,7 @@
### 📁 `utils/`

- 🏗️ **Layers:** Core, Domain, Infra
- 📊 **Tiers:** CRITICAL: 1, STANDARD: 10, TRIVIAL: 61
- 📊 **Tiers:** CRITICAL: 1, STANDARD: 11, TRIVIAL: 60
- 📄 **Files:** 6
- 📦 **Entities:** 72

@@ -393,10 +400,10 @@

**Dependencies:**

- 🔗 DEPENDS_ON -> AuthModels
- 🔗 DEPENDS_ON -> Role
- 🔗 DEPENDS_ON -> TaskRecord
- 🔗 DEPENDS_ON -> backend.src.core.task_manager.models
- 🔗 DEPENDS_ON -> backend.src.models.auth
- 🔗 DEPENDS_ON -> backend.src.models.mapping

### 📁 `__tests__/`
@@ -618,9 +625,9 @@
### 📁 `services/`

- 🏗️ **Layers:** Core, Domain, Domain/Service, Service
- 📊 **Tiers:** CRITICAL: 5, STANDARD: 47, TRIVIAL: 118
- 📊 **Tiers:** CRITICAL: 5, STANDARD: 47, TRIVIAL: 164
- 📄 **Files:** 10
- 📦 **Entities:** 170
- 📦 **Entities:** 216

**Key Entities:**

@@ -1346,9 +1353,9 @@
### 📁 `layout/`

- 🏗️ **Layers:** UI, Unknown
- 📊 **Tiers:** STANDARD: 7, TRIVIAL: 54
- 📊 **Tiers:** STANDARD: 7, TRIVIAL: 55
- 📄 **Files:** 5
- 📦 **Entities:** 61
- 📦 **Entities:** 62

**Key Entities:**

@@ -2066,10 +2073,10 @@

### 📁 `root/`

- 🏗️ **Layers:** DevOps/Tooling
- 📊 **Tiers:** CRITICAL: 11, STANDARD: 18, TRIVIAL: 11
- 📄 **Files:** 1
- 📦 **Entities:** 40
- 🏗️ **Layers:** DevOps/Tooling, Unknown
- 📊 **Tiers:** CRITICAL: 11, STANDARD: 18, TRIVIAL: 13
- 📄 **Files:** 2
- 📦 **Entities:** 42

**Key Entities:**

@@ -2087,16 +2094,14 @@
- Legacy tier buckets retained for backward-compatible reporti...
- 📦 **generate_semantic_map** (Module)
- Scans the codebase to generate a Semantic Map, Module Map, a...
- 📦 **merge_spec** (Module) `[TRIVIAL]`
- Auto-generated module for merge_spec.py

## Cross-Module Dependencies

```mermaid
graph TD
src-->|DEPENDS_ON|backend
src-->|DEPENDS_ON|backend
api-->|USES|backend
api-->|USES|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|CALLS|backend
@@ -2123,7 +2128,6 @@ graph TD
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|USES|backend
routes-->|USES|backend
routes-->|CALLS|backend
@@ -2152,10 +2156,9 @@ graph TD
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|DEPENDS_ON|backend
routes-->|CALLS|backend
__tests__-->|TESTS|backend
__tests__-->|VERIFIES|backend
__tests__-->|TESTS|backend
__tests__-->|TESTS|backend
__tests__-->|TESTS|backend
@@ -2177,11 +2180,6 @@ graph TD
core-->|DEPENDS_ON|backend
core-->|DEPENDS_ON|backend
core-->|DEPENDS_ON|backend
core-->|CALLS|backend
core-->|CALLS|backend
core-->|DEPENDS_ON|backend
core-->|DEPENDS_ON|backend
core-->|DEPENDS_ON|backend
core-->|DEPENDS_ON|backend
core-->|INHERITS|backend
core-->|DEPENDS_ON|backend
@@ -2192,9 +2190,6 @@ graph TD
auth-->|USES|backend
auth-->|USES|backend
auth-->|USES|backend
auth-->|DEPENDS_ON|backend
auth-->|DEPENDS_ON|backend
auth-->|DEPENDS_ON|backend
migration-->|DEPENDS_ON|backend
migration-->|DEPENDS_ON|backend
migration-->|DEPENDS_ON|backend
@@ -2206,8 +2201,6 @@ graph TD
task_manager-->|USED_BY|backend
task_manager-->|USED_BY|backend
task_manager-->|DEPENDS_ON|backend
task_manager-->|DEPENDS_ON|backend
task_manager-->|DEPENDS_ON|backend
utils-->|DEPENDS_ON|backend
utils-->|DEPENDS_ON|backend
utils-->|CALLS|backend
@@ -2220,9 +2213,6 @@ graph TD
models-->|DEPENDS_ON|backend
models-->|DEPENDS_ON|backend
models-->|USED_BY|backend
models-->|INHERITS_FROM|backend
models-->|DEPENDS_ON|backend
models-->|INHERITS_FROM|backend
__tests__-->|TESTS|backend
llm_analysis-->|IMPLEMENTS|backend
llm_analysis-->|IMPLEMENTS|backend
```

@@ -2,6 +2,11 @@

> Compressed view for AI Context. Generated automatically.

- 📦 **merge_spec** (`Module`) `[TRIVIAL]`
- 📝 Auto-generated module for merge_spec.py
- 🏗️ Layer: Unknown
- ƒ **merge_specs** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **generate_semantic_map** (`Module`)
- 📝 Scans the codebase to generate a Semantic Map, Module Map, and Compliance Report based on the System Standard.
- 🏗️ Layer: DevOps/Tooling
@@ -653,6 +658,8 @@
- 📝 Auto-detected function (orphan)
- ƒ **llmValidationBadgeClass** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **getStatusClass** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **stopTaskDetailsPolling** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **loadEnvironmentOptions** (`Function`) `[TRIVIAL]`
@@ -2185,16 +2192,18 @@
- 🔒 Invariant: Edit action keeps explicit click handler and opens normalized edit form.
- ƒ **provider_config_edit_contract_tests** (`Function`)
- 📝 Validate edit and delete handler wiring plus normalized edit form state mapping.
- 📦 **backend.delete_running_tasks** (`Module`) `[TRIVIAL]`
- 📦 **DeleteRunningTasksUtil** (`Module`) `[TRIVIAL]`
- 📝 Script to delete tasks with RUNNING status from the database.
- 🏗️ Layer: Utility
- 🔗 DEPENDS_ON -> `TasksSessionLocal`
- 🔗 DEPENDS_ON -> `TaskRecord`
- ƒ **delete_running_tasks** (`Function`) `[TRIVIAL]`
- 📝 Delete all tasks with RUNNING status from the database.
- 📦 **AppModule** (`Module`) `[CRITICAL]`
- 📝 The main entry point for the FastAPI application. It initializes the app, configures CORS, sets up dependencies, includes API routers, and defines the WebSocket endpoint for log streaming.
- 🏗️ Layer: UI (API)
- 🔒 Invariant: All WebSocket connections must be properly cleaned up on disconnect.
- 🔗 DEPENDS_ON -> `backend.src.dependencies`
- 🔗 DEPENDS_ON -> `AppDependencies`
- 🔗 DEPENDS_ON -> `backend.src.api.routes`
- 📦 **App** (`Global`) `[TRIVIAL]`
- 📝 The global FastAPI application instance.
@@ -2202,10 +2211,14 @@
- 📝 Handles application startup tasks, such as starting the scheduler.
- ƒ **shutdown_event** (`Function`)
- 📝 Handles application shutdown tasks, such as stopping the scheduler.
- ▦ **app_middleware** (`Block`) `[TRIVIAL]`
- 📝 Configure application-wide middleware (Session, CORS).
- ƒ **network_error_handler** (`Function`) `[TRIVIAL]`
- 📝 Global exception handler for NetworkError.
- ƒ **log_requests** (`Function`)
- 📝 Middleware to log incoming HTTP requests and their response status.
- ▦ **api_routes** (`Block`) `[TRIVIAL]`
- 📝 Register all application API routers.
- 📦 **api.include_routers** (`Action`) `[TRIVIAL]`
- 📝 Registers all API routers with the FastAPI application.
- 🏗️ Layer: API
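
The AppModule entries above describe a conventional FastAPI entry point: app creation, CORS middleware, router registration, and a WebSocket endpoint whose connections must be cleaned up on disconnect. A minimal sketch of that shape; module and endpoint names here are illustrative, not taken from the repository:

```python
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Application-wide middleware, mirroring the app_middleware block above.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # assumption: the real config restricts origins
    allow_methods=["*"],
    allow_headers=["*"],
)

active_connections: list[WebSocket] = []

@app.websocket("/ws/logs")  # hypothetical path
async def log_stream(ws: WebSocket) -> None:
    await ws.accept()
    active_connections.append(ws)
    try:
        while True:
            await ws.receive_text()  # keep the connection alive
    except WebSocketDisconnect:
        # Invariant from the map: connections are always cleaned up.
        active_connections.remove(ws)
```
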
@@ -2219,9 +2232,18 @@
- 📝 A simple root endpoint to confirm that the API is running when frontend is missing.
- ƒ **matches_filters** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **backend.src.dependencies** (`Module`)
- 📦 **AppDependencies** (`Module`)
- 📝 Manages creation and provision of shared application dependencies, such as PluginLoader and TaskManager, to avoid circular imports.
- 🏗️ Layer: Core
- 🔗 CALLS -> `CleanReleaseRepository`
- 🔗 CALLS -> `ConfigManager`
- 🔗 CALLS -> `PluginLoader`
- 🔗 CALLS -> `SchedulerService`
- 🔗 CALLS -> `TaskManager`
- 🔗 CALLS -> `get_all_plugin_configs`
- 🔗 CALLS -> `get_db`
- 🔗 CALLS -> `info`
- 🔗 CALLS -> `init_db`
- ƒ **get_config_manager** (`Function`) `[TRIVIAL]`
- 📝 Dependency injector for ConfigManager.
- ƒ **get_plugin_loader** (`Function`) `[TRIVIAL]`
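
The AppDependencies module is described as a shared-dependency provider that exists to break circular imports. A common way to realize that, sketched here as an assumption (the actual constructor arguments are not shown in the map; the `ConfigManager` import path is taken from the map's dependency list):

```python
from functools import lru_cache

class AppDependencies:
    """Lazily builds and caches shared services so modules can
    import this container instead of importing each other."""

    def __init__(self) -> None:
        self._config_manager = None

    @property
    def config_manager(self):
        if self._config_manager is None:
            # Import inside the accessor: this is the circular-import break.
            from backend.src.core.config_manager import ConfigManager
            self._config_manager = ConfigManager()
        return self._config_manager

@lru_cache(maxsize=1)
def get_dependencies() -> AppDependencies:
    return AppDependencies()

def get_config_manager():
    """FastAPI-style dependency injector, as named in the map."""
    return get_dependencies().config_manager
```
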
@@ -2246,7 +2268,7 @@
- 📝 Dependency for checking if the current user has a specific permission.
- ƒ **permission_checker** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **src** (`Package`) `[TRIVIAL]`
- 📦 **SrcRoot** (`Module`) `[TRIVIAL]`
- 📝 Canonical backend package root for application, scripts, and tests.
- 📦 **backend.src.scripts.seed_superset_load_test** (`Module`)
- 📝 Creates randomized load-test data in Superset by cloning chart configurations and creating dashboards in target environments.
@@ -2521,9 +2543,103 @@
- 🔗 DEPENDS_ON -> `backend.src.core.config_models.Environment`
- ƒ **backend.src.core.superset_client.SupersetClient.__init__** (`Function`)
- 📝 Initializes the client, validates the configuration, and creates the network client.
- ƒ **backend.src.core.superset_client.SupersetClient.authenticate** (`Function`)
- 📝 Authenticates the client using the configured credentials.
- 🔗 CALLS -> `self.network.authenticate`
- ƒ **backend.src.core.superset_client.SupersetClient.authenticate** (`Function`)
- 📝 Authenticates the client using the configured credentials.
- 🔗 CALLS -> `self.network.authenticate`
- ƒ **backend.src.core.superset_client.SupersetClient.headers** (`Function`) `[TRIVIAL]`
- 📝 Returns the base HTTP headers used by the network client.
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboards** (`Function`)
- 📝 Fetches the full list of dashboards, automatically handling pagination.
- 🔗 CALLS -> `self._fetch_all_pages`
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboards_page** (`Function`)
- 📝 Fetches a single dashboards page from Superset without iterating all pages.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboards_summary** (`Function`)
- 📝 Fetches dashboard metadata optimized for the grid.
- 🔗 CALLS -> `self.get_dashboards`
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboards_summary_page** (`Function`)
- 📝 Fetches one page of dashboard metadata optimized for the grid.
- 🔗 CALLS -> `self.get_dashboards_page`
- ƒ **backend.src.core.superset_client.SupersetClient._extract_owner_labels** (`Function`) `[TRIVIAL]`
- 📝 Normalize dashboard owners payload to stable display labels.
- ƒ **backend.src.core.superset_client.SupersetClient._extract_user_display** (`Function`) `[TRIVIAL]`
- 📝 Normalize user payload to a stable display name.
- ƒ **backend.src.core.superset_client.SupersetClient._sanitize_user_text** (`Function`) `[TRIVIAL]`
- 📝 Convert scalar value to non-empty user-facing text.
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboard** (`Function`)
- 📝 Fetches a single dashboard by ID.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.get_chart** (`Function`)
- 📝 Fetches a single chart by ID.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboard_detail** (`Function`)
- 📝 Fetches detailed dashboard information including related charts and datasets.
- 🔗 CALLS -> `self.get_dashboard`
- 🔗 CALLS -> `self.get_chart`
- ƒ **backend.src.core.superset_client.SupersetClient.get_dashboard_detail.extract_dataset_id_from_form_data** (`Function`) `[TRIVIAL]`
- ƒ **backend.src.core.superset_client.SupersetClient.get_charts** (`Function`)
- 📝 Fetches all charts with pagination support.
- 🔗 CALLS -> `self._fetch_all_pages`
- ƒ **backend.src.core.superset_client.SupersetClient._extract_chart_ids_from_layout** (`Function`) `[TRIVIAL]`
- 📝 Traverses dashboard layout metadata and extracts chart IDs from common keys.
- ƒ **backend.src.core.superset_client.SupersetClient.export_dashboard** (`Function`)
- 📝 Exports a dashboard as a ZIP archive.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.import_dashboard** (`Function`)
- 📝 Imports a dashboard from a ZIP file.
- 🔗 CALLS -> `self._do_import`
- 🔗 CALLS -> `self.delete_dashboard`
- ƒ **backend.src.core.superset_client.SupersetClient.delete_dashboard** (`Function`)
- 📝 Deletes a dashboard by its ID or slug.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.get_datasets** (`Function`)
- 📝 Fetches the full list of datasets, automatically handling pagination.
- 🔗 CALLS -> `self._fetch_all_pages`
- ƒ **backend.src.core.superset_client.SupersetClient.get_datasets_summary** (`Function`)
- 📝 Fetches dataset metadata optimized for the Dataset Hub grid.
- ƒ **backend.src.core.superset_client.SupersetClient.get_dataset_detail** (`Function`)
- 📝 Fetches detailed dataset information including columns and linked dashboards.
- 🔗 CALLS -> `self.get_dataset`
- 🔗 CALLS -> `self.network.request (for related_objects)`
- ƒ **backend.src.core.superset_client.SupersetClient.get_dataset** (`Function`)
- 📝 Fetches information about a specific dataset by its ID.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.update_dataset** (`Function`)
- 📝 Updates a dataset's data by its ID.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.get_databases** (`Function`)
- 📝 Fetches the full list of databases.
- 🔗 CALLS -> `self._fetch_all_pages`
- ƒ **backend.src.core.superset_client.SupersetClient.get_database** (`Function`)
- 📝 Fetches information about a specific database by its ID.
- 🔗 CALLS -> `self.network.request`
- ƒ **backend.src.core.superset_client.SupersetClient.get_databases_summary** (`Function`)
- 📝 Fetch a summary of databases including uuid, name, and engine.
- 🔗 CALLS -> `self.get_databases`
- ƒ **backend.src.core.superset_client.SupersetClient.get_database_by_uuid** (`Function`)
- 📝 Find a database by its UUID.
- 🔗 CALLS -> `self.get_databases`
- ƒ **backend.src.core.superset_client.SupersetClient._resolve_target_id_for_delete** (`Function`) `[TRIVIAL]`
- 📝 Resolves a dashboard ID from either an ID or a slug.
- 🔗 CALLS -> `self.get_dashboards`
- ƒ **backend.src.core.superset_client.SupersetClient._do_import** (`Function`) `[TRIVIAL]`
- 📝 Performs the actual multipart upload for import.
- 🔗 CALLS -> `self.network.upload_file`
- ƒ **backend.src.core.superset_client.SupersetClient._validate_export_response** (`Function`) `[TRIVIAL]`
- 📝 Validates that the export response is a non-empty ZIP archive.
- ƒ **backend.src.core.superset_client.SupersetClient._resolve_export_filename** (`Function`) `[TRIVIAL]`
- 📝 Determines the filename for an exported dashboard.
- ƒ **backend.src.core.superset_client.SupersetClient._validate_query_params** (`Function`) `[TRIVIAL]`
- 📝 Ensures query parameters have default page and page_size.
- ƒ **backend.src.core.superset_client.SupersetClient._fetch_total_object_count** (`Function`) `[TRIVIAL]`
- 📝 Fetches the total number of items for a given endpoint.
- 🔗 CALLS -> `self.network.fetch_paginated_count`
- ƒ **backend.src.core.superset_client.SupersetClient._fetch_all_pages** (`Function`) `[TRIVIAL]`
- 📝 Iterates through all pages to collect all data items.
- ƒ **backend.src.core.superset_client.SupersetClient._validate_import_file** (`Function`) `[TRIVIAL]`
- 📝 Validates that the file to be imported is a valid ZIP with metadata.yaml.
- ƒ **backend.src.core.superset_client.SupersetClient.get_all_resources** (`Function`)
- 📝 Fetches all resources of a given type with id, uuid, and name columns.
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **authenticate** (`Function`) `[TRIVIAL]`
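
Several SupersetClient methods above delegate to `_fetch_all_pages`, which the map describes as iterating through all pages to collect all data items. A plausible sketch of that loop, assuming a Superset-style paged API (`page`/`page_size` query params and a `result` list per response; the `request` callable here is hypothetical):

```python
from typing import Any, Callable

def fetch_all_pages(
    request: Callable[[str, dict[str, Any]], dict[str, Any]],
    endpoint: str,
    page_size: int = 100,
) -> list[dict[str, Any]]:
    """Collect items from every page of a paginated endpoint."""
    items: list[dict[str, Any]] = []
    page = 0
    while True:
        payload = request(endpoint, {"page": page, "page_size": page_size})
        batch = payload.get("result", [])
        items.extend(batch)
        if len(batch) < page_size:  # a short page means this was the last one
            break
        page += 1
    return items
```
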
@@ -2609,54 +2725,54 @@
- 🔗 DEPENDS_ON -> `backend.src.core.logger`
- ƒ **ensure_encryption_key** (`Function`) `[TRIVIAL]`
- 📝 Ensure backend runtime has a persistent valid Fernet key.
- 📦 **ConfigManagerModule** (`Module`) `[CRITICAL]`
- 📦 **ConfigManager** (`Module`) `[CRITICAL]`
- 📝 Manages application configuration persistence in DB with one-time migration from legacy JSON.
- 🏗️ Layer: Domain
- 🔒 Invariant: Configuration must always be representable by AppConfig and persisted under global record id.
- 🔗 DEPENDS_ON -> `backend.src.core.config_models.AppConfig`
- 🔗 DEPENDS_ON -> `backend.src.core.database.SessionLocal`
- 🔗 DEPENDS_ON -> `backend.src.models.config.AppConfigRecord`
- 🔗 CALLS -> `backend.src.core.logger.logger`
- 🔗 CALLS -> `backend.src.core.logger.configure_logger`
- 🔗 DEPENDS_ON -> `AppConfig`
- 🔗 DEPENDS_ON -> `SessionLocal`
- 🔗 DEPENDS_ON -> `AppConfigRecord`
- 🔗 CALLS -> `logger`
- 🔗 CALLS -> `configure_logger`
- ℂ **ConfigManager** (`Class`) `[CRITICAL]`
- 📝 Handles application configuration load, validation, mutation, and persistence lifecycle.
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Initialize manager state from persisted or migrated configuration.
- ƒ **_default_config** (`Function`)
- ƒ **_default_config** (`Function`) `[TRIVIAL]`
- 📝 Build default application configuration fallback.
- ƒ **_sync_raw_payload_from_config** (`Function`)
- ƒ **_sync_raw_payload_from_config** (`Function`) `[TRIVIAL]`
- 📝 Merge typed AppConfig state into raw payload while preserving unsupported legacy sections.
- ƒ **_load_from_legacy_file** (`Function`)
- ƒ **_load_from_legacy_file** (`Function`) `[TRIVIAL]`
- 📝 Load legacy JSON configuration for migration fallback path.
- ƒ **_get_record** (`Function`)
- ƒ **_get_record** (`Function`) `[TRIVIAL]`
- 📝 Resolve global configuration record from DB.
- ƒ **_load_config** (`Function`)
- ƒ **_load_config** (`Function`) `[TRIVIAL]`
- 📝 Load configuration from DB or perform one-time migration from legacy JSON.
- ƒ **_save_config_to_db** (`Function`)
- ƒ **_save_config_to_db** (`Function`) `[TRIVIAL]`
- 📝 Persist provided AppConfig into the global DB configuration record.
- ƒ **save** (`Function`)
- ƒ **save** (`Function`) `[TRIVIAL]`
- 📝 Persist current in-memory configuration state.
- ƒ **get_config** (`Function`)
- ƒ **get_config** (`Function`) `[TRIVIAL]`
- 📝 Return current in-memory configuration snapshot.
- ƒ **get_payload** (`Function`)
- ƒ **get_payload** (`Function`) `[TRIVIAL]`
- 📝 Return full persisted payload including sections outside typed AppConfig schema.
- ƒ **save_config** (`Function`)
- ƒ **save_config** (`Function`) `[TRIVIAL]`
- 📝 Persist configuration provided either as typed AppConfig or raw payload dict.
- ƒ **update_global_settings** (`Function`)
- ƒ **update_global_settings** (`Function`) `[TRIVIAL]`
- 📝 Replace global settings and persist the resulting configuration.
- ƒ **validate_path** (`Function`)
- ƒ **validate_path** (`Function`) `[TRIVIAL]`
- 📝 Validate that path exists and is writable, creating it when absent.
- ƒ **get_environments** (`Function`)
- ƒ **get_environments** (`Function`) `[TRIVIAL]`
- 📝 Return all configured environments.
- ƒ **has_environments** (`Function`)
- ƒ **has_environments** (`Function`) `[TRIVIAL]`
- 📝 Check whether at least one environment exists in configuration.
- ƒ **get_environment** (`Function`)
- ƒ **get_environment** (`Function`) `[TRIVIAL]`
- 📝 Resolve a configured environment by identifier.
- ƒ **add_environment** (`Function`)
- ƒ **add_environment** (`Function`) `[TRIVIAL]`
- 📝 Upsert environment by id into configuration and persist.
- ƒ **update_environment** (`Function`)
- ƒ **update_environment** (`Function`) `[TRIVIAL]`
- 📝 Update existing environment by id and preserve masked password placeholder behavior.
- ƒ **delete_environment** (`Function`)
- ƒ **delete_environment** (`Function`) `[TRIVIAL]`
- 📝 Delete environment by id and persist when deletion occurs.
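
`_load_config` is described as "load configuration from DB or perform one-time migration from legacy JSON". The control flow that description implies, sketched with hypothetical helper names around the real dependencies the map lists (`SessionLocal`, `AppConfigRecord`, `AppConfig`); the payload-column shape is an assumption:

```python
import json
from pathlib import Path

def load_config(session, record_cls, config_cls, legacy_path: Path):
    """Load the global config record, migrating legacy JSON exactly once."""
    record = session.query(record_cls).first()  # the global record id
    if record is not None:
        return config_cls(**record.payload)  # assumption: JSON payload column
    # One-time migration path: fall back to the legacy JSON file.
    payload = {}
    if legacy_path.exists():
        payload = json.loads(legacy_path.read_text(encoding="utf-8"))
    config = config_cls(**payload)
    session.add(record_cls(payload=payload))
    session.commit()  # from now on, the DB record is authoritative
    return config
```
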
- 📦 **SchedulerModule** (`Module`)
- 📝 Manages scheduled tasks using APScheduler.
@@ -2730,6 +2846,8 @@
- 📝 Applies additive schema upgrades for llm_validation_results table.
- ƒ **_ensure_git_server_configs_columns** (`Function`)
- 📝 Applies additive schema upgrades for git_server_configs table.
- ƒ **_ensure_auth_users_columns** (`Function`)
- 📝 Applies additive schema upgrades for auth users table.
- ƒ **ensure_connection_configs_table** (`Function`)
- 📝 Ensures the external connection registry table exists in the main database.
- ƒ **init_db** (`Function`)
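
The `_ensure_*_columns` helpers above all follow the same additive-upgrade pattern: inspect the live table and add whatever columns are missing, never dropping anything. A generic sketch of that pattern with SQLAlchemy (table and column names below are placeholders, not taken from the repository):

```python
from sqlalchemy import inspect, text

def ensure_columns(engine, table: str, columns: dict[str, str]) -> None:
    """Add missing columns to `table`; existing data is never touched."""
    existing = {col["name"] for col in inspect(engine).get_columns(table)}
    with engine.begin() as conn:
        for name, ddl_type in columns.items():
            if name not in existing:
                conn.execute(
                    text(f"ALTER TABLE {table} ADD COLUMN {name} {ddl_type}")
                )

# Hypothetical usage mirroring _ensure_auth_users_columns:
# ensure_columns(engine, "auth_users", {"last_login": "TIMESTAMP"})
```
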
@@ -2938,39 +3056,38 @@
- 📝 Verifies a plain password against a hashed password.
- ƒ **get_password_hash** (`Function`) `[TRIVIAL]`
- 📝 Generates a bcrypt hash for a plain password.
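
These two helpers describe the standard Passlib pairing (the auth module's map entry names Passlib explicitly). A minimal sketch of how such helpers are conventionally written:

```python
from passlib.context import CryptContext

# A single shared context keeps the hashing scheme in one place.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

def get_password_hash(password: str) -> str:
    """Generate a bcrypt hash for a plain password."""
    return pwd_context.hash(password)

def verify_password(plain: str, hashed: str) -> bool:
    """Verify a plain password against a stored hash."""
    return pwd_context.verify(plain, hashed)
```
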
- 📦 **backend.src.core.auth.repository** (`Module`) `[CRITICAL]`
- 📦 **AuthRepository** (`Module`) `[CRITICAL]`
- 📝 Data access layer for authentication and user preference entities.
- 🏗️ Layer: Domain
- 🔒 Invariant: All database read/write operations must execute via the injected SQLAlchemy session boundary.
- 🔗 DEPENDS_ON -> `sqlalchemy.orm.Session`
- 🔗 DEPENDS_ON -> `backend.src.models.auth`
- 🔗 DEPENDS_ON -> `backend.src.models.profile`
- 🔗 DEPENDS_ON -> `backend.src.core.logger.belief_scope`
- 🔗 DEPENDS_ON -> `User:Class`
- 🔗 DEPENDS_ON -> `Role:Class`
- 🔗 DEPENDS_ON -> `Permission:Class`
- 🔗 DEPENDS_ON -> `UserDashboardPreference:Class`
- 🔗 DEPENDS_ON -> `belief_scope:Function`
- ℂ **AuthRepository** (`Class`) `[CRITICAL]`
- 📝 Encapsulates database operations for authentication-related entities.
- 🔗 DEPENDS_ON -> `sqlalchemy.orm.Session`
- ƒ **__init__** (`Function`) `[CRITICAL]`
- 📝 Bind repository instance to an existing SQLAlchemy session.
- ƒ **get_user_by_username** (`Function`) `[CRITICAL]`
- 📝 Retrieve a user entity by unique username.
- ƒ **get_user_by_id** (`Function`) `[CRITICAL]`
- 📝 Retrieve a user entity by identifier.
- ƒ **get_role_by_name** (`Function`) `[CRITICAL]`
- 📝 Retrieve a role entity by role name.
- ƒ **update_last_login** (`Function`) `[CRITICAL]`
- 📝 Update last_login timestamp for the provided user entity.
- ƒ **get_role_by_id** (`Function`) `[CRITICAL]`
- 📝 Retrieve a role entity by identifier.
- ƒ **get_permission_by_id** (`Function`) `[CRITICAL]`
- 📝 Retrieve a permission entity by identifier.
- ƒ **get_permission_by_resource_action** (`Function`) `[CRITICAL]`
- 📝 Retrieve a permission entity by resource and action pair.
- ƒ **get_user_dashboard_preference** (`Function`) `[CRITICAL]`
- 📝 Retrieve dashboard preference entity owned by specified user.
- ƒ **save_user_dashboard_preference** (`Function`) `[CRITICAL]`
- 📝 Persist dashboard preference entity and return refreshed persistent row.
- ƒ **list_permissions** (`Function`) `[CRITICAL]`
- 📝 List all permission entities available in storage.
- 📝 Initialize repository with database session.
- ƒ **get_user_by_id** (`Function`) `[TRIVIAL]`
- 📝 Retrieve user by UUID.
- ƒ **get_user_by_username** (`Function`) `[TRIVIAL]`
- 📝 Retrieve user by username.
- ƒ **get_role_by_id** (`Function`) `[TRIVIAL]`
- 📝 Retrieve role by UUID with permissions preloaded.
- ƒ **get_role_by_name** (`Function`) `[TRIVIAL]`
- 📝 Retrieve role by unique name.
- ƒ **get_permission_by_id** (`Function`) `[TRIVIAL]`
- 📝 Retrieve permission by UUID.
- ƒ **get_permission_by_resource_action** (`Function`) `[TRIVIAL]`
- 📝 Retrieve permission by resource and action tuple.
- ƒ **list_permissions** (`Function`) `[TRIVIAL]`
- 📝 List all system permissions.
- ƒ **get_user_dashboard_preference** (`Function`) `[TRIVIAL]`
- 📝 Retrieve dashboard filters/preferences for a user.
- ƒ **get_roles_by_ad_groups** (`Function`) `[TRIVIAL]`
- 📝 Retrieve roles that match a list of AD group names.
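
The invariant above, that every read and write executes through the injected SQLAlchemy session, is the classic repository-over-session shape. A compressed sketch under that assumption (the `User` import path comes from the map's `backend.src.models.auth` dependency; query details are illustrative):

```python
from sqlalchemy.orm import Session

from backend.src.models.auth import User  # dependency named in the map

class AuthRepository:
    """Encapsulates auth-related DB access behind one injected session."""

    def __init__(self, db: Session) -> None:
        # Bind to an existing session; the repository never opens its own.
        self.db = db

    def get_user_by_username(self, username: str) -> User | None:
        return self.db.query(User).filter(User.username == username).first()

    def get_user_by_id(self, user_id: str) -> User | None:
        return self.db.get(User, user_id)  # UUID primary key per the map
```
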
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **src.core.auth** (`Package`) `[TRIVIAL]`
- 📝 Authentication and authorization package root.
- 📦 **test_auth** (`Module`)
@@ -3022,7 +3139,7 @@
- 📝 Auto-detected function (orphan)
- ƒ **_normalize_base_url** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **backend.core.utils.fileio** (`Module`) `[TRIVIAL]`
- 📦 **FileIO** (`Module`)
- 📝 Provides a set of utilities for managing file operations, including temporary files, ZIP archives, YAML files, and directory cleanup.
- 🏗️ Layer: Infra
- 🔗 DEPENDS_ON -> `backend.src.core.logger`
@@ -3327,15 +3444,19 @@
- 📝 Auto-detected function (orphan)
- ƒ **json_serializable** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **TaskManagerModule** (`Module`) `[CRITICAL]`
- 📦 **TaskManager** (`Module`) `[CRITICAL]`
- 📝 Manages the lifecycle of tasks, including their creation, execution, and state tracking. It uses a thread pool to run plugins asynchronously.
- 🏗️ Layer: Core
- 🔒 Invariant: Task IDs are unique.
- 🔗 DEPENDS_ON -> `backend.src.core.plugin_loader`
- 🔗 DEPENDS_ON -> `backend.src.core.task_manager.persistence`
- 🔗 DEPENDS_ON -> `PluginLoader:Class`
- 🔗 DEPENDS_ON -> `TaskPersistenceModule:Module`
- ℂ **TaskManager** (`Class`) `[CRITICAL]`
- 📝 Manages the lifecycle of tasks, including their creation, execution, and state tracking.
- 🏗️ Layer: Core
- 🔒 Invariant: Log entries are never deleted after being added to a task.
- 🔗 DEPENDS_ON -> `TaskPersistenceService:Class`
- 🔗 DEPENDS_ON -> `TaskLogPersistenceService:Class`
- 🔗 DEPENDS_ON -> `PluginLoader:Class`
- ƒ **__init__** (`Function`) `[CRITICAL]`
- 📝 Initialize the TaskManager with dependencies.
- ƒ **_flusher_loop** (`Function`)
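
The TaskManager is described as running plugins asynchronously on a thread pool while enforcing two invariants: task IDs are unique and log entries are append-only. A minimal sketch of that contract (status values and method names are assumptions, not the repository's API):

```python
import uuid
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field

@dataclass
class Task:
    id: str
    status: str = "PENDING"
    logs: list[str] = field(default_factory=list)  # append-only by contract

class TaskManager:
    def __init__(self, max_workers: int = 4) -> None:
        self._pool = ThreadPoolExecutor(max_workers=max_workers)
        self._tasks: dict[str, Task] = {}

    def create_task(self) -> Task:
        task = Task(id=str(uuid.uuid4()))  # uuid4 keeps task IDs unique
        self._tasks[task.id] = task
        return task

    def run(self, task: Task, plugin) -> None:
        def _execute() -> None:
            task.status = "RUNNING"
            try:
                plugin(task)
                task.status = "SUCCESS"
            except Exception as exc:
                task.logs.append(f"error: {exc}")  # logs are only appended
                task.status = "FAILED"
        self._pool.submit(_execute)
```
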
@@ -3468,22 +3589,29 @@
- 📝 Verify TaskContext preserves optional background task scheduler across sub-context creation.
- ƒ **test_task_context_preserves_background_tasks_across_sub_context** (`Function`) `[TRIVIAL]`
- 📝 Plugins must be able to access background_tasks from both root and sub-context loggers.
- 📦 **backend.src.api.auth** (`Module`)
- 📦 **AuthApi** (`Module`)
- 📝 Authentication API endpoints.
- 🏗️ Layer: API
- 🔒 Invariant: All auth endpoints must return consistent error codes.
- 🔗 DEPENDS_ON -> `AuthRepository:Class`
- 📦 **router** (`Variable`) `[TRIVIAL]`
- 📝 APIRouter instance for authentication routes.
- ƒ **login_for_access_token** (`Function`)
- 📝 Authenticates a user and returns a JWT access token.
- 🔗 CALLS -> `AuthService.authenticate_user`
- 🔗 CALLS -> `AuthService.create_session`
- ƒ **read_users_me** (`Function`)
- 📝 Retrieves the profile of the currently authenticated user.
- 🔗 DEPENDS_ON -> `get_current_user`
- ƒ **logout** (`Function`)
- 📝 Logs out the current user (placeholder for session revocation).
- 🔗 DEPENDS_ON -> `get_current_user`
- ƒ **login_adfs** (`Function`)
- 📝 Initiates the ADFS OIDC login flow.
- ƒ **auth_callback_adfs** (`Function`)
- 📝 Handles the callback from ADFS after successful authentication.
- 🔗 CALLS -> `AuthService.provision_adfs_user`
- 🔗 CALLS -> `AuthService.create_session`
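
`login_for_access_token` pairs `AuthService.authenticate_user` with `AuthService.create_session` to return a JWT. Given the `jose` dependency listed in the module map, the token-issuing half plausibly looks like this (the claims, expiry, and secret-loading are assumptions):

```python
from datetime import datetime, timedelta, timezone

from jose import jwt

SECRET_KEY = "change-me"  # assumption: the real key comes from AuthConfig
ALGORITHM = "HS256"

def create_session(user_id: str, ttl_minutes: int = 60) -> str:
    """Issue an access token payload for an already authenticated user."""
    expires = datetime.now(timezone.utc) + timedelta(minutes=ttl_minutes)
    claims = {"sub": user_id, "exp": expires}
    return jwt.encode(claims, SECRET_KEY, algorithm=ALGORITHM)
```
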
- 📦 **src.api** (`Package`) `[TRIVIAL]`
- 📝 Backend API package root.
- 📦 **router** (`Global`) `[TRIVIAL]`
@@ -3508,7 +3636,7 @@
- 📝 API endpoints for the Dataset Hub - listing datasets with mapping progress
- 🏗️ Layer: API
- 🔒 Invariant: All dataset responses include last_task metadata
- 🔗 DEPENDS_ON -> `backend.src.dependencies`
- 🔗 DEPENDS_ON -> `AppDependencies`
- 🔗 DEPENDS_ON -> `backend.src.services.resource_service.ResourceService`
- 🔗 DEPENDS_ON -> `backend.src.core.superset_client.SupersetClient`
- 📦 **MappedFields** (`DataClass`) `[TRIVIAL]`
@@ -3685,11 +3813,11 @@
- ƒ **get_environment_databases** (`Function`) `[TRIVIAL]`
- 📝 Fetch the list of databases from a specific environment.
- 🏗️ Layer: API
- 📦 **backend.src.api.routes.migration** (`Module`) `[CRITICAL]`
- 📦 **MigrationApi** (`Module`) `[CRITICAL]`
- 📝 HTTP contract layer for migration orchestration, settings, dry-run, and mapping sync endpoints.
- 🏗️ Layer: Infra
- 🔒 Invariant: Migration endpoints never execute with invalid environment references and always return explicit HTTP errors on guard failures.
- 🔗 DEPENDS_ON -> `backend.src.dependencies`
- 🔗 DEPENDS_ON -> `AppDependencies`
- 🔗 DEPENDS_ON -> `backend.src.core.database`
- 🔗 DEPENDS_ON -> `backend.src.core.superset_client.SupersetClient`
- 🔗 DEPENDS_ON -> `backend.src.core.migration.dry_run_orchestrator.MigrationDryRunService`
@@ -3717,21 +3845,34 @@
- 📝 Retrieve a list of all available plugins.
- 📦 **backend.src.api.routes.clean_release_v2** (`Module`)
- 📝 Redesigned clean release API for headless candidate lifecycle.
- 🏗️ Layer: API
- ƒ **register_candidate** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **import_artifacts** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **build_manifest** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **approve_candidate_endpoint** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **reject_candidate_endpoint** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **publish_candidate_endpoint** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **revoke_publication_endpoint** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ℂ **ApprovalRequest** (`Class`) `[TRIVIAL]`
- 📝 Schema for approval request payload.
- ℂ **PublishRequest** (`Class`) `[TRIVIAL]`
- 📝 Schema for publication request payload.
- ℂ **RevokeRequest** (`Class`) `[TRIVIAL]`
- 📝 Schema for revocation request payload.
- ƒ **register_candidate** (`Function`)
- 📝 Register a new release candidate.
- 🔗 CALLS -> `CleanReleaseRepository.save_candidate`
- ƒ **import_artifacts** (`Function`)
- 📝 Associate artifacts with a release candidate.
- 🔗 CALLS -> `CleanReleaseRepository.get_candidate`
- ƒ **build_manifest** (`Function`)
- 📝 Generate distribution manifest for a candidate.
- 🔗 CALLS -> `CleanReleaseRepository.save_manifest`
- 🔗 CALLS -> `CleanReleaseRepository.get_candidate`
- ƒ **approve_candidate_endpoint** (`Function`)
- 📝 Endpoint to record candidate approval.
- 🔗 CALLS -> `approve_candidate`
- ƒ **reject_candidate_endpoint** (`Function`)
- 📝 Endpoint to record candidate rejection.
- 🔗 CALLS -> `reject_candidate`
- ƒ **publish_candidate_endpoint** (`Function`)
- 📝 Endpoint to publish an approved candidate.
- 🔗 CALLS -> `publish_candidate`
- ƒ **revoke_publication_endpoint** (`Function`)
- 📝 Endpoint to revoke a previous publication.
- 🔗 CALLS -> `revoke_publication`
- 📦 **backend.src.api.routes.mappings** (`Module`)
- 📝 API endpoints for managing database mappings and getting suggestions.
- 🏗️ Layer: API
@@ -3796,7 +3937,7 @@
- 📝 Updates an existing validation policy.
- ƒ **delete_validation_policy** (`Function`)
- 📝 Deletes a validation policy.
- 📦 **backend.src.api.routes.admin** (`Module`)
- 📦 **AdminApi** (`Module`)
- 📝 Admin API endpoints for user and role management.
- 🏗️ Layer: API
- 🔒 Invariant: All endpoints in this module require 'Admin' role or 'admin' scope.
@@ -4044,7 +4185,7 @@
- 🏗️ Layer: UI (API)
- 🔒 Invariant: Endpoints are read-only and do not trigger long-running tasks.
- 🔗 DEPENDS_ON -> `backend.src.services.reports.report_service.ReportsService`
- 🔗 DEPENDS_ON -> `backend.src.dependencies`
- 🔗 DEPENDS_ON -> `AppDependencies`
- ƒ **_parse_csv_enum_list** (`Function`) `[TRIVIAL]`
- 📝 Parse comma-separated query value into enum list.
- ƒ **list_reports** (`Function`)
@@ -4101,7 +4242,7 @@
- 📝 API endpoints for the Dashboard Hub - listing dashboards with Git and task status
- 🏗️ Layer: API
- 🔒 Invariant: All dashboard responses include git_status and last_task metadata
- 🔗 DEPENDS_ON -> `backend.src.dependencies`
- 🔗 DEPENDS_ON -> `AppDependencies`
- 🔗 DEPENDS_ON -> `backend.src.services.resource_service.ResourceService`
- 🔗 DEPENDS_ON -> `backend.src.core.superset_client.SupersetClient`
- 📦 **GitStatus** (`DataClass`)
@@ -4224,7 +4365,6 @@
- 📦 **backend.src.api.routes.__tests__.test_git_status_route** (`Module`)
- 📝 Validate status endpoint behavior for missing and error repository states.
- 🏗️ Layer: Domain (Tests)
- 🔗 CALLS -> `src.api.routes.git.get_repository_status`
- ƒ **test_get_repository_status_returns_no_repo_payload_for_missing_repo** (`Function`) `[TRIVIAL]`
- 📝 Ensure missing local repository is represented as NO_REPO payload instead of an API error.
- ƒ **test_get_repository_status_propagates_non_404_http_exception** (`Function`) `[TRIVIAL]`
@@ -4620,57 +4760,28 @@
- 📝 Auto-detected function (orphan)
- ƒ **_get_repo_path** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **backend.src.api.routes.__tests__.test_assistant_api** (`Module`)
- 📦 **AssistantApiTests** (`Module`)
- 📝 Validate assistant API endpoint logic via direct async handler invocation.
- 🏗️ Layer: UI (API Tests)
- 🔒 Invariant: Every test clears assistant in-memory state before execution.
- 🔗 DEPENDS_ON -> `backend.src.api.routes.assistant`
- ƒ **_run_async** (`Function`) `[TRIVIAL]`
- 📝 Execute async endpoint handler in synchronous test context.
- ℂ **_FakeTask** (`Class`) `[TRIVIAL]`
- 📝 Lightweight task stub used by assistant API tests.
- 🔗 BINDS_TO -> `AssistantApiTests`
- ℂ **_FakeTaskManager** (`Class`) `[TRIVIAL]`
- 📝 Minimal async-compatible TaskManager fixture for deterministic test flows.
- 🔗 BINDS_TO -> `AssistantApiTests`
- ℂ **_FakeConfigManager** (`Class`) `[TRIVIAL]`
- 📝 Environment config fixture with dev/prod aliases for parser tests.
- 🔗 BINDS_TO -> `AssistantApiTests`
- ƒ **_admin_user** (`Function`) `[TRIVIAL]`
- 📝 Build admin principal fixture.
- ƒ **_limited_user** (`Function`) `[TRIVIAL]`
- 📝 Build non-admin principal fixture.
- ℂ **_FakeQuery** (`Class`) `[TRIVIAL]`
- 📝 Minimal chainable query object for fake SQLAlchemy-like DB behavior in tests.
- 🔗 BINDS_TO -> `AssistantApiTests`
- ℂ **_FakeDb** (`Class`) `[TRIVIAL]`
- 📝 In-memory fake database implementing subset of Session interface used by assistant routes.
- 🔗 BINDS_TO -> `AssistantApiTests`
- ƒ **_clear_assistant_state** (`Function`) `[TRIVIAL]`
- 📝 Reset in-memory assistant registries for isolation between tests.
- ƒ **test_unknown_command_returns_needs_clarification** (`Function`) `[TRIVIAL]`
- 📝 Unknown command should return clarification state and unknown intent.
- ƒ **test_capabilities_question_returns_successful_help** (`Function`) `[TRIVIAL]`
- 📝 Capability query should return deterministic help response, not clarification.
- ƒ **test_non_admin_command_returns_denied** (`Function`) `[TRIVIAL]`
- 📝 Non-admin user must receive denied state for privileged command.
- ƒ **test_migration_to_prod_requires_confirmation_and_can_be_confirmed** (`Function`) `[TRIVIAL]`
- 📝 Migration to prod must require confirmation and then start task after explicit confirm.
- ƒ **test_status_query_returns_task_status** (`Function`) `[TRIVIAL]`
- 📝 Task status command must surface current status text for existing task id.
- ƒ **test_status_query_without_task_id_returns_latest_user_task** (`Function`) `[TRIVIAL]`
- 📝 Status command without explicit task_id should resolve to latest task for current user.
- ƒ **test_llm_validation_with_dashboard_ref_requires_confirmation** (`Function`) `[TRIVIAL]`
- 📝 LLM validation with dashboard_ref should now require confirmation before dispatch.
- ƒ **test_list_conversations_groups_by_conversation_and_marks_archived** (`Function`) `[TRIVIAL]`
- 📝 Conversations endpoint must group messages and compute archived marker by inactivity threshold.
- ƒ **test_history_from_latest_returns_recent_page_first** (`Function`) `[TRIVIAL]`
- 📝 History endpoint from_latest mode must return newest page while preserving chronological order in chunk.
- ƒ **test_list_conversations_archived_only_filters_active** (`Function`) `[TRIVIAL]`
- 📝 archived_only mode must return only archived conversations.
- ƒ **test_guarded_operation_always_requires_confirmation** (`Function`) `[TRIVIAL]`
- 📝 Non-dangerous (guarded) commands must still require confirmation before execution.
- ƒ **test_guarded_operation_confirm_roundtrip** (`Function`) `[TRIVIAL]`
- 📝 Guarded operation must execute successfully after explicit confirmation.
- ƒ **test_confirm_nonexistent_id_returns_404** (`Function`) `[TRIVIAL]`
- 📝 Confirming a non-existent ID should raise 404.
- ƒ **test_migration_with_dry_run_includes_summary** (`Function`) `[TRIVIAL]`
- 📝 Migration command with dry run flag must return the dry run summary in confirmation text.
- 📝 Capability query should return deterministic help response.
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **__init__** (`Function`) `[TRIVIAL]`
@@ -4681,6 +4792,10 @@
- 📝 Auto-detected function (orphan)
- ƒ **get_tasks** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_all_tasks** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_environments** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_config** (`Function`) `[TRIVIAL]`
@@ -4691,29 +4806,29 @@
- 📝 Auto-detected function (orphan)
- ƒ **order_by** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **limit** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **offset** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **first** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **all** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **count** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **offset** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **limit** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **add** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **merge** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **query** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **add** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **commit** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **rollback** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **run** (`Function`) `[TRIVIAL]`
- ƒ **merge** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **refresh** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **backend.src.api.routes.__tests__.test_migration_routes** (`Module`)
- 📝 Unit tests for migration API route handlers.
@@ -4812,7 +4927,7 @@
- 🔗 DEPENDS_ON -> `sqlalchemy`
- ℂ **ConnectionConfig** (`Class`) `[TRIVIAL]`
- 📝 Stores credentials for external databases used for column mapping.
- 📦 **backend.src.models.mapping** (`Module`)
- 📦 **MappingModels** (`Module`)
- 📝 Defines the database schema for environment metadata and database mappings using SQLAlchemy.
- 🏗️ Layer: Domain
- 🔒 Invariant: All primary keys are UUID strings.
@@ -4958,7 +5073,7 @@
- 📝 Auto-detected function (orphan)
- ƒ **check_run_id** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **backend.src.models.auth** (`Module`)
- 📦 **AuthModels** (`Module`)
- 📝 SQLAlchemy models for multi-user authentication and authorization.
- 🏗️ Layer: Domain
- 🔒 Invariant: Usernames and emails must be unique.
@@ -4977,11 +5092,11 @@
- ℂ **ADGroupMapping** (`Class`) `[CRITICAL]`
- 📝 Maps an Active Directory group to a local System Role.
- 🔗 DEPENDS_ON -> `Role`
- 📦 **backend.src.models.profile** (`Module`)
- 📦 **ProfileModels** (`Module`)
- 📝 Defines persistent per-user profile settings for dashboard filter, Git identity/token, and UX preferences.
- 🏗️ Layer: Domain
- 🔒 Invariant: Sensitive Git token is stored encrypted and never returned in plaintext.
- 🔗 DEPENDS_ON -> `backend.src.models.auth`
- 🔗 DEPENDS_ON -> `AuthModels`
- ℂ **UserDashboardPreference** (`Class`)
- 📝 Stores Superset username binding and default "my dashboards" toggle for one authenticated user.
- 📦 **src.models** (`Package`) `[TRIVIAL]`
@@ -5319,14 +5434,22 @@
- 🔗 DEPENDS_ON -> `backend.src.models.auth.Role`
- ℂ **AuthService** (`Class`)
- 📝 Provides high-level authentication services.
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- ƒ **AuthService.__init__** (`Function`) `[TRIVIAL]`
- 📝 Initializes the authentication service with repository access over an active DB session.
- ƒ **authenticate_user** (`Function`)
- ƒ **AuthService.authenticate_user** (`Function`)
- 📝 Validates credentials and account state for local username/password authentication.
- ƒ **create_session** (`Function`)
- ƒ **AuthService.create_session** (`Function`)
- 📝 Issues an access token payload for an already authenticated user.
- ƒ **provision_adfs_user** (`Function`)
- ƒ **AuthService.provision_adfs_user** (`Function`)
- 📝 Performs ADFS Just-In-Time provisioning and role synchronization from AD group mappings.
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **authenticate_user** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_session** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **provision_adfs_user** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- 📦 **backend.src.services.git_service** (`Module`)
- 📝 Core Git logic using GitPython to manage dashboard repositories.
- 🏗️ Layer: Service
@@ -5338,18 +5461,202 @@
|
||||
- 📝 Wrapper for GitPython operations with semantic logging and error handling.
- ƒ **backend.src.services.git_service.GitService.__init__** (`Function`) `[TRIVIAL]`
- 📝 Initializes the GitService with a base path for repositories.
- ƒ **_ensure_base_path_exists** (`Function`) `[TRIVIAL]`
- 📝 Ensure the repositories root directory exists and is a directory.
- ƒ **backend.src.services.git_service.GitService._resolve_base_path** (`Function`) `[TRIVIAL]`
- 📝 Resolve base repository directory from explicit argument or global storage settings.
- 🔗 CALLS -> `GitService._resolve_base_path`
- 🔗 CALLS -> `GitService._ensure_base_path_exists`
- ƒ **backend.src.services.git_service.GitService._ensure_base_path_exists** (`Function`) `[TRIVIAL]`
- 📝 Ensure the repositories root directory exists and is a directory.
- ƒ **backend.src.services.git_service.GitService._resolve_base_path** (`Function`) `[TRIVIAL]`
- 📝 Resolve base repository directory from explicit argument or global storage settings.
- ƒ **backend.src.services.git_service.GitService._normalize_repo_key** (`Function`) `[TRIVIAL]`
- 📝 Convert user/dashboard-provided key to safe filesystem directory name.
- ƒ **backend.src.services.git_service.GitService._update_repo_local_path** (`Function`) `[TRIVIAL]`
- 📝 Persist repository local_path in GitRepository table when record exists.
- ƒ **backend.src.services.git_service.GitService._migrate_repo_directory** (`Function`) `[TRIVIAL]`
- 📝 Move legacy repository directory to target path and sync DB metadata.
- 🔗 CALLS -> `GitService._update_repo_local_path`
- ƒ **backend.src.services.git_service.GitService._ensure_gitflow_branches** (`Function`) `[TRIVIAL]`
- 📝 Ensure standard GitFlow branches (main/dev/preprod) exist locally and on origin.
- ƒ **backend.src.services.git_service.GitService._get_repo_path** (`Function`) `[TRIVIAL]`
- 📝 Resolves the local filesystem path for a dashboard's repository.
- 🔗 CALLS -> `GitService._normalize_repo_key`
- 🔗 CALLS -> `GitService._migrate_repo_directory`
- 🔗 CALLS -> `GitService._update_repo_local_path`
- ƒ **backend.src.services.git_service.GitService.init_repo** (`Function`) `[TRIVIAL]`
- 📝 Initialize or clone a repository for a dashboard.
- 🔗 CALLS -> `GitService._get_repo_path`
- 🔗 CALLS -> `GitService._ensure_gitflow_branches`
- ƒ **backend.src.services.git_service.GitService.delete_repo** (`Function`) `[TRIVIAL]`
- 📝 Remove local repository and DB binding for a dashboard.
- 🔗 CALLS -> `GitService._get_repo_path`
- ƒ **backend.src.services.git_service.GitService.get_repo** (`Function`) `[TRIVIAL]`
- 📝 Get Repo object for a dashboard.
- 🔗 CALLS -> `GitService._get_repo_path`
- ƒ **backend.src.services.git_service.GitService.configure_identity** (`Function`) `[TRIVIAL]`
- 📝 Configure repository-local Git committer identity for user-scoped operations.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.list_branches** (`Function`) `[TRIVIAL]`
- 📝 List all branches for a dashboard's repository.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.create_branch** (`Function`) `[TRIVIAL]`
- 📝 Create a new branch from an existing one.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.checkout_branch** (`Function`) `[TRIVIAL]`
- 📝 Switch to a specific branch.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.commit_changes** (`Function`) `[TRIVIAL]`
- 📝 Stage and commit changes.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService._extract_http_host** (`Function`) `[TRIVIAL]`
- 📝 Extract normalized host[:port] from HTTP(S) URL.
- ƒ **backend.src.services.git_service.GitService._strip_url_credentials** (`Function`) `[TRIVIAL]`
- 📝 Remove credentials from URL while preserving scheme/host/path.
- ƒ **backend.src.services.git_service.GitService._replace_host_in_url** (`Function`) `[TRIVIAL]`
- 📝 Replace source URL host with host from configured server URL.
- ƒ **backend.src.services.git_service.GitService._align_origin_host_with_config** (`Function`) `[TRIVIAL]`
- 📝 Auto-align local origin host to configured Git server host when they drift.
- 🔗 CALLS -> `GitService._extract_http_host`
- 🔗 CALLS -> `GitService._replace_host_in_url`
- 🔗 CALLS -> `GitService._strip_url_credentials`
- ƒ **backend.src.services.git_service.GitService.push_changes** (`Function`) `[TRIVIAL]`
- 📝 Push local commits to remote.
- 🔗 CALLS -> `GitService.get_repo`
- 🔗 CALLS -> `GitService._align_origin_host_with_config`
- ƒ **backend.src.services.git_service.GitService._read_blob_text** (`Function`) `[TRIVIAL]`
- 📝 Read text from a Git blob.
- ƒ **backend.src.services.git_service.GitService._get_unmerged_file_paths** (`Function`) `[TRIVIAL]`
- 📝 List files with merge conflicts.
- ƒ **backend.src.services.git_service.GitService._build_unfinished_merge_payload** (`Function`) `[TRIVIAL]`
- 📝 Build payload for unfinished merge state.
- 🔗 CALLS -> `GitService._get_unmerged_file_paths`
- ƒ **backend.src.services.git_service.GitService.get_merge_status** (`Function`) `[TRIVIAL]`
- 📝 Get current merge status for a dashboard repository.
- 🔗 CALLS -> `GitService.get_repo`
- 🔗 CALLS -> `GitService._build_unfinished_merge_payload`
- ƒ **backend.src.services.git_service.GitService.get_merge_conflicts** (`Function`) `[TRIVIAL]`
- 📝 List all files with conflicts and their contents.
- 🔗 CALLS -> `GitService.get_repo`
- 🔗 CALLS -> `GitService._read_blob_text`
- ƒ **backend.src.services.git_service.GitService.resolve_merge_conflicts** (`Function`) `[TRIVIAL]`
- 📝 Resolve conflicts using specified strategy.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.abort_merge** (`Function`) `[TRIVIAL]`
- 📝 Abort ongoing merge.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.continue_merge** (`Function`) `[TRIVIAL]`
- 📝 Finalize merge after conflict resolution.
- 🔗 CALLS -> `GitService.get_repo`
- 🔗 CALLS -> `GitService._get_unmerged_file_paths`
- ƒ **backend.src.services.git_service.GitService.pull_changes** (`Function`) `[TRIVIAL]`
- 📝 Pull changes from remote.
- 🔗 CALLS -> `GitService.get_repo`
- 🔗 CALLS -> `GitService._build_unfinished_merge_payload`
- ƒ **backend.src.services.git_service.GitService.get_status** (`Function`) `[TRIVIAL]`
- 📝 Get current repository status (dirty files, untracked, etc.)
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.get_diff** (`Function`) `[TRIVIAL]`
- 📝 Generate diff for a file or the whole repository.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.get_commit_history** (`Function`) `[TRIVIAL]`
- 📝 Retrieve commit history for a repository.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.test_connection** (`Function`) `[TRIVIAL]`
- 📝 Test connection to Git provider using PAT.
- ƒ **backend.src.services.git_service.GitService._normalize_git_server_url** (`Function`) `[TRIVIAL]`
- 📝 Normalize Git server URL for provider API calls.
- ƒ **backend.src.services.git_service.GitService._gitea_headers** (`Function`) `[TRIVIAL]`
- 📝 Build Gitea API authorization headers.
- ƒ **backend.src.services.git_service.GitService._gitea_request** (`Function`) `[TRIVIAL]`
- 📝 Execute HTTP request against Gitea API with stable error mapping.
- 🔗 CALLS -> `GitService._normalize_git_server_url`
- 🔗 CALLS -> `GitService._gitea_headers`
- ƒ **backend.src.services.git_service.GitService.get_gitea_current_user** (`Function`) `[TRIVIAL]`
- 📝 Resolve current Gitea user for PAT.
- 🔗 CALLS -> `GitService._gitea_request`
- ƒ **backend.src.services.git_service.GitService.list_gitea_repositories** (`Function`) `[TRIVIAL]`
- 📝 List repositories visible to authenticated Gitea user.
- 🔗 CALLS -> `GitService._gitea_request`
- ƒ **backend.src.services.git_service.GitService.create_gitea_repository** (`Function`) `[TRIVIAL]`
- 📝 Create repository in Gitea for authenticated user.
- 🔗 CALLS -> `GitService._gitea_request`
- ƒ **backend.src.services.git_service.GitService.delete_gitea_repository** (`Function`) `[TRIVIAL]`
- 📝 Delete repository in Gitea.
- 🔗 CALLS -> `GitService._gitea_request`
- ƒ **backend.src.services.git_service.GitService._gitea_branch_exists** (`Function`) `[TRIVIAL]`
- 📝 Check whether a branch exists in Gitea repository.
- 🔗 CALLS -> `GitService._gitea_request`
- ƒ **backend.src.services.git_service.GitService._build_gitea_pr_404_detail** (`Function`) `[TRIVIAL]`
- 📝 Build actionable error detail for Gitea PR 404 responses.
- 🔗 CALLS -> `GitService._gitea_branch_exists`
- ƒ **backend.src.services.git_service.GitService.create_github_repository** (`Function`) `[TRIVIAL]`
- 📝 Create repository in GitHub or GitHub Enterprise.
- 🔗 CALLS -> `GitService._normalize_git_server_url`
- ƒ **backend.src.services.git_service.GitService.create_gitlab_repository** (`Function`) `[TRIVIAL]`
- 📝 Create repository (project) in GitLab.
- 🔗 CALLS -> `GitService._normalize_git_server_url`
- ƒ **backend.src.services.git_service.GitService._parse_remote_repo_identity** (`Function`) `[TRIVIAL]`
- 📝 Parse owner/repo from remote URL for Git server API operations.
- ƒ **backend.src.services.git_service.GitService._derive_server_url_from_remote** (`Function`) `[TRIVIAL]`
- 📝 Build API base URL from remote repository URL without credentials.
- ƒ **backend.src.services.git_service.GitService.promote_direct_merge** (`Function`) `[TRIVIAL]`
- 📝 Perform direct merge between branches in local repo and push target branch.
- 🔗 CALLS -> `GitService.get_repo`
- ƒ **backend.src.services.git_service.GitService.create_gitea_pull_request** (`Function`) `[TRIVIAL]`
- 📝 Create pull request in Gitea.
- 🔗 CALLS -> `GitService._parse_remote_repo_identity`
- 🔗 CALLS -> `GitService._gitea_request`
- 🔗 CALLS -> `GitService._derive_server_url_from_remote`
- 🔗 CALLS -> `GitService._normalize_git_server_url`
- 🔗 CALLS -> `GitService._build_gitea_pr_404_detail`
- ƒ **backend.src.services.git_service.GitService.create_github_pull_request** (`Function`) `[TRIVIAL]`
- 📝 Create pull request in GitHub or GitHub Enterprise.
- 🔗 CALLS -> `GitService._parse_remote_repo_identity`
- 🔗 CALLS -> `GitService._normalize_git_server_url`
- ƒ **backend.src.services.git_service.GitService.create_gitlab_merge_request** (`Function`) `[TRIVIAL]`
- 📝 Create merge request in GitLab.
- 🔗 CALLS -> `GitService._parse_remote_repo_identity`
- 🔗 CALLS -> `GitService._normalize_git_server_url`
- ƒ **__init__** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_ensure_base_path_exists** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_resolve_base_path** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_normalize_repo_key** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_update_repo_local_path** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_migrate_repo_directory** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_ensure_gitflow_branches** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_get_repo_path** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **init_repo** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **delete_repo** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_repo** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **configure_identity** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **list_branches** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_branch** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **checkout_branch** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **commit_changes** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_extract_http_host** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_strip_url_credentials** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_replace_host_in_url** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_align_origin_host_with_config** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **push_changes** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_read_blob_text** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_get_unmerged_file_paths** (`Function`) `[TRIVIAL]`

@@ -5366,10 +5673,44 @@

- 📝 Auto-detected function (orphan)
- ƒ **continue_merge** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **pull_changes** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_status** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_diff** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_commit_history** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **test_connection** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_normalize_git_server_url** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_gitea_headers** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_gitea_request** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **get_gitea_current_user** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **list_gitea_repositories** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_gitea_repository** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **delete_gitea_repository** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_gitea_branch_exists** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_build_gitea_pr_404_detail** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_github_repository** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_gitlab_repository** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_parse_remote_repo_identity** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **_derive_server_url_from_remote** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **promote_direct_merge** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_gitea_pull_request** (`Function`) `[TRIVIAL]`
- 📝 Auto-detected function (orphan)
- ƒ **create_github_pull_request** (`Function`) `[TRIVIAL]`
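The map above only names the GitService operations. As a quick orientation, here is a minimal, hypothetical sketch of the GitPython calls such a wrapper typically makes; class and method names mirror the entries above, but signatures and paths are assumptions, and the real implementation lives in `backend/src/services/git_service.py`.

```python
# Illustrative sketch only; not the project's actual GitService.
from pathlib import Path

from git import Repo  # GitPython


class GitServiceSketch:
    def __init__(self, base_path: str = "./repos") -> None:
        # Mirrors _resolve_base_path / _ensure_base_path_exists above.
        self.base_path = Path(base_path)
        self.base_path.mkdir(parents=True, exist_ok=True)

    def init_repo(self, repo_key: str, remote_url: str | None = None) -> Repo:
        # Clone when a remote is given, otherwise initialize a fresh repo.
        target = self.base_path / repo_key
        if remote_url:
            return Repo.clone_from(remote_url, target)
        return Repo.init(target)

    def commit_changes(self, repo: Repo, paths: list[str], message: str) -> str:
        # Stage then commit, as commit_changes does in the map above.
        repo.index.add(paths)
        return repo.index.commit(message).hexsha

    def push_changes(self, repo: Repo, branch: str = "main") -> None:
        # Push the named branch to origin.
        repo.remote(name="origin").push(branch)
```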
@@ -34,7 +34,14 @@ Use these for code generation (Style Transfer).

## 3. DOMAIN MAP (Modules)

* **High-level Module Map:** `.ai/structure/MODULE_MAP.md` -> `[DEF:Module_Map]`
* **Low-level Project Map:** `.ai/structure/PROJECT_MAP.md` -> `[DEF:Project_Map]`
* **Apache Superset OpenAPI:** `.ai/openapi/superset_openapi.json` -> `[DEF:Doc:Superset_OpenAPI]`
* **Apache Superset OpenAPI Source:** `.ai/openapi/superset_openapi.json` -> `[DEF:Doc:Superset_OpenAPI]`
* **Apache Superset OpenAPI Split Index:** `.ai/openapi/superset/README.md` -> `[DEF:Doc:Superset_OpenAPI]`
* **Superset OpenAPI Sections:**
  * `.ai/openapi/superset/meta.json`
  * `.ai/openapi/superset/components/responses.json`
  * `.ai/openapi/superset/components/schemas.json`
  * `.ai/openapi/superset/components/securitySchemes.json`
  * `.ai/openapi/superset/paths`
* **Backend Core:** `backend/src/core` -> `[DEF:Module:Backend_Core]`
* **Backend API:** `backend/src/api` -> `[DEF:Module:Backend_API]`
* **Frontend Lib:** `frontend/src/lib` -> `[DEF:Module:Frontend_Lib]`
41 .ai/openapi/superset/README.md Normal file
@@ -0,0 +1,41 @@

# Superset OpenAPI split index

Source: `.ai/openapi/superset_openapi.json`

## Sections

- `meta.json` — OpenAPI version and info
- `components/responses.json` — 7 response definitions
- `components/schemas.json` — 359 schema definitions
- `components/securitySchemes.json` — 2 security scheme definitions
- `paths/` — 27 API resource groups

## Path groups

- `paths/advanced_data_type.json` — 2 paths
- `paths/annotation_layer.json` — 6 paths
- `paths/assets.json` — 2 paths
- `paths/async_event.json` — 1 path
- `paths/available_domains.json` — 1 path
- `paths/cachekey.json` — 1 path
- `paths/chart.json` — 16 paths
- `paths/css_template.json` — 4 paths
- `paths/dashboard.json` — 23 paths
- `paths/database.json` — 28 paths
- `paths/dataset.json` — 15 paths
- `paths/datasource.json` — 1 path
- `paths/embedded_dashboard.json` — 1 path
- `paths/explore.json` — 5 paths
- `paths/log.json` — 3 paths
- `paths/me.json` — 2 paths
- `paths/menu.json` — 1 path
- `paths/misc.json` — 1 path
- `paths/query.json` — 6 paths
- `paths/report.json` — 7 paths
- `paths/rowlevelsecurity.json` — 4 paths
- `paths/saved_query.json` — 7 paths
- `paths/security.json` — 32 paths
- `paths/sqllab.json` — 8 paths
- `paths/tag.json` — 10 paths
- `paths/theme.json` — 10 paths
- `paths/user.json` — 1 path
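Given this layout, the split spec can be reassembled into a single document. A minimal sketch, assuming the directory structure listed in the index above and that each `paths/*.json` file maps path strings to operation objects; the merge logic itself is an assumption, not a tool shipped with the repo.

```python
# Hypothetical reassembly of the split Superset spec into one dict.
import json
from pathlib import Path

ROOT = Path(".ai/openapi/superset")


def load_spec() -> dict:
    # meta.json carries the "openapi" version and "info" block.
    spec = json.loads((ROOT / "meta.json").read_text())
    spec["components"] = {
        name: json.loads((ROOT / "components" / f"{name}.json").read_text())
        for name in ("responses", "schemas", "securitySchemes")
    }
    # Each paths/*.json file contributes its own path items.
    spec["paths"] = {}
    for group in sorted((ROOT / "paths").glob("*.json")):
        spec["paths"].update(json.loads(group.read_text()))
    return spec
```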
188 .ai/openapi/superset/components/responses.json Normal file
@@ -0,0 +1,188 @@

{
  "400": {
    "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
    "description": "Bad request"
  },
  "401": {
    "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
    "description": "Unauthorized"
  },
  "403": {
    "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
    "description": "Forbidden"
  },
  "404": {
    "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
    "description": "Not found"
  },
  "410": {
    "content": {
      "application/json": {
        "schema": {
          "properties": {
            "errors": {
              "items": {
                "properties": {
                  "error_type": {
                    "enum": [
                      "FRONTEND_CSRF_ERROR", "FRONTEND_NETWORK_ERROR", "FRONTEND_TIMEOUT_ERROR",
                      "GENERIC_DB_ENGINE_ERROR", "COLUMN_DOES_NOT_EXIST_ERROR", "TABLE_DOES_NOT_EXIST_ERROR",
                      "SCHEMA_DOES_NOT_EXIST_ERROR", "CONNECTION_INVALID_USERNAME_ERROR",
                      "CONNECTION_INVALID_PASSWORD_ERROR", "CONNECTION_INVALID_HOSTNAME_ERROR",
                      "CONNECTION_PORT_CLOSED_ERROR", "CONNECTION_INVALID_PORT_ERROR",
                      "CONNECTION_HOST_DOWN_ERROR", "CONNECTION_ACCESS_DENIED_ERROR",
                      "CONNECTION_UNKNOWN_DATABASE_ERROR", "CONNECTION_DATABASE_PERMISSIONS_ERROR",
                      "CONNECTION_MISSING_PARAMETERS_ERROR", "OBJECT_DOES_NOT_EXIST_ERROR",
                      "SYNTAX_ERROR", "CONNECTION_DATABASE_TIMEOUT", "VIZ_GET_DF_ERROR",
                      "UNKNOWN_DATASOURCE_TYPE_ERROR", "FAILED_FETCHING_DATASOURCE_INFO_ERROR",
                      "TABLE_SECURITY_ACCESS_ERROR", "DATASOURCE_SECURITY_ACCESS_ERROR",
                      "DATABASE_SECURITY_ACCESS_ERROR", "QUERY_SECURITY_ACCESS_ERROR",
                      "MISSING_OWNERSHIP_ERROR", "USER_ACTIVITY_SECURITY_ACCESS_ERROR",
                      "DASHBOARD_SECURITY_ACCESS_ERROR", "CHART_SECURITY_ACCESS_ERROR",
                      "OAUTH2_REDIRECT", "OAUTH2_REDIRECT_ERROR", "BACKEND_TIMEOUT_ERROR",
                      "DATABASE_NOT_FOUND_ERROR", "TABLE_NOT_FOUND_ERROR",
                      "MISSING_TEMPLATE_PARAMS_ERROR", "INVALID_TEMPLATE_PARAMS_ERROR",
                      "RESULTS_BACKEND_NOT_CONFIGURED_ERROR", "DML_NOT_ALLOWED_ERROR",
                      "INVALID_CTAS_QUERY_ERROR", "INVALID_CVAS_QUERY_ERROR", "SQLLAB_TIMEOUT_ERROR",
                      "RESULTS_BACKEND_ERROR", "ASYNC_WORKERS_ERROR", "ADHOC_SUBQUERY_NOT_ALLOWED_ERROR",
                      "INVALID_SQL_ERROR", "RESULT_TOO_LARGE_ERROR", "GENERIC_COMMAND_ERROR",
                      "GENERIC_BACKEND_ERROR", "INVALID_PAYLOAD_FORMAT_ERROR",
                      "INVALID_PAYLOAD_SCHEMA_ERROR", "MARSHMALLOW_ERROR", "REPORT_NOTIFICATION_ERROR"
                    ],
                    "type": "string"
                  },
                  "extra": {"type": "object"},
                  "level": {"enum": ["info", "warning", "error"], "type": "string"},
                  "message": {"type": "string"}
                },
                "type": "object"
              },
              "type": "array"
            },
            "message": {"type": "string"}
          },
          "type": "object"
        }
      }
    },
    "description": "Gone"
  },
  "422": {
    "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
    "description": "Could not process entity"
  },
  "500": {
    "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
    "description": "Fatal error"
  }
}
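Every path file below points back at these shared definitions through `$ref` strings such as `#/components/responses/400`. A minimal resolver sketch, assuming the refs stay internal to the assembled spec dict produced by the `load_spec()` sketch under the split index:

```python
# Walk a local JSON Pointer like "#/components/responses/400" down the spec.
def resolve_ref(spec: dict, ref: str) -> dict:
    node = spec
    for key in ref.lstrip("#/").split("/"):
        node = node[key]
    return node
```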
12325 .ai/openapi/superset/components/schemas.json Normal file
File diff suppressed because it is too large
12 .ai/openapi/superset/components/securitySchemes.json Normal file
@@ -0,0 +1,12 @@

{
  "jwt": {"bearerFormat": "JWT", "scheme": "bearer", "type": "http"},
  "jwt_refresh": {"bearerFormat": "JWT", "scheme": "bearer", "type": "http"}
}
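Both schemes are plain HTTP bearer tokens: `jwt` guards normal API calls, `jwt_refresh` is accepted only by the token-refresh endpoint. A hedged client sketch, assuming Superset's standard `/api/v1/security/login` and `/api/v1/security/refresh` endpoints (covered by `paths/security.json` above); the host and credentials are placeholders:

```python
import requests

BASE = "http://superset.example.com"  # placeholder host

# Log in with the "db" provider; refresh=True also returns a refresh token.
tokens = requests.post(f"{BASE}/api/v1/security/login", json={
    "username": "admin", "password": "admin", "provider": "db", "refresh": True,
}).json()  # {"access_token": ..., "refresh_token": ...}

# "jwt" scheme: send the access token on ordinary calls.
me = requests.get(f"{BASE}/api/v1/me/",
                  headers={"Authorization": f"Bearer {tokens['access_token']}"})

# "jwt_refresh" scheme: the refresh token is only valid on the refresh endpoint.
new_access = requests.post(
    f"{BASE}/api/v1/security/refresh",
    headers={"Authorization": f"Bearer {tokens['refresh_token']}"},
).json()
```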
8 .ai/openapi/superset/meta.json Normal file
@@ -0,0 +1,8 @@

{
  "info": {"description": "Superset", "title": "Superset", "version": "v1"},
  "openapi": "3.0.2"
}
101 .ai/openapi/superset/paths/advanced_data_type.json Normal file
@@ -0,0 +1,101 @@

{
  "/api/v1/advanced_data_type/convert": {
    "get": {
      "description": "Returns an AdvancedDataTypeResponse object populated with the passed in args.",
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/advanced_data_type_convert_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AdvancedDataTypeSchema"}}},
          "description": "AdvancedDataTypeResponse object has been returned."
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Return an AdvancedDataTypeResponse",
      "tags": ["Advanced Data Type"]
    }
  },
  "/api/v1/advanced_data_type/types": {
    "get": {
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {"result": {"items": {"type": "string"}, "type": "array"}},
                "type": "object"
              }
            }
          },
          "description": "a successful return of the available advanced data types has taken place."
        },
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Return a list of available advanced data types",
      "tags": ["Advanced Data Type"]
    }
  }
}
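The `q` query parameter carries a JSON- or Rison-encoded payload. A hedged call sketch; the payload field names are assumptions inferred only from the schema name `advanced_data_type_convert_schema`, and the host and token are placeholders:

```python
import json

import requests

BASE = "http://superset.example.com"                   # placeholder host
HEADERS = {"Authorization": "Bearer <access-token>"}   # from the login sketch above

# Hypothetical payload: ask the "port" advanced data type to convert a value.
q = json.dumps({"type": "port", "values": ["https"]})
resp = requests.get(f"{BASE}/api/v1/advanced_data_type/convert",
                    params={"q": q}, headers=HEADERS)
print(resp.json())
```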
998 .ai/openapi/superset/paths/annotation_layer.json Normal file
@@ -0,0 +1,998 @@

{
  "/api/v1/annotation_layer/": {
    "delete": {
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_delete_ids_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "CSS templates bulk delete"
        },
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Delete multiple annotation layers in a bulk operation",
      "tags": ["Annotation Layers"]
    },
    "get": {
      "description": "Gets a list of annotation layers, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_list_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "count": {"description": "The total record count on the backend", "type": "number"},
                  "description_columns": {
                    "properties": {"column_name": {"description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string"}},
                    "type": "object"
                  },
                  "ids": {"description": "A list of item ids, useful when you don't know the column id", "items": {"type": "string"}, "type": "array"},
                  "label_columns": {
                    "properties": {"column_name": {"description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string"}},
                    "type": "object"
                  },
                  "list_columns": {"description": "A list of columns", "items": {"type": "string"}, "type": "array"},
                  "list_title": {"description": "A title to render. Will be translated by babel", "example": "List Items", "type": "string"},
                  "order_columns": {"description": "A list of allowed columns to sort", "items": {"type": "string"}, "type": "array"},
                  "result": {"description": "The result from the get list query", "items": {"$ref": "#/components/schemas/AnnotationLayerRestApi.get_list"}, "type": "array"}
                },
                "type": "object"
              }
            }
          },
          "description": "Items from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get a list of annotation layers",
      "tags": ["Annotation Layers"]
    },
    "post": {
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AnnotationLayerRestApi.post"}}},
        "description": "Annotation Layer schema",
        "required": true
      },
      "responses": {
        "201": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "number"}, "result": {"$ref": "#/components/schemas/AnnotationLayerRestApi.post"}}, "type": "object"}}},
          "description": "Annotation added"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Create an annotation layer",
      "tags": ["Annotation Layers"]
    }
  },
  "/api/v1/annotation_layer/_info": {
    "get": {
      "description": "Get metadata information about this API resource",
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_info_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "add_columns": {"type": "object"},
                  "edit_columns": {"type": "object"},
                  "filters": {
                    "properties": {
                      "column_name": {
                        "items": {
                          "properties": {
                            "name": {"description": "The filter name. Will be translated by babel", "type": "string"},
                            "operator": {"description": "The filter operation key to use on list filters", "type": "string"}
                          },
                          "type": "object"
                        },
                        "type": "array"
                      }
                    },
                    "type": "object"
                  },
                  "permissions": {"description": "The user permissions for this API resource", "items": {"type": "string"}, "type": "array"}
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get metadata information about this API resource",
      "tags": ["Annotation Layers"]
    }
  },
  "/api/v1/annotation_layer/related/{column_name}": {
    "get": {
      "parameters": [
        {"in": "path", "name": "column_name", "required": true, "schema": {"type": "string"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_related_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"$ref": "#/components/schemas/RelatedResponseSchema"}}},
          "description": "Related column data"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get related fields data",
      "tags": ["Annotation Layers"]
    }
  },
  "/api/v1/annotation_layer/{pk}": {
    "delete": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Item deleted"
        },
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Delete annotation layer",
      "tags": ["Annotation Layers"]
    },
    "get": {
      "description": "Get an item model",
      "parameters": [
        {"in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_item_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "description_columns": {
                    "properties": {"column_name": {"description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string"}},
                    "type": "object"
                  },
                  "id": {"description": "The item id", "type": "string"},
                  "label_columns": {
                    "properties": {"column_name": {"description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string"}},
                    "type": "object"
                  },
                  "result": {"$ref": "#/components/schemas/AnnotationLayerRestApi.get"},
                  "show_columns": {"description": "A list of columns", "items": {"type": "string"}, "type": "array"},
                  "show_title": {"description": "A title to render. Will be translated by babel", "example": "Show Item Details", "type": "string"}
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get an annotation layer",
      "tags": ["Annotation Layers"]
    },
    "put": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}
      ],
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AnnotationLayerRestApi.put"}}},
        "description": "Annotation schema",
        "required": true
      },
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "number"}, "result": {"$ref": "#/components/schemas/AnnotationLayerRestApi.put"}}, "type": "object"}}},
          "description": "Annotation changed"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Update an annotation layer",
      "tags": ["Annotation Layers"]
    }
  },
  "/api/v1/annotation_layer/{pk}/annotation/": {
    "delete": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_delete_ids_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Annotations bulk delete"
        },
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Bulk delete annotation layers",
      "tags": ["Annotation Layers"]
    },
    "get": {
      "description": "Gets a list of annotation layers, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {"description": "The annotation layer id for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_list_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "count": {"description": "The total record count on the backend", "type": "number"},
                  "ids": {"description": "A list of annotation ids", "items": {"type": "string"}, "type": "array"},
                  "result": {"description": "The result from the get list query", "items": {"$ref": "#/components/schemas/AnnotationRestApi.get_list"}, "type": "array"}
                },
                "type": "object"
              }
            }
          },
          "description": "Items from Annotations"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get a list of annotation layers",
      "tags": ["Annotation Layers"]
    },
    "post": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}
      ],
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AnnotationRestApi.post"}}},
        "description": "Annotation schema",
        "required": true
      },
      "responses": {
        "201": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "number"}, "result": {"$ref": "#/components/schemas/AnnotationRestApi.post"}}, "type": "object"}}},
          "description": "Annotation added"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Create an annotation layer",
      "tags": ["Annotation Layers"]
    }
  },
  "/api/v1/annotation_layer/{pk}/annotation/{annotation_id}": {
    "delete": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"description": "The annotation pk for this annotation", "in": "path", "name": "annotation_id", "required": true, "schema": {"type": "integer"}}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Item deleted"
        },
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Delete annotation layer",
      "tags": ["Annotation Layers"]
    },
    "get": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"description": "The annotation pk", "in": "path", "name": "annotation_id", "required": true, "schema": {"type": "integer"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_item_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"id": {"description": "The item id", "type": "string"}, "result": {"$ref": "#/components/schemas/AnnotationRestApi.get"}}, "type": "object"}}},
          "description": "Item from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get an annotation layer",
      "tags": ["Annotation Layers"]
    },
    "put": {
      "parameters": [
        {"description": "The annotation layer pk for this annotation", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"description": "The annotation pk for this annotation", "in": "path", "name": "annotation_id", "required": true, "schema": {"type": "integer"}}
      ],
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AnnotationRestApi.put"}}},
        "description": "Annotation schema",
        "required": true
      },
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "number"}, "result": {"$ref": "#/components/schemas/AnnotationRestApi.put"}}, "type": "object"}}},
          "description": "Annotation changed"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Update an annotation layer",
      "tags": ["Annotation Layers"]
    }
  }
}
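The two resources are nested: annotations live under a layer's `pk`. An end-to-end sketch of creating a layer and then an annotation inside it; the body field names follow `AnnotationLayerRestApi.post` and `AnnotationRestApi.post` by name only and are assumptions, as are the host and token:

```python
import requests

BASE = "http://superset.example.com"                   # placeholder host
HEADERS = {"Authorization": "Bearer <access-token>"}   # from the login sketch above

# Create a layer (assumed fields: name, descr).
layer = requests.post(f"{BASE}/api/v1/annotation_layer/", headers=HEADERS,
                      json={"name": "Releases", "descr": "Deploy markers"})
pk = layer.json()["id"]

# Create an annotation inside it (assumed fields shown).
requests.post(f"{BASE}/api/v1/annotation_layer/{pk}/annotation/", headers=HEADERS,
              json={"short_descr": "v2.1 deploy",
                    "start_dttm": "2026-03-16T00:00:00",
                    "end_dttm": "2026-03-16T01:00:00"})
```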
117 .ai/openapi/superset/paths/assets.json Normal file
@@ -0,0 +1,117 @@

{
  "/api/v1/assets/export/": {
    "get": {
      "description": "Gets a ZIP file with all the Superset assets (databases, datasets, charts, dashboards, saved queries) as YAML files.",
      "responses": {
        "200": {
          "content": {"application/zip": {"schema": {"format": "binary", "type": "string"}}},
          "description": "ZIP file"
        },
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Export all assets",
      "tags": ["Import/export"]
    }
  },
  "/api/v1/assets/import/": {
    "post": {
      "requestBody": {
        "content": {
          "multipart/form-data": {
            "schema": {
              "properties": {
                "bundle": {"description": "upload file (ZIP or JSON)", "format": "binary", "type": "string"},
                "passwords": {"description": "JSON map of passwords for each featured database in the ZIP file. If the ZIP includes a database config in the path `databases/MyDatabase.yaml`, the password should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_password\"}`.", "type": "string"},
                "sparse": {"description": "allow sparse update of resources", "type": "boolean"},
                "ssh_tunnel_passwords": {"description": "JSON map of passwords for each ssh_tunnel associated to a featured database in the ZIP file. If the ZIP includes a ssh_tunnel config in the path `databases/MyDatabase.yaml`, the password should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_password\"}`.", "type": "string"},
                "ssh_tunnel_private_key_passwords": {"description": "JSON map of private_key_passwords for each ssh_tunnel associated to a featured database in the ZIP file. If the ZIP includes a ssh_tunnel config in the path `databases/MyDatabase.yaml`, the private_key should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_private_key_password\"}`.", "type": "string"},
                "ssh_tunnel_private_keys": {"description": "JSON map of private_keys for each ssh_tunnel associated to a featured database in the ZIP file. If the ZIP includes a ssh_tunnel config in the path `databases/MyDatabase.yaml`, the private_key should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_private_key\"}`.", "type": "string"}
              },
              "type": "object"
            }
          }
        },
        "required": true
      },
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Assets import result"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Import multiple assets",
      "tags": ["Import/export"]
    }
  }
}
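An export-then-import sketch for these two endpoints; the `passwords` map keys follow the `databases/MyDatabase.yaml` convention documented in the schema above, and the host and token are placeholders:

```python
import requests

BASE = "http://superset.example.com"                   # placeholder host
HEADERS = {"Authorization": "Bearer <access-token>"}   # from the login sketch above

# Export every asset as a ZIP bundle.
export = requests.get(f"{BASE}/api/v1/assets/export/", headers=HEADERS)
with open("assets.zip", "wb") as out:
    out.write(export.content)

# Re-import the bundle, supplying database passwords stripped on export.
with open("assets.zip", "rb") as bundle:
    requests.post(
        f"{BASE}/api/v1/assets/import/",
        headers=HEADERS,
        files={"bundle": ("assets.zip", bundle, "application/zip")},
        data={"passwords": '{"databases/MyDatabase.yaml": "my_password"}'},
    )
```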
78 .ai/openapi/superset/paths/async_event.json Normal file
@@ -0,0 +1,78 @@

{
  "/api/v1/async_event/": {
    "get": {
      "description": "Reads off of the Redis events stream, using the user's JWT token and optional query params for last event received.",
      "parameters": [
        {"description": "Last ID received by the client", "in": "query", "name": "last_id", "schema": {"type": "string"}}
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "items": {
                      "properties": {
                        "channel_id": {"type": "string"},
                        "errors": {"items": {"type": "object"}, "type": "array"},
                        "id": {"type": "string"},
                        "job_id": {"type": "string"},
                        "result_url": {"type": "string"},
                        "status": {"type": "string"},
                        "user_id": {"type": "integer"}
                      },
                      "type": "object"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Async event results"
        },
        "401": {"$ref": "#/components/responses/401"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Read off of the Redis events stream",
      "tags": ["AsyncEventsRestApi"]
    }
  }
}
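The endpoint is designed for incremental polling: pass the last event id back as `last_id` so only newer events are returned. A minimal polling sketch under that assumption, with placeholder host and token:

```python
import time

import requests

BASE = "http://superset.example.com"                   # placeholder host
HEADERS = {"Authorization": "Bearer <access-token>"}   # from the login sketch above

last_id = None
while True:
    params = {"last_id": last_id} if last_id else {}
    events = requests.get(f"{BASE}/api/v1/async_event/",
                          headers=HEADERS, params=params).json()["result"]
    for ev in events:
        # Each event carries a status and, when done, a result_url to fetch.
        print(ev["status"], ev.get("result_url"))
        last_id = ev["id"]
    time.sleep(1)
```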
38 .ai/openapi/superset/paths/available_domains.json Normal file
@@ -0,0 +1,38 @@

{
  "/api/v1/available_domains/": {
    "get": {
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"result": {"$ref": "#/components/schemas/AvailableDomainsSchema"}}, "type": "object"}}},
          "description": "a list of available domains"
        },
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"}
      },
      "security": [{"jwt": []}],
      "summary": "Get all available domains",
      "tags": ["Available Domains"]
    }
  }
}
38 .ai/openapi/superset/paths/cachekey.json Normal file
@@ -0,0 +1,38 @@

{
  "/api/v1/cachekey/invalidate": {
    "post": {
      "description": "Takes a list of datasources, finds and invalidates the associated cache records and removes the database records.",
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/CacheInvalidationRequestSchema"}}},
        "description": "A list of datasources uuid or the tuples of database and datasource names",
        "required": true
      },
      "responses": {
        "201": {"description": "cache was successfully invalidated"},
        "400": {"$ref": "#/components/responses/400"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Invalidate cache records and remove the database records",
      "tags": ["CacheRestApi"]
    }
  }
}
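A call sketch for the invalidation endpoint. `CacheInvalidationRequestSchema` is not expanded in this split, so the `datasource_uuids` field name below is an assumption, as are the host, token, and uuid:

```python
import requests

BASE = "http://superset.example.com"                   # placeholder host
HEADERS = {"Authorization": "Bearer <access-token>"}   # from the login sketch above

# Invalidate cached results for one datasource by uuid (assumed field name).
requests.post(f"{BASE}/api/v1/cachekey/invalidate", headers=HEADERS,
              json={"datasource_uuids": ["some-datasource-uuid"]})
```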
1244 .ai/openapi/superset/paths/chart.json Normal file
File diff suppressed because it is too large
578
.ai/openapi/superset/paths/css_template.json
Normal file
578
.ai/openapi/superset/paths/css_template.json
Normal file
@@ -0,0 +1,578 @@
{
  "/api/v1/css_template/": {
    "delete": {
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_delete_ids_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "message": {
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "CSS templates bulk delete"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Bulk delete CSS templates",
      "tags": [
        "CSS Templates"
      ]
    },
    "get": {
      "description": "Gets a list of CSS templates, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_list_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "count": {
                    "description": "The total record count on the backend",
                    "type": "number"
                  },
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "ids": {
                    "description": "A list of item ids, useful when you don't know the column id",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "list_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "list_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "List Items",
                    "type": "string"
                  },
                  "order_columns": {
                    "description": "A list of allowed columns to sort",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "result": {
                    "description": "The result from the get list query",
                    "items": {
                      "$ref": "#/components/schemas/CssTemplateRestApi.get_list"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Items from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a list of CSS templates",
      "tags": [
        "CSS Templates"
      ]
    },
    "post": {
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/CssTemplateRestApi.post"
            }
          }
        },
        "description": "Model schema",
        "required": true
      },
      "responses": {
        "201": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "id": {
                    "type": "string"
                  },
                  "result": {
                    "$ref": "#/components/schemas/CssTemplateRestApi.post"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item inserted"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Create a CSS template",
      "tags": [
        "CSS Templates"
      ]
    }
  },
  "/api/v1/css_template/_info": {
    "get": {
      "description": "Get metadata information about this API resource",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_info_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "add_columns": {
                    "type": "object"
                  },
                  "edit_columns": {
                    "type": "object"
                  },
                  "filters": {
                    "properties": {
                      "column_name": {
                        "items": {
                          "properties": {
                            "name": {
                              "description": "The filter name. Will be translated by babel",
                              "type": "string"
                            },
                            "operator": {
                              "description": "The filter operation key to use on list filters",
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "type": "array"
                      }
                    },
                    "type": "object"
                  },
                  "permissions": {
                    "description": "The user permissions for this API resource",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get metadata information about this API resource",
      "tags": [
        "CSS Templates"
      ]
    }
  },
  "/api/v1/css_template/related/{column_name}": {
    "get": {
      "parameters": [
        {
          "in": "path",
          "name": "column_name",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_related_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RelatedResponseSchema"
              }
            }
          },
          "description": "Related column data"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get related fields data",
      "tags": [
        "CSS Templates"
      ]
    }
  },
  "/api/v1/css_template/{pk}": {
    "delete": {
      "parameters": [
        {
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "message": {
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item deleted"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Delete a CSS template",
      "tags": [
        "CSS Templates"
      ]
    },
    "get": {
      "description": "Get an item model",
      "parameters": [
        {
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_item_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "id": {
                    "description": "The item id",
                    "type": "string"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "result": {
                    "$ref": "#/components/schemas/CssTemplateRestApi.get"
                  },
                  "show_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "show_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "Show Item Details",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a CSS template",
      "tags": [
        "CSS Templates"
      ]
    },
    "put": {
      "parameters": [
        {
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        }
      ],
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/CssTemplateRestApi.put"
            }
          }
        },
        "description": "Model schema",
        "required": true
      },
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "$ref": "#/components/schemas/CssTemplateRestApi.put"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item changed"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Update a CSS template",
      "tags": [
        "CSS Templates"
      ]
    }
  }
}
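
The list, info, and item endpoints above all take a single Rison- or JSON-encoded `q` query parameter (`get_list_schema` and friends) for filtering, sorting, and pagination. A minimal sketch of one such call, assuming a local Superset instance and a valid JWT; the host, token, and filter values are illustrative, not part of the spec:

```python
import json
import requests

BASE = "http://localhost:8088"                 # assumed Superset host
HEADERS = {"Authorization": "Bearer <jwt>"}    # assumed valid JWT

# get_list_schema accepts a JSON (or Rison) document in the "q" parameter.
q = {"page": 0, "page_size": 25, "order_column": "template_name", "order_direction": "asc"}
resp = requests.get(f"{BASE}/api/v1/css_template/",
                    headers=HEADERS, params={"q": json.dumps(q)})
resp.raise_for_status()
body = resp.json()
print(body["count"], "templates total;", len(body["result"]), "returned")
```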

.ai/openapi/superset/paths/dashboard.json  (new file, +2220 lines; diff suppressed because it is too large)

.ai/openapi/superset/paths/database.json  (new file, +1939 lines; diff suppressed because it is too large)

.ai/openapi/superset/paths/dataset.json  (new file, +1222 lines; diff suppressed because it is too large)

.ai/openapi/superset/paths/datasource.json  (new file, +95 lines)
@@ -0,0 +1,95 @@
{
  "/api/v1/datasource/{datasource_type}/{datasource_id}/column/{column_name}/values/": {
    "get": {
      "parameters": [
        {
          "description": "The type of datasource",
          "in": "path",
          "name": "datasource_type",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "description": "The id of the datasource",
          "in": "path",
          "name": "datasource_id",
          "required": true,
          "schema": {
            "type": "integer"
          }
        },
        {
          "description": "The name of the column to get values for",
          "in": "path",
          "name": "column_name",
          "required": true,
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "items": {
                      "oneOf": [
                        {
                          "type": "string"
                        },
                        {
                          "type": "integer"
                        },
                        {
                          "type": "number"
                        },
                        {
                          "type": "boolean"
                        },
                        {
                          "type": "object"
                        }
                      ]
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "A List of distinct values for the column"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "403": {
          "$ref": "#/components/responses/403"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get possible values for a datasource column",
      "tags": [
        "Datasources"
      ]
    }
  }
}
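
A sketch of the single endpoint in this file, filling the three path parameters; the datasource id and column name are hypothetical example values:

```python
import requests

BASE = "http://localhost:8088"                 # assumed Superset host
HEADERS = {"Authorization": "Bearer <jwt>"}    # assumed valid JWT

# "table", 1, and "country" are hypothetical datasource_type/datasource_id/column_name values.
url = f"{BASE}/api/v1/datasource/table/1/column/country/values/"
values = requests.get(url, headers=HEADERS).json()["result"]
# Per the oneOf above, each entry may be a string, integer, number, boolean, or object.
print(values[:10])
```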

.ai/openapi/superset/paths/embedded_dashboard.json  (new file, +97 lines)
@@ -0,0 +1,97 @@
{
  "/api/v1/embedded_dashboard/{uuid}": {
    "get": {
      "parameters": [
        {
          "description": "The embedded configuration uuid",
          "in": "path",
          "name": "uuid",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "description": "The ui config of embedded dashboard (optional).",
          "in": "query",
          "name": "uiConfig",
          "schema": {
            "type": "number"
          }
        },
        {
          "description": "Show filters (optional).",
          "in": "query",
          "name": "show_filters",
          "schema": {
            "type": "boolean"
          }
        },
        {
          "description": "Expand filters (optional).",
          "in": "query",
          "name": "expand_filters",
          "schema": {
            "type": "boolean"
          }
        },
        {
          "description": "Native filters key to apply filters. (optional).",
          "in": "query",
          "name": "native_filters_key",
          "schema": {
            "type": "string"
          }
        },
        {
          "description": "Permalink key to apply filters. (optional).",
          "in": "query",
          "name": "permalink_key",
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "$ref": "#/components/schemas/EmbeddedDashboardResponseSchema"
                  }
                },
                "type": "object"
              }
            },
            "text/html": {
              "schema": {
                "type": "string"
              }
            }
          },
          "description": "Result contains the embedded dashboard configuration"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a report schedule log",
      "tags": [
        "Embedded Dashboard"
      ]
    }
  }
}

.ai/openapi/superset/paths/explore.json  (new file, +437 lines)
@@ -0,0 +1,437 @@
{
  "/api/v1/explore/": {
    "get": {
      "description": "Assembles Explore related information (form_data, slice, dataset) in a single endpoint.<br/><br/> The information can be assembled from:<br/> - The cache using a form_data_key<br/> - The metadata database using a permalink_key<br/> - Build from scratch using dataset or slice identifiers.",
      "parameters": [
        {
          "in": "query",
          "name": "form_data_key",
          "schema": {
            "type": "string"
          }
        },
        {
          "in": "query",
          "name": "permalink_key",
          "schema": {
            "type": "string"
          }
        },
        {
          "in": "query",
          "name": "slice_id",
          "schema": {
            "type": "integer"
          }
        },
        {
          "in": "query",
          "name": "datasource_id",
          "schema": {
            "type": "integer"
          }
        },
        {
          "in": "query",
          "name": "datasource_type",
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/ExploreContextSchema"
              }
            }
          },
          "description": "Returns the initial context."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Assemble Explore related information in a single endpoint",
      "tags": [
        "Explore"
      ]
    }
  },
  "/api/v1/explore/form_data": {
    "post": {
      "parameters": [
        {
          "in": "query",
          "name": "tab_id",
          "schema": {
            "type": "integer"
          }
        }
      ],
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/FormDataPostSchema"
            }
          }
        },
        "required": true
      },
      "responses": {
        "201": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "key": {
                    "description": "The key to retrieve the form_data.",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "The form_data was stored successfully."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Create a new form_data",
      "tags": [
        "Explore Form Data"
      ]
    }
  },
  "/api/v1/explore/form_data/{key}": {
    "delete": {
      "parameters": [
        {
          "description": "The form_data key.",
          "in": "path",
          "name": "key",
          "required": true,
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "message": {
                    "description": "The result of the operation",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Deleted the stored form_data."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Delete a form_data",
      "tags": [
        "Explore Form Data"
      ]
    },
    "get": {
      "parameters": [
        {
          "in": "path",
          "name": "key",
          "required": true,
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "form_data": {
                    "description": "The stored form_data",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Returns the stored form_data."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a form_data",
      "tags": [
        "Explore Form Data"
      ]
    },
    "put": {
      "parameters": [
        {
          "in": "path",
          "name": "key",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "in": "query",
          "name": "tab_id",
          "schema": {
            "type": "integer"
          }
        }
      ],
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/FormDataPutSchema"
            }
          }
        },
        "required": true
      },
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "key": {
                    "description": "The key to retrieve the form_data.",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "The form_data was stored successfully."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Update an existing form_data",
      "tags": [
        "Explore Form Data"
      ]
    }
  },
  "/api/v1/explore/permalink": {
    "post": {
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/ExplorePermalinkStateSchema"
            }
          }
        },
        "required": true
      },
      "responses": {
        "201": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "key": {
                    "description": "The key to retrieve the permanent link data.",
                    "type": "string"
                  },
                  "url": {
                    "description": "permanent link.",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "The permanent link was stored successfully."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Create a new permanent link",
      "tags": [
        "Explore Permanent Link"
      ]
    }
  },
  "/api/v1/explore/permalink/{key}": {
    "get": {
      "parameters": [
        {
          "in": "path",
          "name": "key",
          "required": true,
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "state": {
                    "description": "The stored state",
                    "type": "object"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Returns the stored form_data."
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get chart's permanent link state",
      "tags": [
        "Explore Permanent Link"
      ]
    }
  }
}
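
The permalink pair above stores arbitrary Explore state and hands back a key plus URL. A sketch of the round trip; the payload layout under ExplorePermalinkStateSchema is an assumption here, since the spec only references the schema by name:

```python
import requests

BASE = "http://localhost:8088"                 # assumed Superset host
HEADERS = {"Authorization": "Bearer <jwt>"}    # assumed valid JWT

# Payload shape is an assumption; consult ExplorePermalinkStateSchema for the real fields.
created = requests.post(f"{BASE}/api/v1/explore/permalink", headers=HEADERS,
                        json={"formData": {"datasource": "1__table", "viz_type": "table"}}).json()
state = requests.get(f"{BASE}/api/v1/explore/permalink/{created['key']}",
                     headers=HEADERS).json()
print(created["url"], state["state"])
```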

.ai/openapi/superset/paths/log.json  (new file, +327 lines)
@@ -0,0 +1,327 @@
{
  "/api/v1/log/": {
    "get": {
      "description": "Gets a list of logs, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_list_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "count": {
                    "description": "The total record count on the backend",
                    "type": "number"
                  },
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "ids": {
                    "description": "A list of item ids, useful when you don't know the column id",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "list_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "list_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "List Items",
                    "type": "string"
                  },
                  "order_columns": {
                    "description": "A list of allowed columns to sort",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "result": {
                    "description": "The result from the get list query",
                    "items": {
                      "$ref": "#/components/schemas/LogRestApi.get_list"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Items from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a list of logs",
      "tags": [
        "LogRestApi"
      ]
    },
    "post": {
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/LogRestApi.post"
            }
          }
        },
        "description": "Model schema",
        "required": true
      },
      "responses": {
        "201": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "id": {
                    "type": "string"
                  },
                  "result": {
                    "$ref": "#/components/schemas/LogRestApi.post"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item inserted"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "tags": [
        "LogRestApi"
      ]
    }
  },
  "/api/v1/log/recent_activity/": {
    "get": {
      "parameters": [
        {
          "description": "The id of the user",
          "in": "path",
          "name": "user_id",
          "required": true,
          "schema": {
            "type": "integer"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_recent_activity_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RecentActivityResponseSchema"
              }
            }
          },
          "description": "A List of recent activity objects"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "403": {
          "$ref": "#/components/responses/403"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get recent activity data for a user",
      "tags": [
        "LogRestApi"
      ]
    }
  },
  "/api/v1/log/{pk}": {
    "get": {
      "description": "Get an item model",
      "parameters": [
        {
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_item_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "id": {
                    "description": "The item id",
                    "type": "string"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "result": {
                    "$ref": "#/components/schemas/LogRestApi.get"
                  },
                  "show_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "show_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "Show Item Details",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a log detail information",
      "tags": [
        "LogRestApi"
      ]
    }
  }
}

.ai/openapi/superset/paths/me.json  (new file, +100 lines)
@@ -0,0 +1,100 @@
{
  "/api/v1/me/": {
    "get": {
      "description": "Gets the user object corresponding to the agent making the request, or returns a 401 error if the user is unauthenticated.",
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "$ref": "#/components/schemas/UserResponseSchema"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "The current user"
        },
        "401": {
          "$ref": "#/components/responses/401"
        }
      },
      "summary": "Get the user object",
      "tags": [
        "Current User"
      ]
    },
    "put": {
      "description": "Updates the current user's first name, last name, or password.",
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/CurrentUserPutSchema"
            }
          }
        },
        "required": true
      },
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "$ref": "#/components/schemas/UserResponseSchema"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "User updated successfully"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        }
      },
      "summary": "Update the current user",
      "tags": [
        "Current User"
      ]
    }
  },
  "/api/v1/me/roles/": {
    "get": {
      "description": "Gets the user roles corresponding to the agent making the request, or returns a 401 error if the user is unauthenticated.",
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "$ref": "#/components/schemas/UserResponseSchema"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "The current user"
        },
        "401": {
          "$ref": "#/components/responses/401"
        }
      },
      "summary": "Get the user roles",
      "tags": [
        "Current User"
      ]
    }
  }
}
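
A sketch of updating the current user via PUT /api/v1/me/; the field names are assumed from the endpoint description ("first name, last name, or password"), since only CurrentUserPutSchema is referenced by name:

```python
import requests

BASE = "http://localhost:8088"                 # assumed Superset host
HEADERS = {"Authorization": "Bearer <jwt>"}    # assumed valid JWT

# Field names are assumed from the description; see CurrentUserPutSchema.
resp = requests.put(f"{BASE}/api/v1/me/", headers=HEADERS,
                    json={"first_name": "Ada", "last_name": "Lovelace"})
resp.raise_for_status()
print(resp.json()["result"])
```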

.ai/openapi/superset/paths/menu.json  (new file, +63 lines)
@@ -0,0 +1,63 @@
{
  "/api/v1/menu/": {
    "get": {
      "description": "Get the menu data structure. Returns a forest like structure with the menu the user has access to",
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "description": "Menu items in a forest like data structure",
                    "items": {
                      "properties": {
                        "childs": {
                          "items": {
                            "type": "object"
                          },
                          "type": "array"
                        },
                        "icon": {
                          "description": "Icon name to show for this menu item",
                          "type": "string"
                        },
                        "label": {
                          "description": "Pretty name for the menu item",
                          "type": "string"
                        },
                        "name": {
                          "description": "The internal menu item name, maps to permission_name",
                          "type": "string"
                        },
                        "url": {
                          "description": "The URL for the menu item",
                          "type": "string"
                        }
                      },
                      "type": "object"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Get menu data"
        },
        "401": {
          "$ref": "#/components/responses/401"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "tags": [
        "Menu"
      ]
    }
  }
}

.ai/openapi/superset/paths/misc.json  (new file, +43 lines)
@@ -0,0 +1,43 @@
{
  "/api/{version}/_openapi": {
    "get": {
      "description": "Get the OpenAPI spec for a specific API version",
      "parameters": [
        {
          "in": "path",
          "name": "version",
          "required": true,
          "schema": {
            "type": "string"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "type": "object"
              }
            }
          },
          "description": "The OpenAPI spec"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "tags": [
        "OpenApi"
      ]
    }
  }
}
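
The _openapi endpoint is also how a spec directory like this one can be refreshed; a minimal sketch, with "v1" as the assumed version value:

```python
import requests

BASE = "http://localhost:8088"                 # assumed Superset host
HEADERS = {"Authorization": "Bearer <jwt>"}    # assumed valid JWT

spec = requests.get(f"{BASE}/api/v1/_openapi", headers=HEADERS).json()
print(len(spec.get("paths", {})), "paths in the v1 spec")
```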

.ai/openapi/superset/paths/query.json  (new file, +443 lines)
@@ -0,0 +1,443 @@
{
  "/api/v1/query/": {
    "get": {
      "description": "Gets a list of queries, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_list_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "count": {
                    "description": "The total record count on the backend",
                    "type": "number"
                  },
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "ids": {
                    "description": "A list of item ids, useful when you don't know the column id",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "list_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "list_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "List Items",
                    "type": "string"
                  },
                  "order_columns": {
                    "description": "A list of allowed columns to sort",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "result": {
                    "description": "The result from the get list query",
                    "items": {
                      "$ref": "#/components/schemas/QueryRestApi.get_list"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Items from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a list of queries",
      "tags": [
        "Queries"
      ]
    }
  },
  "/api/v1/query/distinct/{column_name}": {
    "get": {
      "parameters": [
        {
          "in": "path",
          "name": "column_name",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_related_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/DistincResponseSchema"
              }
            }
          },
          "description": "Distinct field data"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get distinct values from field data",
      "tags": [
        "Queries"
      ]
    }
  },
  "/api/v1/query/related/{column_name}": {
    "get": {
      "parameters": [
        {
          "in": "path",
          "name": "column_name",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_related_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RelatedResponseSchema"
              }
            }
          },
          "description": "Related column data"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get related fields data",
      "tags": [
        "Queries"
      ]
    }
  },
  "/api/v1/query/stop": {
    "post": {
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/StopQuerySchema"
            }
          }
        },
        "description": "Stop query schema",
        "required": true
      },
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Query stopped"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Manually stop a query with client_id",
      "tags": [
        "Queries"
      ]
    }
  },
  "/api/v1/query/updated_since": {
    "get": {
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/queries_get_updated_since_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "description": "A List of queries that changed after last_updated_ms",
                    "items": {
                      "$ref": "#/components/schemas/QueryRestApi.get"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Queries list"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a list of queries that changed after last_updated_ms",
      "tags": [
        "Queries"
      ]
    }
  },
  "/api/v1/query/{pk}": {
    "get": {
      "description": "Get an item model",
      "parameters": [
        {
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_item_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "id": {
                    "description": "The item id",
                    "type": "string"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "result": {
                    "$ref": "#/components/schemas/QueryRestApi.get"
                  },
                  "show_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "show_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "Show Item Details",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get query detail information",
      "tags": [
        "Queries"
      ]
    }
  }
}
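
Stopping a running query uses StopQuerySchema; the summary above says the lookup is by client_id, so a payload keyed that way is assumed here:

```python
import requests

BASE = "http://localhost:8088"                 # assumed Superset host
HEADERS = {"Authorization": "Bearer <jwt>"}    # assumed valid JWT

# The "client_id" key and its value are assumptions; see StopQuerySchema.
resp = requests.post(f"{BASE}/api/v1/query/stop", headers=HEADERS,
                     json={"client_id": "abc123"})
print(resp.json()["result"])  # the result string described in the 200 response above
```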

.ai/openapi/superset/paths/report.json  (new file, +825 lines)
@@ -0,0 +1,825 @@
{
  "/api/v1/report/": {
    "delete": {
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_delete_ids_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "message": {
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Report Schedule bulk delete"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "403": {
          "$ref": "#/components/responses/403"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Bulk delete report schedules",
      "tags": [
        "Report Schedules"
      ]
    },
    "get": {
      "description": "Gets a list of report schedules, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_list_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "count": {
                    "description": "The total record count on the backend",
                    "type": "number"
                  },
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "ids": {
                    "description": "A list of item ids, useful when you don't know the column id",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "list_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "list_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "List Items",
                    "type": "string"
                  },
                  "order_columns": {
                    "description": "A list of allowed columns to sort",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "result": {
                    "description": "The result from the get list query",
                    "items": {
                      "$ref": "#/components/schemas/ReportScheduleRestApi.get_list"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Items from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a list of report schedules",
      "tags": [
        "Report Schedules"
      ]
    },
    "post": {
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/ReportScheduleRestApi.post"
            }
          }
        },
        "description": "Report Schedule schema",
        "required": true
      },
      "responses": {
        "201": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "id": {
                    "type": "number"
                  },
                  "result": {
                    "$ref": "#/components/schemas/ReportScheduleRestApi.post"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Report schedule added"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Create a report schedule",
      "tags": [
        "Report Schedules"
      ]
    }
  },
  "/api/v1/report/_info": {
    "get": {
      "description": "Get metadata information about this API resource",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_info_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "add_columns": {
                    "type": "object"
                  },
                  "edit_columns": {
                    "type": "object"
                  },
                  "filters": {
                    "properties": {
                      "column_name": {
                        "items": {
                          "properties": {
                            "name": {
                              "description": "The filter name. Will be translated by babel",
                              "type": "string"
                            },
                            "operator": {
                              "description": "The filter operation key to use on list filters",
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "type": "array"
                      }
                    },
                    "type": "object"
                  },
                  "permissions": {
                    "description": "The user permissions for this API resource",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get metadata information about this API resource",
      "tags": [
        "Report Schedules"
      ]
    }
  },
  "/api/v1/report/related/{column_name}": {
    "get": {
      "parameters": [
        {
          "in": "path",
          "name": "column_name",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_related_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RelatedResponseSchema"
              }
            }
          },
          "description": "Related column data"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get related fields data",
      "tags": [
        "Report Schedules"
      ]
    }
  },
  "/api/v1/report/slack_channels/": {
    "get": {
      "description": "Get slack channels",
      "parameters": [
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_slack_channels_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "result": {
                    "items": {
                      "properties": {
                        "id": {
                          "type": "string"
                        },
                        "name": {
                          "type": "string"
                        }
                      },
                      "type": "object"
                    },
                    "type": "array"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Slack channels"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "403": {
          "$ref": "#/components/responses/403"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get slack channels",
      "tags": [
        "Report Schedules"
      ]
    }
  },
  "/api/v1/report/{pk}": {
    "delete": {
      "parameters": [
        {
          "description": "The report schedule pk",
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "message": {
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item deleted"
        },
        "403": {
          "$ref": "#/components/responses/403"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Delete a report schedule",
      "tags": [
        "Report Schedules"
      ]
    },
    "get": {
      "description": "Get an item model",
      "parameters": [
        {
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        },
        {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/get_item_schema"
              }
            }
          },
          "in": "query",
          "name": "q"
        }
      ],
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "description_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The description for the column name. Will be translated by babel",
                        "example": "A Nice description for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "id": {
                    "description": "The item id",
                    "type": "string"
                  },
                  "label_columns": {
                    "properties": {
                      "column_name": {
                        "description": "The label for the column name. Will be translated by babel",
                        "example": "A Nice label for the column",
                        "type": "string"
                      }
                    },
                    "type": "object"
                  },
                  "result": {
                    "$ref": "#/components/schemas/ReportScheduleRestApi.get"
                  },
                  "show_columns": {
                    "description": "A list of columns",
                    "items": {
                      "type": "string"
                    },
                    "type": "array"
                  },
                  "show_title": {
                    "description": "A title to render. Will be translated by babel",
                    "example": "Show Item Details",
                    "type": "string"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Item from Model"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Get a report schedule",
      "tags": [
        "Report Schedules"
      ]
    },
    "put": {
      "parameters": [
        {
          "description": "The Report Schedule pk",
          "in": "path",
          "name": "pk",
          "required": true,
          "schema": {
            "type": "integer"
          }
        }
      ],
      "requestBody": {
        "content": {
          "application/json": {
            "schema": {
              "$ref": "#/components/schemas/ReportScheduleRestApi.put"
            }
          }
        },
        "description": "Report Schedule schema",
        "required": true
      },
      "responses": {
        "200": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "id": {
                    "type": "number"
                  },
                  "result": {
                    "$ref": "#/components/schemas/ReportScheduleRestApi.put"
                  }
                },
                "type": "object"
              }
            }
          },
          "description": "Report Schedule changed"
        },
        "400": {
          "$ref": "#/components/responses/400"
        },
        "401": {
          "$ref": "#/components/responses/401"
        },
        "403": {
          "$ref": "#/components/responses/403"
        },
        "404": {
          "$ref": "#/components/responses/404"
        },
        "422": {
          "$ref": "#/components/responses/422"
        },
        "500": {
          "$ref": "#/components/responses/500"
        }
      },
      "security": [
        {
          "jwt": []
        }
      ],
      "summary": "Update a report schedule",
      "tags": [
        "Report Schedules"
      ]
    }
  },
  "/api/v1/report/{pk}/log/": {
    "get": {
      "description": "Gets a list of report schedule logs, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {
          "description": "The report schedule id for these logs"
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
{
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/get_list_schema"
|
||||
}
|
||||
}
|
||||
},
|
||||
"in": "query",
|
||||
"name": "q"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"description": "The total record count on the backend",
|
||||
"type": "number"
|
||||
},
|
||||
"ids": {
|
||||
"description": "A list of log ids",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"result": {
|
||||
"description": "The result from the get list query",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/ReportExecutionLogRestApi.get_list"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Items from logs"
|
||||
},
|
||||
"400": {
|
||||
"$ref": "#/components/responses/400"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"summary": "Get a list of report schedule logs",
|
||||
"tags": [
|
||||
"Report Schedules"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/report/{pk}/log/{log_id}": {
|
||||
"get": {
|
||||
"parameters": [
|
||||
{
|
||||
"description": "The report schedule pk for log",
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "The log pk",
|
||||
"in": "path",
|
||||
"name": "log_id",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
{
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/get_item_schema"
|
||||
}
|
||||
}
|
||||
},
|
||||
"in": "query",
|
||||
"name": "q"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"description": "The log id",
|
||||
"type": "string"
|
||||
},
|
||||
"result": {
|
||||
"$ref": "#/components/schemas/ReportExecutionLogRestApi.get"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Item log"
|
||||
},
|
||||
"400": {
|
||||
"$ref": "#/components/responses/400"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"summary": "Get a report schedule log",
|
||||
"tags": [
|
||||
"Report Schedules"
|
||||
]
|
||||
}
|
||||
}
|
||||
}\n
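The `q` parameter on the log-listing endpoint above accepts a Rison-encoded (or JSON) query, per the referenced `get_list_schema`. A minimal sketch of paging through a schedule's execution logs in Python; the base URL, bearer token, and schedule id 42 are placeholders, not part of this diff:

```python
import requests

BASE = "http://localhost:8088"  # assumption: local Superset instance
HEADERS = {"Authorization": "Bearer <jwt-access-token>"}  # placeholder token

# Rison-encoded paging arguments, as allowed by get_list_schema.
params = {"q": "(page:0,page_size:25)"}

resp = requests.get(f"{BASE}/api/v1/report/42/log/", headers=HEADERS, params=params)
resp.raise_for_status()
payload = resp.json()
# The 200 schema above defines count, ids, and result.
print(f"{payload['count']} log entries total; ids on this page: {payload['ids']}")
```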
591  .ai/openapi/superset/paths/rowlevelsecurity.json  Normal file
@@ -0,0 +1,591 @@
{
"/api/v1/rowlevelsecurity/": {
  "delete": {
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_delete_ids_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "RLS Rule bulk delete"
      },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Bulk delete RLS rules",
    "tags": [ "Row Level Security" ]
  },
  "get": {
    "description": "Gets a list of RLS, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_list_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "count": { "description": "The total record count on the backend", "type": "number" },
            "description_columns": {
              "properties": { "column_name": { "description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string" } },
              "type": "object"
            },
            "ids": { "description": "A list of item ids, useful when you don't know the column id", "items": { "type": "string" }, "type": "array" },
            "label_columns": {
              "properties": { "column_name": { "description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string" } },
              "type": "object"
            },
            "list_columns": { "description": "A list of columns", "items": { "type": "string" }, "type": "array" },
            "list_title": { "description": "A title to render. Will be translated by babel", "example": "List Items", "type": "string" },
            "order_columns": { "description": "A list of allowed columns to sort", "items": { "type": "string" }, "type": "array" },
            "result": { "description": "The result from the get list query", "items": { "$ref": "#/components/schemas/RLSRestApi.get_list" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "Items from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get a list of RLS",
    "tags": [ "Row Level Security" ]
  },
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/RLSRestApi.post" } } },
      "description": "RLS schema",
      "required": true
    },
    "responses": {
      "201": {
        "content": { "application/json": { "schema": {
          "properties": { "id": { "type": "number" }, "result": { "$ref": "#/components/schemas/RLSRestApi.post" } },
          "type": "object"
        } } },
        "description": "RLS Rule added"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Create a new RLS rule",
    "tags": [ "Row Level Security" ]
  }
},
"/api/v1/rowlevelsecurity/_info": {
  "get": {
    "description": "Get metadata information about this API resource",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_info_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "add_columns": { "type": "object" },
            "edit_columns": { "type": "object" },
            "filters": {
              "properties": {
                "column_name": {
                  "items": {
                    "properties": {
                      "name": { "description": "The filter name. Will be translated by babel", "type": "string" },
                      "operator": { "description": "The filter operation key to use on list filters", "type": "string" }
                    },
                    "type": "object"
                  },
                  "type": "array"
                }
              },
              "type": "object"
            },
            "permissions": { "description": "The user permissions for this API resource", "items": { "type": "string" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "Item from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get metadata information about this API resource",
    "tags": [ "Row Level Security" ]
  }
},
"/api/v1/rowlevelsecurity/related/{column_name}": {
  "get": {
    "parameters": [
      { "in": "path", "name": "column_name", "required": true, "schema": { "type": "string" } },
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_related_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/RelatedResponseSchema" } } },
        "description": "Related column data"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get related fields data",
    "tags": [ "Row Level Security" ]
  }
},
"/api/v1/rowlevelsecurity/{pk}": {
  "delete": {
    "parameters": [
      { "in": "path", "name": "pk", "required": true, "schema": { "type": "integer" } }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "Item deleted"
      },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Delete an RLS",
    "tags": [ "Row Level Security" ]
  },
  "get": {
    "description": "Get an item model",
    "parameters": [
      { "in": "path", "name": "pk", "required": true, "schema": { "type": "integer" } },
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_item_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "description_columns": {
              "properties": { "column_name": { "description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string" } },
              "type": "object"
            },
            "id": { "description": "The item id", "type": "string" },
            "label_columns": {
              "properties": { "column_name": { "description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string" } },
              "type": "object"
            },
            "result": { "$ref": "#/components/schemas/RLSRestApi.get" },
            "show_columns": { "description": "A list of columns", "items": { "type": "string" }, "type": "array" },
            "show_title": { "description": "A title to render. Will be translated by babel", "example": "Show Item Details", "type": "string" }
          },
          "type": "object"
        } } },
        "description": "Item from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get an RLS",
    "tags": [ "Row Level Security" ]
  },
  "put": {
    "parameters": [
      { "description": "The Rule pk", "in": "path", "name": "pk", "required": true, "schema": { "type": "integer" } }
    ],
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/RLSRestApi.put" } } },
      "description": "RLS schema",
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": { "id": { "type": "number" }, "result": { "$ref": "#/components/schemas/RLSRestApi.put" } },
          "type": "object"
        } } },
        "description": "Rule changed"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Update an RLS rule",
    "tags": [ "Row Level Security" ]
  }
}
}
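For reference, a sketch of exercising the `POST /api/v1/rowlevelsecurity/` endpoint above. The request body must satisfy `RLSRestApi.post`, which is defined outside this diff; the field names below follow Superset's RLS model but should be treated as assumptions here.

```python
import requests

BASE = "http://localhost:8088"  # assumption: local Superset instance
HEADERS = {"Authorization": "Bearer <jwt-access-token>"}  # placeholder token

# Assumed RLSRestApi.post fields (name, clause, filter_type, tables, roles);
# the schema body itself is not part of this diff.
rule = {
    "name": "emea-only",
    "clause": "region = 'EMEA'",
    "filter_type": "Regular",
    "tables": [1],  # placeholder table id
    "roles": [2],   # placeholder role id
}

resp = requests.post(f"{BASE}/api/v1/rowlevelsecurity/", json=rule, headers=HEADERS)
# Per the spec above, success is 201 with {"id": ..., "result": {...}}.
print(resp.status_code, resp.json())
```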
766  .ai/openapi/superset/paths/saved_query.json  Normal file
@@ -0,0 +1,766 @@
{
"/api/v1/saved_query/": {
  "delete": {
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_delete_ids_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "Saved queries bulk delete"
      },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Bulk delete saved queries",
    "tags": [ "Queries" ]
  },
  "get": {
    "description": "Gets a list of saved queries, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_list_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "count": { "description": "The total record count on the backend", "type": "number" },
            "description_columns": {
              "properties": { "column_name": { "description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string" } },
              "type": "object"
            },
            "ids": { "description": "A list of item ids, useful when you don't know the column id", "items": { "type": "string" }, "type": "array" },
            "label_columns": {
              "properties": { "column_name": { "description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string" } },
              "type": "object"
            },
            "list_columns": { "description": "A list of columns", "items": { "type": "string" }, "type": "array" },
            "list_title": { "description": "A title to render. Will be translated by babel", "example": "List Items", "type": "string" },
            "order_columns": { "description": "A list of allowed columns to sort", "items": { "type": "string" }, "type": "array" },
            "result": { "description": "The result from the get list query", "items": { "$ref": "#/components/schemas/SavedQueryRestApi.get_list" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "Items from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get a list of saved queries",
    "tags": [ "Queries" ]
  },
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SavedQueryRestApi.post" } } },
      "description": "Model schema",
      "required": true
    },
    "responses": {
      "201": {
        "content": { "application/json": { "schema": {
          "properties": { "id": { "type": "string" }, "result": { "$ref": "#/components/schemas/SavedQueryRestApi.post" } },
          "type": "object"
        } } },
        "description": "Item inserted"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Create a saved query",
    "tags": [ "Queries" ]
  }
},
"/api/v1/saved_query/_info": {
  "get": {
    "description": "Get metadata information about this API resource",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_info_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "add_columns": { "type": "object" },
            "edit_columns": { "type": "object" },
            "filters": {
              "properties": {
                "column_name": {
                  "items": {
                    "properties": {
                      "name": { "description": "The filter name. Will be translated by babel", "type": "string" },
                      "operator": { "description": "The filter operation key to use on list filters", "type": "string" }
                    },
                    "type": "object"
                  },
                  "type": "array"
                }
              },
              "type": "object"
            },
            "permissions": { "description": "The user permissions for this API resource", "items": { "type": "string" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "Item from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get metadata information about this API resource",
    "tags": [ "Queries" ]
  }
},
"/api/v1/saved_query/distinct/{column_name}": {
  "get": {
    "parameters": [
      { "in": "path", "name": "column_name", "required": true, "schema": { "type": "string" } },
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_related_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/DistincResponseSchema" } } },
        "description": "Distinct field data"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get distinct values from field data",
    "tags": [ "Queries" ]
  }
},
"/api/v1/saved_query/export/": {
  "get": {
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_export_ids_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/zip": { "schema": { "format": "binary", "type": "string" } } },
        "description": "A zip file with saved query(ies) and database(s) as YAML"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Download multiple saved queries as YAML files",
    "tags": [ "Queries" ]
  }
},
"/api/v1/saved_query/import/": {
  "post": {
    "requestBody": {
      "content": {
        "multipart/form-data": {
          "schema": {
            "properties": {
              "formData": { "description": "upload file (ZIP)", "format": "binary", "type": "string" },
              "overwrite": { "description": "overwrite existing saved queries?", "type": "boolean" },
              "passwords": { "description": "JSON map of passwords for each featured database in the ZIP file. If the ZIP includes a database config in the path `databases/MyDatabase.yaml`, the password should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_password\"}`.", "type": "string" },
              "ssh_tunnel_passwords": { "description": "JSON map of passwords for each ssh_tunnel associated to a featured database in the ZIP file. If the ZIP includes a ssh_tunnel config in the path `databases/MyDatabase.yaml`, the password should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_password\"}`.", "type": "string" },
              "ssh_tunnel_private_key_passwords": { "description": "JSON map of private_key_passwords for each ssh_tunnel associated to a featured database in the ZIP file. If the ZIP includes a ssh_tunnel config in the path `databases/MyDatabase.yaml`, the private_key should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_private_key_password\"}`.", "type": "string" },
              "ssh_tunnel_private_keys": { "description": "JSON map of private_keys for each ssh_tunnel associated to a featured database in the ZIP file. If the ZIP includes a ssh_tunnel config in the path `databases/MyDatabase.yaml`, the private_key should be provided in the following format: `{\"databases/MyDatabase.yaml\": \"my_private_key\"}`.", "type": "string" }
            },
            "type": "object"
          }
        }
      },
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "Saved Query import result"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Import saved queries with associated databases",
    "tags": [ "Queries" ]
  }
},
"/api/v1/saved_query/related/{column_name}": {
  "get": {
    "parameters": [
      { "in": "path", "name": "column_name", "required": true, "schema": { "type": "string" } },
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_related_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/RelatedResponseSchema" } } },
        "description": "Related column data"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get related fields data",
    "tags": [ "Queries" ]
  }
},
"/api/v1/saved_query/{pk}": {
  "delete": {
    "parameters": [
      { "in": "path", "name": "pk", "required": true, "schema": { "type": "integer" } }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "Item deleted"
      },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Delete a saved query",
    "tags": [ "Queries" ]
  },
  "get": {
    "description": "Get an item model",
    "parameters": [
      { "in": "path", "name": "pk", "required": true, "schema": { "type": "integer" } },
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_item_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "description_columns": {
              "properties": { "column_name": { "description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string" } },
              "type": "object"
            },
            "id": { "description": "The item id", "type": "string" },
            "label_columns": {
              "properties": { "column_name": { "description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string" } },
              "type": "object"
            },
            "result": { "$ref": "#/components/schemas/SavedQueryRestApi.get" },
            "show_columns": { "description": "A list of columns", "items": { "type": "string" }, "type": "array" },
            "show_title": { "description": "A title to render. Will be translated by babel", "example": "Show Item Details", "type": "string" }
          },
          "type": "object"
        } } },
        "description": "Item from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get a saved query",
    "tags": [ "Queries" ]
  },
  "put": {
    "parameters": [
      { "in": "path", "name": "pk", "required": true, "schema": { "type": "integer" } }
    ],
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SavedQueryRestApi.put" } } },
      "description": "Model schema",
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": { "result": { "$ref": "#/components/schemas/SavedQueryRestApi.put" } },
          "type": "object"
        } } },
        "description": "Item changed"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Update a saved query",
    "tags": [ "Queries" ]
  }
}
}
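The import endpoint above takes a multipart form whose `passwords` field is a JSON map keyed by the database config path inside the ZIP, exactly as its description states. A minimal sketch; the base URL, token, and file name are placeholders:

```python
import json
import requests

BASE = "http://localhost:8088"  # assumption: local Superset instance
HEADERS = {"Authorization": "Bearer <jwt-access-token>"}  # placeholder token

with open("saved_queries_export.zip", "rb") as f:  # placeholder export bundle
    resp = requests.post(
        f"{BASE}/api/v1/saved_query/import/",
        headers=HEADERS,
        files={"formData": ("saved_queries_export.zip", f, "application/zip")},
        data={
            "overwrite": "true",
            # Format documented in the spec above.
            "passwords": json.dumps({"databases/MyDatabase.yaml": "my_password"}),
        },
    )

print(resp.status_code, resp.json())  # 200 with a "message" field on success
```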
3724  .ai/openapi/superset/paths/security.json  Normal file
(File diff suppressed because it is too large)
427  .ai/openapi/superset/paths/sqllab.json  Normal file
@@ -0,0 +1,427 @@
{
"/api/v1/sqllab/": {
  "get": {
    "description": "Assembles SQLLab bootstrap data (active_tab, databases, queries, tab_state_ids) in a single endpoint. The data can be assembled from the current user's id.",
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SQLLabBootstrapSchema" } } },
        "description": "Returns the initial bootstrap data for SqlLab"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get the bootstrap data for SqlLab page",
    "tags": [ "SQL Lab" ]
  }
},
"/api/v1/sqllab/estimate/": {
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/EstimateQueryCostSchema" } } },
      "description": "SQL query and params",
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "result": { "type": "object" } }, "type": "object" } } },
        "description": "Query estimation result"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Estimate the SQL query execution cost",
    "tags": [ "SQL Lab" ]
  }
},
"/api/v1/sqllab/execute/": {
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ExecutePayloadSchema" } } },
      "description": "SQL query and params",
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/QueryExecutionResponseSchema" } } },
        "description": "Query execution result"
      },
      "202": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/QueryExecutionResponseSchema" } } },
        "description": "Query execution result, query still running"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Execute a SQL query",
    "tags": [ "SQL Lab" ]
  }
},
"/api/v1/sqllab/export/{client_id}/": {
  "get": {
    "parameters": [
      { "description": "The SQL query result identifier", "in": "path", "name": "client_id", "required": true, "schema": { "type": "integer" } }
    ],
    "responses": {
      "200": {
        "content": { "text/csv": { "schema": { "type": "string" } } },
        "description": "SQL query results"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Export the SQL query results to a CSV",
    "tags": [ "SQL Lab" ]
  }
},
"/api/v1/sqllab/format_sql/": {
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/FormatQueryPayloadSchema" } } },
      "description": "SQL query",
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "result": { "type": "string" } }, "type": "object" } } },
        "description": "Format SQL result"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Format SQL code",
    "tags": [ "SQL Lab" ]
  }
},
"/api/v1/sqllab/permalink": {
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ExplorePermalinkStateSchema" } } },
      "required": true
    },
    "responses": {
      "201": {
        "content": { "application/json": { "schema": {
          "properties": {
            "key": { "description": "The key to retrieve the permanent link data.", "type": "string" },
            "url": { "description": "permanent link.", "type": "string" }
          },
          "type": "object"
        } } },
        "description": "The permanent link was stored successfully."
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Create a new permanent link",
    "tags": [ "SQL Lab Permanent Link" ]
  }
},
"/api/v1/sqllab/permalink/{key}": {
  "get": {
    "parameters": [
      { "in": "path", "name": "key", "required": true, "schema": { "type": "string" } }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": { "state": { "description": "The stored state", "type": "object" } },
          "type": "object"
        } } },
        "description": "Returns the stored form_data."
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get permanent link state for SQLLab editor.",
    "tags": [ "SQL Lab Permanent Link" ]
  }
},
"/api/v1/sqllab/results/": {
  "get": {
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/sql_lab_get_results_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/QueryExecutionResponseSchema" } } },
        "description": "SQL query execution result"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "404": { "$ref": "#/components/responses/404" },
      "410": { "$ref": "#/components/responses/410" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get the result of a SQL query execution",
    "tags": [ "SQL Lab" ]
  }
}
}
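The execute endpoint above distinguishes a finished query (200) from one still running (202), both returning `QueryExecutionResponseSchema`. A sketch of submitting a query; the payload keys mirror Superset's `ExecutePayloadSchema` but are assumptions here, since that schema is defined outside this diff:

```python
import requests

BASE = "http://localhost:8088"  # assumption: local Superset instance
HEADERS = {"Authorization": "Bearer <jwt-access-token>"}  # placeholder token

# Assumed ExecutePayloadSchema fields; the schema body is not part of this diff.
payload = {"database_id": 1, "sql": "SELECT 1", "runAsync": False}

resp = requests.post(f"{BASE}/api/v1/sqllab/execute/", json=payload, headers=HEADERS)

if resp.status_code == 200:
    print("query finished:", resp.json())
elif resp.status_code == 202:
    # Still running: poll the /api/v1/sqllab/results/ endpoint (specced above)
    # with the Rison-encoded q parameter defined by sql_lab_get_results_schema.
    print("query accepted, still running:", resp.json())
else:
    resp.raise_for_status()
```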
994  .ai/openapi/superset/paths/tag.json  Normal file
@@ -0,0 +1,994 @@
{
"/api/v1/tag/": {
  "delete": {
    "description": "Bulk deletes tags. This will remove all tagged objects with this tag.",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/delete_tags_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "Deletes multiple Tags"
      },
      "401": { "$ref": "#/components/responses/401" },
      "403": { "$ref": "#/components/responses/403" },
      "404": { "$ref": "#/components/responses/404" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Bulk delete tags",
    "tags": [ "Tags" ]
  },
  "get": {
    "description": "Get a list of tags, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_list_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "count": { "description": "The total record count on the backend", "type": "number" },
            "description_columns": {
              "properties": { "column_name": { "description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string" } },
              "type": "object"
            },
            "ids": { "description": "A list of item ids, useful when you don't know the column id", "items": { "type": "string" }, "type": "array" },
            "label_columns": {
              "properties": { "column_name": { "description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string" } },
              "type": "object"
            },
            "list_columns": { "description": "A list of columns", "items": { "type": "string" }, "type": "array" },
            "list_title": { "description": "A title to render. Will be translated by babel", "example": "List Items", "type": "string" },
            "order_columns": { "description": "A list of allowed columns to sort", "items": { "type": "string" }, "type": "array" },
            "result": { "description": "The result from the get list query", "items": { "$ref": "#/components/schemas/TagRestApi.get_list" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "Items from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get a list of tags",
    "tags": [ "Tags" ]
  },
  "post": {
    "description": "Create a new Tag",
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/TagRestApi.post" } } },
      "description": "Tag schema",
      "required": true
    },
    "responses": {
      "201": {
        "content": { "application/json": { "schema": {
          "properties": { "id": { "type": "number" }, "result": { "$ref": "#/components/schemas/TagRestApi.post" } },
          "type": "object"
        } } },
        "description": "Tag added"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Create a tag",
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/_info": {
  "get": {
    "description": "Get metadata information about this API resource",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_info_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "add_columns": { "type": "object" },
            "edit_columns": { "type": "object" },
            "filters": {
              "properties": {
                "column_name": {
                  "items": {
                    "properties": {
                      "name": { "description": "The filter name. Will be translated by babel", "type": "string" },
                      "operator": { "description": "The filter operation key to use on list filters", "type": "string" }
                    },
                    "type": "object"
                  },
                  "type": "array"
                }
              },
              "type": "object"
            },
            "permissions": { "description": "The user permissions for this API resource", "items": { "type": "string" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "Item from Model"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "422": { "$ref": "#/components/responses/422" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get metadata information about tag API endpoints",
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/bulk_create": {
  "post": {
    "requestBody": {
      "content": { "application/json": { "schema": { "$ref": "#/components/schemas/TagPostBulkSchema" } } },
      "description": "Tag schema",
      "required": true
    },
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/TagPostBulkResponseSchema" } } },
        "description": "Bulk created tags and tagged objects"
      },
      "302": { "description": "Redirects to the current digest" },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Bulk create tags and tagged objects",
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/favorite_status/": {
  "get": {
    "description": "Get favorited tags for current user",
    "parameters": [
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_fav_star_ids_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GetFavStarIdsSchema" } } },
        "description": "None"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/get_objects/": {
  "get": {
    "parameters": [
      { "in": "path", "name": "tag_id", "required": true, "schema": { "type": "integer" } }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": {
          "properties": {
            "result": { "items": { "$ref": "#/components/schemas/TaggedObjectEntityResponseSchema" }, "type": "array" }
          },
          "type": "object"
        } } },
        "description": "List of tagged objects associated with a Tag"
      },
      "302": { "description": "Redirects to the current digest" },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get all objects associated with a tag",
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/related/{column_name}": {
  "get": {
    "parameters": [
      { "in": "path", "name": "column_name", "required": true, "schema": { "type": "string" } },
      { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/get_related_schema" } } }, "in": "query", "name": "q" }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "$ref": "#/components/schemas/RelatedResponseSchema" } } },
        "description": "Related column data"
      },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Get related fields data",
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/{object_type}/{object_id}/": {
  "post": {
    "description": "Adds tags to an object. Creates new tags if they do not already exist.",
    "parameters": [
      { "in": "path", "name": "object_type", "required": true, "schema": { "type": "integer" } },
      { "in": "path", "name": "object_id", "required": true, "schema": { "type": "integer" } }
    ],
    "requestBody": {
      "content": { "application/json": { "schema": {
        "properties": {
          "tags": { "description": "list of tag names to add to object", "items": { "type": "string" }, "type": "array" }
        },
        "type": "object"
      } } },
      "description": "Tag schema",
      "required": true
    },
    "responses": {
      "201": { "description": "Tag added" },
      "302": { "description": "Redirects to the current digest" },
      "400": { "$ref": "#/components/responses/400" },
      "401": { "$ref": "#/components/responses/401" },
      "404": { "$ref": "#/components/responses/404" },
      "500": { "$ref": "#/components/responses/500" }
    },
    "security": [ { "jwt": [] } ],
    "summary": "Add tags to an object",
    "tags": [ "Tags" ]
  }
},
"/api/v1/tag/{object_type}/{object_id}/{tag}/": {
  "delete": {
    "parameters": [
      { "in": "path", "name": "tag", "required": true, "schema": { "type": "string" } },
      { "in": "path", "name": "object_type", "required": true, "schema": { "type": "integer" } },
      { "in": "path", "name": "object_id", "required": true, "schema": { "type": "integer" } }
    ],
    "responses": {
      "200": {
        "content": { "application/json": { "schema": { "properties": { "message": { "type": "string" } }, "type": "object" } } },
        "description": "Chart delete"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/403"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"summary": "Delete a tagged object",
|
||||
"tags": [
|
||||
"Tags"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/tag/{pk}": {
|
||||
"delete": {
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"message": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Item deleted"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"summary": "Delete a tag",
|
||||
"tags": [
|
||||
"Tags"
|
||||
]
|
||||
},
|
||||
"get": {
|
||||
"description": "Get an item model",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
{
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/get_item_schema"
|
||||
}
|
||||
}
|
||||
},
|
||||
"in": "query",
|
||||
"name": "q"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"description_columns": {
|
||||
"properties": {
|
||||
"column_name": {
|
||||
"description": "The description for the column name. Will be translated by babel",
|
||||
"example": "A Nice description for the column",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"id": {
|
||||
"description": "The item id",
|
||||
"type": "string"
|
||||
},
|
||||
"label_columns": {
|
||||
"properties": {
|
||||
"column_name": {
|
||||
"description": "The label for the column name. Will be translated by babel",
|
||||
"example": "A Nice label for the column",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"result": {
|
||||
"$ref": "#/components/schemas/TagRestApi.get"
|
||||
},
|
||||
"show_columns": {
|
||||
"description": "A list of columns",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"show_title": {
|
||||
"description": "A title to render. Will be translated by babel",
|
||||
"example": "Show Item Details",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Item from Model"
|
||||
},
|
||||
"400": {
|
||||
"$ref": "#/components/responses/400"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"summary": "Get a tag detail information",
|
||||
"tags": [
|
||||
"Tags"
|
||||
]
|
||||
},
|
||||
"put": {
|
||||
"description": "Changes a Tag.",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
],
|
||||
"requestBody": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/TagRestApi.put"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Chart schema",
|
||||
"required": true
|
||||
},
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "number"
|
||||
},
|
||||
"result": {
|
||||
"$ref": "#/components/schemas/TagRestApi.put"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Tag changed"
|
||||
},
|
||||
"400": {
|
||||
"$ref": "#/components/responses/400"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/403"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"summary": "Update a tag",
|
||||
"tags": [
|
||||
"Tags"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/tag/{pk}/favorites/": {
|
||||
"delete": {
|
||||
"description": "Remove the tag from the user favorite list",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"result": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Tag removed from favorites"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Tags"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"description": "Marks the tag as favorite for the current user",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "pk",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"properties": {
|
||||
"result": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Tag added to favorites"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/401"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/404"
|
||||
},
|
||||
"422": {
|
||||
"$ref": "#/components/responses/422"
|
||||
},
|
||||
"500": {
|
||||
"$ref": "#/components/responses/500"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"jwt": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Tags"
|
||||
]
|
||||
}
|
||||
}
|
||||
}\n
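For orientation, here is what a call against the bulk-create contract above could look like. A minimal sketch using `requests`; the host, the token, and the exact payload field names are assumptions inferred from `TagPostBulkSchema` usage, not definitions from this file:

```python
import requests

BASE_URL = "http://localhost:8088"  # assumed Superset host
TOKEN = "<jwt>"                     # assumed: obtained via the security login endpoint

# Assumed payload shape for TagPostBulkSchema: tag names plus the
# (object_type, object_id) pairs each tag should be attached to.
payload = {
    "tags": [
        {"name": "finance", "objects_to_tag": [["dashboard", 12], ["chart", 34]]}
    ]
}

resp = requests.post(
    f"{BASE_URL}/api/v1/tag/bulk_create",
    json=payload,
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
# On success the body follows TagPostBulkResponseSchema:
# the created tags and the objects that were tagged.
print(resp.json())
```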

907 .ai/openapi/superset/paths/theme.json (Normal file)
@@ -0,0 +1,907 @@
{
  "/api/v1/theme/": {
    "delete": {
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_delete_ids_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Themes bulk delete"
        },
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Bulk delete themes",
      "tags": ["Themes"]
    },
    "get": {
      "description": "Gets a list of themes, use Rison or JSON query parameters for filtering, sorting, pagination and for selecting specific columns and metadata.",
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_list_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {
            "count": {"description": "The total record count on the backend", "type": "number"},
            "description_columns": {"properties": {"column_name": {"description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string"}}, "type": "object"},
            "ids": {"description": "A list of item ids, useful when you don't know the column id", "items": {"type": "string"}, "type": "array"},
            "label_columns": {"properties": {"column_name": {"description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string"}}, "type": "object"},
            "list_columns": {"description": "A list of columns", "items": {"type": "string"}, "type": "array"},
            "list_title": {"description": "A title to render. Will be translated by babel", "example": "List Items", "type": "string"},
            "order_columns": {"description": "A list of allowed columns to sort", "items": {"type": "string"}, "type": "array"},
            "result": {"description": "The result from the get list query", "items": {"$ref": "#/components/schemas/ThemeRestApi.get_list"}, "type": "array"}
          }, "type": "object"}}},
          "description": "Items from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get a list of themes",
      "tags": ["Themes"]
    },
    "post": {
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ThemeRestApi.post"}}},
        "description": "Theme schema",
        "required": true
      },
      "responses": {
        "201": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "number"}, "result": {"$ref": "#/components/schemas/ThemeRestApi.post"}}, "type": "object"}}},
          "description": "Theme created"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Create a theme",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/_info": {
    "get": {
      "description": "Get metadata information about this API resource",
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_info_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {
            "add_columns": {"type": "object"},
            "edit_columns": {"type": "object"},
            "filters": {"properties": {"column_name": {"items": {"properties": {"name": {"description": "The filter name. Will be translated by babel", "type": "string"}, "operator": {"description": "The filter operation key to use on list filters", "type": "string"}}, "type": "object"}, "type": "array"}}, "type": "object"},
            "permissions": {"description": "The user permissions for this API resource", "items": {"type": "string"}, "type": "array"}
          }, "type": "object"}}},
          "description": "Item from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get metadata information about this API resource",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/export/": {
    "get": {
      "parameters": [
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_export_ids_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/zip": {"schema": {"format": "binary", "type": "string"}}},
          "description": "Theme export"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Download multiple themes as YAML files",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/import/": {
    "post": {
      "requestBody": {
        "content": {"multipart/form-data": {"schema": {"properties": {"formData": {"format": "binary", "type": "string"}, "overwrite": {"type": "string"}}, "type": "object"}}},
        "required": true
      },
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Theme imported"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Import themes from a ZIP file",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/related/{column_name}": {
    "get": {
      "parameters": [
        {"in": "path", "name": "column_name", "required": true, "schema": {"type": "string"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_related_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"$ref": "#/components/schemas/RelatedResponseSchema"}}},
          "description": "Related column data"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get related fields data",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/unset_system_dark": {
    "delete": {
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"result": {"type": "string"}}, "type": "object"}}},
          "description": "System dark theme cleared"
        },
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Clear the system dark theme",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/unset_system_default": {
    "delete": {
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"result": {"type": "string"}}, "type": "object"}}},
          "description": "System default theme cleared"
        },
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Clear the system default theme",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/{pk}": {
    "delete": {
      "parameters": [{"in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"message": {"type": "string"}}, "type": "object"}}},
          "description": "Theme deleted"
        },
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Delete a theme",
      "tags": ["Themes"]
    },
    "get": {
      "description": "Get an item model",
      "parameters": [
        {"in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}},
        {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/get_item_schema"}}}, "in": "query", "name": "q"}
      ],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {
            "description_columns": {"properties": {"column_name": {"description": "The description for the column name. Will be translated by babel", "example": "A Nice description for the column", "type": "string"}}, "type": "object"},
            "id": {"description": "The item id", "type": "string"},
            "label_columns": {"properties": {"column_name": {"description": "The label for the column name. Will be translated by babel", "example": "A Nice label for the column", "type": "string"}}, "type": "object"},
            "result": {"$ref": "#/components/schemas/ThemeRestApi.get"},
            "show_columns": {"description": "A list of columns", "items": {"type": "string"}, "type": "array"},
            "show_title": {"description": "A title to render. Will be translated by babel", "example": "Show Item Details", "type": "string"}
          }, "type": "object"}}},
          "description": "Item from Model"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Get a theme",
      "tags": ["Themes"]
    },
    "put": {
      "parameters": [{"in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}],
      "requestBody": {
        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ThemeRestApi.put"}}},
        "description": "Theme schema",
        "required": true
      },
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "number"}, "result": {"$ref": "#/components/schemas/ThemeRestApi.put"}}, "type": "object"}}},
          "description": "Theme updated"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Update a theme",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/{pk}/set_system_dark": {
    "put": {
      "parameters": [{"description": "The theme id", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "integer"}, "result": {"type": "string"}}, "type": "object"}}},
          "description": "Theme successfully set as system dark"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Set a theme as the system dark theme",
      "tags": ["Themes"]
    }
  },
  "/api/v1/theme/{pk}/set_system_default": {
    "put": {
      "parameters": [{"description": "The theme id", "in": "path", "name": "pk", "required": true, "schema": {"type": "integer"}}],
      "responses": {
        "200": {
          "content": {"application/json": {"schema": {"properties": {"id": {"type": "integer"}, "result": {"type": "string"}}, "type": "object"}}},
          "description": "Theme successfully set as system default"
        },
        "400": {"$ref": "#/components/responses/400"},
        "401": {"$ref": "#/components/responses/401"},
        "403": {"$ref": "#/components/responses/403"},
        "404": {"$ref": "#/components/responses/404"},
        "422": {"$ref": "#/components/responses/422"},
        "500": {"$ref": "#/components/responses/500"}
      },
      "security": [{"jwt": []}],
      "summary": "Set a theme as the system default theme",
      "tags": ["Themes"]
    }
  }
}
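The theme endpoints above compose into a simple lifecycle: create a theme, then promote it. A hedged sketch; the host, token, and the `ThemeRestApi.post` field names are assumptions, since the schema bodies are not expanded in this file:

```python
import requests

BASE_URL = "http://localhost:8088"           # assumed host
HEADERS = {"Authorization": "Bearer <jwt>"}  # assumed JWT auth

# Create a theme; the field names here are assumed, not taken from this spec.
created = requests.post(
    f"{BASE_URL}/api/v1/theme/",
    json={"theme_name": "corporate-dark", "json_data": "{}"},
    headers=HEADERS,
)
created.raise_for_status()
pk = created.json()["id"]

# Promote it: the set_system_default contract takes only the path parameter.
promoted = requests.put(
    f"{BASE_URL}/api/v1/theme/{pk}/set_system_default",
    headers=HEADERS,
)
promoted.raise_for_status()  # 200 returns {"id": ..., "result": ...}
```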

33 .ai/openapi/superset/paths/user.json (Normal file)
@@ -0,0 +1,33 @@
{
  "/api/v1/user/{user_id}/avatar.png": {
    "get": {
      "description": "Gets the avatar URL for the user with the given ID, or returns a 401 error if the user is unauthenticated.",
      "parameters": [
        {"description": "The ID of the user", "in": "path", "name": "user_id", "required": true, "schema": {"type": "string"}}
      ],
      "responses": {
        "301": {"description": "A redirect to the user's avatar URL"},
        "401": {"$ref": "#/components/responses/401"},
        "404": {"$ref": "#/components/responses/404"}
      },
      "summary": "Get the user avatar",
      "tags": ["User"]
    }
  }
}
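Note that the avatar endpoint answers with a 301 redirect rather than image bytes, so a client that wants the URL itself must not auto-follow it. A small sketch (host and user id are placeholders):

```python
import requests

resp = requests.get(
    "http://localhost:8088/api/v1/user/42/avatar.png",  # assumed host and user id
    allow_redirects=False,
)
if resp.status_code == 301:
    print("Avatar URL:", resp.headers["Location"])
else:
    resp.raise_for_status()
```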

File diff suppressed because it is too large

1 .gitignore (vendored)
@@ -77,3 +77,4 @@ node_modules/
.venv/
coverage/
*.tmp
logs/app.log.1
@@ -1 +1 @@
{"mcpServers":{"axiom-core":{"command":"/home/busya/dev/ast-mcp-core-server/.venv/bin/python","args":["-c","from src.server import main; main()"],"env":{"PYTHONPATH":"/home/busya/dev/ast-mcp-core-server"},"alwaysAllow":["read_grace_outline_tool","ast_search_tool","get_semantic_context_tool","build_task_context_tool","audit_contracts_tool","diff_contract_semantics_tool","simulate_patch_tool","patch_contract_tool","rename_contract_id_tool","move_contract_tool","extract_contract_tool","infer_missing_relations_tool","map_runtime_trace_to_contracts_tool","scaffold_contract_tests_tool","search_contracts_tool","reindex_workspace_tool","prune_contract_metadata_tool","workspace_semantic_health_tool","trace_tests_for_contract_tool"]}}}
{"mcpServers":{"axiom-core":{"command":"/home/busya/dev/ast-mcp-core-server/.venv/bin/python","args":["-c","from src.server import main; main()"],"env":{"PYTHONPATH":"/home/busya/dev/ast-mcp-core-server"},"alwaysAllow":["read_grace_outline_tool","ast_search_tool","get_semantic_context_tool","build_task_context_tool","audit_contracts_tool","diff_contract_semantics_tool","simulate_patch_tool","patch_contract_tool","rename_contract_id_tool","move_contract_tool","extract_contract_tool","infer_missing_relations_tool","map_runtime_trace_to_contracts_tool","scaffold_contract_tests_tool","search_contracts_tool","reindex_workspace_tool","prune_contract_metadata_tool","workspace_semantic_health_tool","trace_tests_for_contract_tool","guarded_patch_contract_tool","impact_analysis_tool"]}}}
@@ -51,6 +51,10 @@ Auto-generated from all feature plans. Last updated: 2025-12-19
- Existing auth database (`AUTH_DATABASE_URL`) with a dedicated per-user preference entity (024-user-dashboard-filter)
- Python 3.9+ (Backend), Node.js 18+ / Svelte 5.x (Frontend) + FastAPI, SQLAlchemy, APScheduler (Backend) | SvelteKit, Tailwind CSS, existing UI components (Frontend) (026-dashboard-health-windows)
- PostgreSQL / SQLite (existing database for `ValidationRecord` and new `ValidationPolicy`) (026-dashboard-health-windows)
- Python 3.9+ backend, Node.js 18+ frontend with Svelte 5 / SvelteKit + FastAPI, SQLAlchemy, Pydantic, existing [SupersetClient](../../backend/src/core/superset_client.py), existing frontend API wrapper patterns, Svelte runes, existing task/websocket stack (027-dataset-llm-orchestration)
- Existing application databases plus filesystem-backed uploaded semantic sources; reuse current configuration and task persistence stores (027-dataset-llm-orchestration)
- Python 3.9+ backend, Node.js 18+ frontend, Svelte 5 / SvelteKit frontend runtime + FastAPI, SQLAlchemy, Pydantic, existing `TaskManager`, existing `SupersetClient`, existing LLM provider stack, SvelteKit, Tailwind CSS, frontend `requestApi`/`fetchApi` wrappers (027-dataset-llm-orchestration)
- Existing application databases for persistent session/domain entities; existing tasks database for async execution metadata; filesystem for optional uploaded semantic sources/artifacts (027-dataset-llm-orchestration)

- Python 3.9+ (Backend), Node.js 18+ (Frontend Build) (001-plugin-arch-svelte-ui)
@@ -71,9 +75,9 @@ cd src; pytest; ruff check .
Python 3.9+ (Backend), Node.js 18+ (Frontend Build): Follow standard conventions

## Recent Changes
- 027-dataset-llm-orchestration: Added Python 3.9+ backend, Node.js 18+ frontend, Svelte 5 / SvelteKit frontend runtime + FastAPI, SQLAlchemy, Pydantic, existing `TaskManager`, existing `SupersetClient`, existing LLM provider stack, SvelteKit, Tailwind CSS, frontend `requestApi`/`fetchApi` wrappers
- 027-dataset-llm-orchestration: Added Python 3.9+ backend, Node.js 18+ frontend with Svelte 5 / SvelteKit + FastAPI, SQLAlchemy, Pydantic, existing [SupersetClient](../../backend/src/core/superset_client.py), existing frontend API wrapper patterns, Svelte runes, existing task/websocket stack
- 026-dashboard-health-windows: Added Python 3.9+ (Backend), Node.js 18+ / Svelte 5.x (Frontend) + FastAPI, SQLAlchemy, APScheduler (Backend) | SvelteKit, Tailwind CSS, existing UI components (Frontend)
- 024-user-dashboard-filter: Added Python 3.9+ (backend), Node.js 18+ + SvelteKit (frontend) + FastAPI, SQLAlchemy, Pydantic, existing auth stack (`get_current_user`), existing dashboards route/service, Svelte runes (`$state`, `$derived`, `$effect`), Tailwind CSS, frontend `api` wrapper
- 020-clean-repo-enterprise: Added Python 3.9+ (backend scripts/services), Shell (release tooling) + FastAPI stack (existing backend), ConfigManager, TaskManager, file utilities, internal artifact registries


<!-- MANUAL ADDITIONS START -->
@@ -1,9 +1,13 @@
---
description: Execute the implementation plan by processing and executing all tasks defined in tasks.md
handoffs:
  - label: Verify Changes
    agent: speckit.test
    prompt: Verify the implementation of...
  - label: Audit & Verify (Tester)
    agent: tester
    prompt: Perform semantic audit, algorithm emulation, and unit test verification for the completed tasks.
    send: true
  - label: Orchestration Control
    agent: orchestrator
    prompt: Review Tester's feedback and coordinate next steps.
    send: true
---

@@ -118,10 +122,20 @@ You **MUST** consider the user input before proceeding (if not empty).

7. Implementation execution rules:
   - **Strict Adherence**: Apply `.ai/standards/semantics.md` rules:
     - Every file MUST start with a `[DEF:id:Type]` header and end with a closing `[/DEF:id:Type]` anchor.
     - Include `@TIER` and define contracts (`@PRE`, `@POST`).
     - For Svelte components, use `@UX_STATE`, `@UX_FEEDBACK`, `@UX_RECOVERY`, and explicitly declare reactivity with `@UX_REATIVITY: State: $state, Derived: $derived`.
   - **Molecular Topology Logging**: Use prefixes `[EXPLORE]`, `[REASON]`, `[REFLECT]` in logs to trace logic.
     - Every file MUST start with a `[DEF:id:Type]` header and end with a matching closing `[/DEF:id:Type]` anchor.
     - Use `@COMPLEXITY` / `@C:` as the primary control tag; treat `@TIER` only as legacy compatibility metadata.
     - Contract density MUST match effective complexity from [`.ai/standards/semantics.md`](.ai/standards/semantics.md):
       - Complexity 1: anchors only.
       - Complexity 2: require `@PURPOSE`.
       - Complexity 3: require `@PURPOSE` and `@RELATION`.
       - Complexity 4: require `@PURPOSE`, `@RELATION`, `@PRE`, `@POST`, `@SIDE_EFFECT`.
       - Complexity 5: require full level-4 contract plus `@DATA_CONTRACT` and `@INVARIANT`.
     - For Python Complexity 4+ modules, the implementation MUST include a meaningful semantic logging path using `logger.reason()` and `logger.reflect()`.
     - For Python Complexity 5 modules, `belief_scope(...)` is mandatory and the critical path must be instrumented with `logger.reason()` / `logger.reflect()` according to the contract.
     - For Svelte components, require `@UX_STATE`, `@UX_FEEDBACK`, `@UX_RECOVERY`, and `@UX_REACTIVITY`; runes-only reactivity is allowed (`$state`, `$derived`, `$effect`, `$props`).
     - Reject pseudo-semantic markup: docstrings containing loose `@PURPOSE` / `@PRE` text do **NOT** satisfy the protocol unless represented in canonical anchored metadata blocks.
   - **Self-Audit**: The Coder MUST use `axiom-core` tools (like `audit_contracts_tool`) to verify semantic compliance before completion.
   - **Semantic Rejection Gate**: If self-audit reveals broken anchors, missing closing tags, missing required metadata for the effective complexity, orphaned critical classes/functions, or Complexity 4/5 Python code without required belief-state logging, the task is NOT complete and cannot be handed off as accepted work.
   - **CRITICAL Contracts**: If a task description contains a contract summary (e.g., `CRITICAL: PRE: ..., POST: ...`), these constraints are **MANDATORY** and must be strictly implemented in the code using guards/assertions (if applicable per protocol).
   - **Setup first**: Initialize project structure, dependencies, configuration
   - **Tests before code**: If you need to write tests for contracts, entities, and integration scenarios
@@ -130,18 +144,50 @@
   - **Polish and validation**: Unit tests, performance optimization, documentation

8. Progress tracking and error handling:
   - Report progress after each completed task
   - Halt execution if any non-parallel task fails
   - For parallel tasks [P], continue with successful tasks, report failed ones
   - Provide clear error messages with context for debugging
   - Suggest next steps if implementation cannot proceed
   - **IMPORTANT** For completed tasks, make sure to mark the task off as [X] in the tasks file.
   - Report progress after each completed task.
   - Halt execution if any non-parallel task fails.
   - For parallel tasks [P], continue with successful tasks, report failed ones.
   - Provide clear error messages with context for debugging.
   - Suggest next steps if implementation cannot proceed.
   - **IMPORTANT** For completed tasks, mark as [X] only AFTER local verification and self-audit.

9. Completion validation:
   - Verify all required tasks are completed
   - Check that implemented features match the original specification
   - Validate that tests pass and coverage meets requirements
   - Confirm the implementation follows the technical plan
   - Report final status with summary of completed work
9. **Handoff to Tester (Audit Loop)**:
   - Once a task or phase is complete, the Coder hands off to the Tester.
   - Handoff includes: file paths, declared complexity, expected contracts (`@PRE`, `@POST`, `@SIDE_EFFECT`, `@DATA_CONTRACT`, `@INVARIANT` when applicable), and a short logic overview.
   - Handoff MUST explicitly disclose any contract exceptions or known semantic debt. Hidden semantic debt is forbidden.
   - The handoff payload MUST instruct the Tester to execute the dedicated testing workflow [`.kilocode/workflows/speckit.test.md`](.kilocode/workflows/speckit.test.md), not just perform an informal review.

10. **Tester Verification & Orchestrator Gate**:
    - Tester MUST:
      - Explicitly run the [`.kilocode/workflows/speckit.test.md`](.kilocode/workflows/speckit.test.md) workflow as the verification procedure for the delivered implementation batch.
      - Perform mandatory semantic audit (using `audit_contracts_tool`).
      - Reject code that only imitates the protocol superficially, such as free-form docstrings with `@PURPOSE` text but without canonical `[DEF]...[/DEF]` anchors and header metadata.
      - Verify that effective complexity and required metadata match [`.ai/standards/semantics.md`](.ai/standards/semantics.md).
      - Verify that Python Complexity 4/5 implementations include required belief-state instrumentation (`belief_scope`, `logger.reason()`, `logger.reflect()`).
      - Emulate algorithms "in mind" step-by-step to ensure logic consistency.
      - Verify unit tests match the declared contracts.
    - If Tester finds issues:
      - Emit `[AUDIT_FAIL: semantic_noncompliance | contract_mismatch | logic_mismatch | test_mismatch | speckit_test_not_run]`.
      - Provide concrete file-path-based reasons, for example: missing anchors, module/class contract mismatch, missing `@DATA_CONTRACT`, missing `logger.reason()`, illegal docstring-only annotations, or missing execution of [`.kilocode/workflows/speckit.test.md`](.kilocode/workflows/speckit.test.md).
      - Notify the Orchestrator.
    - Orchestrator redirects the feedback to the Coder for remediation.
    - Orchestrator green-status rule:
      - The Orchestrator MUST NOT assign green/accepted status unless the Tester confirms that [`.kilocode/workflows/speckit.test.md`](.kilocode/workflows/speckit.test.md) was executed.
      - Missing execution evidence for [`.kilocode/workflows/speckit.test.md`](.kilocode/workflows/speckit.test.md) is an automatic gate failure even if the Tester verbally reports that the code "looks fine".
    - Acceptance (Final mark [X]):
      - Only after the Tester is satisfied with semantics, emulation, and tests.
      - Any semantic audit warning relevant to touched files blocks acceptance until remediated or explicitly waived by the user.
      - No final green status is allowed without explicit confirmation that [`.kilocode/workflows/speckit.test.md`](.kilocode/workflows/speckit.test.md) was run.

11. Completion validation:
    - Verify all required tasks are completed and accepted by the Tester.
    - Check that implemented features match the original specification.
    - Confirm the implementation follows the technical plan and GRACE standards.
    - Confirm touched files do not contain protocol-invalid patterns such as:
      - class/function-level docstring contracts standing in for canonical anchors,
      - missing closing anchors,
      - missing required metadata for declared complexity,
      - Complexity 5 repository/service code using only `belief_scope(...)` without explicit `logger.reason()` / `logger.reflect()` checkpoints.
    - Report final status with summary of completed and audited work.

Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit.tasks` first to regenerate the task list.
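To make the anchoring rules above concrete, here is a hedged sketch of what a Complexity 4 module could look like under this protocol — the identifier, relation target, and the semantic logger import are illustrative assumptions, not code from this repository:

```python
# [DEF:TagSyncService:Module]
# @COMPLEXITY: 4
# @PURPOSE: Synchronize local tag records with the Superset tag API.
# @RELATION: DEPENDS_ON ->[SupersetClient]
# @PRE: client is authenticated; tag_names contains non-empty strings.
# @POST: every requested tag exists remotely, or an exception propagates.
# @SIDE_EFFECT: issues HTTP POSTs against /api/v1/tag/bulk_create.

from some_project.logging import logger  # assumed semantic logger exposing reason()/reflect()


def sync_tags(client, tag_names):
    # Belief-state checkpoint before the decision point, per the Complexity 4 rule.
    logger.reason("Selecting tags that are missing on the remote side")
    missing = [name for name in tag_names if not client.tag_exists(name)]
    if missing:
        client.bulk_create_tags(missing)
    logger.reflect(f"Synchronized {len(missing)} new tags")
    return missing

# [/DEF:TagSyncService:Module]
```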
@@ -73,13 +73,23 @@ You **MUST** consider the user input before proceeding (if not empty).
   - Entity name, fields, relationships, validation rules.

2. **Design & Verify Contracts (Semantic Protocol)**:
   - **Drafting**: Define `[DEF:id:Type]` Headers, Contracts, and closing `[/DEF:id:Type]` for all new modules based on `.ai/standards/semantics.md`.
   - **TIER Classification**: Explicitly assign `@TIER: [CRITICAL|STANDARD|TRIVIAL]` to each module.
   - **CRITICAL Requirements**: For all CRITICAL modules, define full `@PRE`, `@POST`, and (if UI) `@UX_STATE` contracts. **MUST** also define testing contracts: `@TEST_CONTRACT`, `@TEST_FIXTURE`, `@TEST_EDGE`, and `@TEST_INVARIANT`.
   - **Drafting**: Define semantic headers, metadata, and closing anchors for all new modules strictly from `.ai/standards/semantics.md`.
   - **Complexity Classification**: Classify each contract with `@COMPLEXITY: [1|2|3|4|5]` or `@C:`. Treat `@TIER` only as a legacy compatibility hint and never as the primary rule source.
   - **Adaptive Contract Requirements**:
     - **Complexity 1**: anchors only; `@PURPOSE` optional.
     - **Complexity 2**: require `@PURPOSE`.
     - **Complexity 3**: require `@PURPOSE` and `@RELATION`; UI also requires `@UX_STATE`.
     - **Complexity 4**: require `@PURPOSE`, `@RELATION`, `@PRE`, `@POST`, `@SIDE_EFFECT`; Python modules must define a meaningful `logger.reason()` / `logger.reflect()` path or equivalent belief-state mechanism.
     - **Complexity 5**: require full level-4 contract plus `@DATA_CONTRACT` and `@INVARIANT`; Python modules must require `belief_scope`; UI modules must define UX contracts including `@UX_STATE`, `@UX_FEEDBACK`, `@UX_RECOVERY`, and `@UX_REACTIVITY`.
   - **Relation Syntax**: Write dependency edges in canonical GraphRAG form: `@RELATION: [PREDICATE] ->[TARGET_ID]`.
   - **Context Guard**: If a target relation, DTO, or required dependency cannot be named confidently, stop generation and emit `[NEED_CONTEXT: target]` instead of inventing placeholders.
   - **Testing Contracts**: Add `@TEST_CONTRACT`, `@TEST_SCENARIO`, `@TEST_FIXTURE`, `@TEST_EDGE`, and `@TEST_INVARIANT` when the design introduces audit-critical or explicitly test-governed contracts, especially for Complexity 5 boundaries.
   - **Self-Review**:
     - *Completeness*: Do `@PRE`/`@POST` cover edge cases identified in Research? Are test contracts present for CRITICAL?
     - *Connectivity*: Do `@RELATION` tags form a coherent graph?
     - *Compliance*: Does syntax match `[DEF:id:Type]` exactly and is it closed with `[/DEF:id:Type]`?
     - *Complexity Fit*: Does each contract include exactly the metadata and contract density required by its complexity level?
     - *Completeness*: Do `@PRE`/`@POST`, `@SIDE_EFFECT`, `@DATA_CONTRACT`, and UX tags cover the edge cases identified in Research and UX Reference?
     - *Connectivity*: Do `@RELATION` tags form a coherent graph using canonical `@RELATION: [PREDICATE] ->[TARGET_ID]` syntax?
     - *Compliance*: Are all anchors properly opened and closed, and does the chosen comment syntax match the target medium?
     - *Belief-State Requirements*: Do Complexity 4/5 Python modules explicitly account for `logger.reason()`, `logger.reflect()`, and `belief_scope` requirements?
   - **Output**: Write verified contracts to `contracts/modules.md`.

3. **Simulate Contract Usage**:

@@ -70,11 +70,12 @@ The tasks.md should be immediately executable - each task must be specific enough

**Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if user requests TDD approach.

### UX Preservation (CRITICAL)
### UX & Semantic Preservation (CRITICAL)

- **Source of Truth**: `ux_reference.md` is the absolute standard for the "feel" of the feature.
- **Violation Warning**: If any task would inherently violate the UX (e.g. "Remove progress bar to simplify code"), you **MUST** flag this to the user immediately.
- **Verification Task**: You **MUST** add a specific task at the end of each User Story phase: `- [ ] Txxx [USx] Verify implementation matches ux_reference.md (Happy Path & Errors)`
- **Source of Truth**: `ux_reference.md` for UX, `.ai/standards/semantics.md` for Code.
- **Violation Warning**: If any task violates UX or GRACE standards, flag it immediately.
- **Verification Task (UX)**: Add a task at the end of each Story phase: `- [ ] Txxx [USx] Verify implementation matches ux_reference.md (Happy Path & Errors)`
- **Verification Task (Audit)**: Add a mandatory audit task at the end of each Story phase: `- [ ] Txxx [USx] Acceptance: Perform semantic audit & algorithm emulation by Tester`

### Checklist Format (REQUIRED)

@@ -14,7 +14,7 @@ You **MUST** consider the user input before proceeding (if not empty).

## Goal

Execute full testing cycle: analyze code for testable modules, write tests with proper coverage, maintain test documentation, and ensure no test duplication or deletion.
Execute semantic audit and full testing cycle: verify contract compliance, emulate logic, ensure maximum coverage, and maintain test quality.

## Operating Constraints

@@ -56,16 +56,37 @@ Create coverage matrix:
|--------|------|-----------|------|----------------------|
| ... | ... | ... | ... | ... |

### 4. Write Tests (TDD Approach)
### 4. Semantic Audit & Logic Emulation (CRITICAL)

Before writing tests, the Tester MUST:
1. **Run `axiom-core.audit_contracts_tool`**: Identify semantic violations.
2. **Run a protocol-shape review on touched files**:
   - Reject non-canonical semantic markup, including docstring-only annotations such as `@PURPOSE`, `@PRE`, or `@INVARIANT` written inside class/function docstrings without canonical `[DEF]...[/DEF]` anchors and header metadata.
   - Reject files whose effective complexity contract is under-specified relative to [`.ai/standards/semantics.md`](.ai/standards/semantics.md).
   - Reject Python Complexity 4+ modules that omit meaningful `logger.reason()` / `logger.reflect()` checkpoints.
   - Reject Python Complexity 5 modules that omit `belief_scope(...)`, `@DATA_CONTRACT`, or `@INVARIANT`.
   - Treat broken or missing closing anchors as blocking violations.
3. **Emulate Algorithm**: Step through the code implementation in mind.
   - Verify it adheres to the `@PURPOSE` and `@INVARIANT`.
   - Verify `@PRE` and `@POST` conditions are correctly handled.
4. **Validation Verdict**:
   - If audit fails: Emit `[AUDIT_FAIL: semantic_noncompliance]` with concrete file-path reasons and notify Orchestrator.
   - Example blocking case: [`backend/src/services/dataset_review/repositories/session_repository.py`](backend/src/services/dataset_review/repositories/session_repository.py) contains a module anchor, but its nested repository class/method semantics are expressed as loose docstrings instead of canonical anchored contracts; this MUST be rejected until remediated or explicitly waived.
   - If audit passes: Proceed to writing/verifying tests.

### 5. Write Tests (TDD Approach)

For each module requiring tests:

1. **Check existing tests**: Scan `__tests__/` for duplicates
2. **Read TEST_FIXTURE**: If CRITICAL tier, read @TEST_FIXTURE from semantics header
3. **Write test**: Follow co-location strategy
1. **Check existing tests**: Scan `__tests__/` for duplicates.
2. **Read TEST_FIXTURE**: If CRITICAL tier, read @TEST_FIXTURE from semantics header.
3. **Do not normalize broken semantics through tests**:
   - The Tester must not write tests that silently accept malformed semantic protocol usage.
   - If implementation is semantically invalid, stop and reject instead of adapting tests around the invalid structure.
4. **Write test**: Follow co-location strategy.
   - Python: `src/module/__tests__/test_module.py`
   - Svelte: `src/lib/components/__tests__/test_component.test.js`
4. **Use mocks**: Use `unittest.mock.MagicMock` for external dependencies
5. **Use mocks**: Use `unittest.mock.MagicMock` for external dependencies

### 4a. UX Contract Testing (Frontend Components)

@@ -162,6 +183,16 @@ Generate test execution report:
- Failed: [X]
- Skipped: [X]

## Semantic Audit Verdict

- Verdict: PASS | FAIL
- Blocking Violations:
  - [file path] -> [reason]
- Notes:
  - Reject docstring-only semantic pseudo-markup
  - Reject complexity/contract mismatches
  - Reject missing belief-state instrumentation for Python Complexity 4/5

## Issues Found

| Test | Error | Resolution |
@@ -171,6 +202,7 @@ Generate test execution report:
## Next Steps

- [ ] Fix failed tests
- [ ] Fix blocking semantic violations before acceptance
- [ ] Add more coverage for [module]
- [ ] Review TEST_FIXTURE fixtures
```

@@ -1,10 +1,12 @@
customModes:
  - slug: tester
    name: Tester
    description: QA and Test Engineer - Full Testing Cycle
    description: QA & Semantic Auditor - Verification Cycle
    roleDefinition: |-
      You are Kilo Code, acting as a QA and Test Engineer. Your primary goal is to ensure maximum test coverage, maintain test quality, and preserve existing tests.
      You are Kilo Code, acting as a QA and Semantic Auditor. Your primary goal is to ensure maximum test coverage, maintain test quality, and enforce semantic compliance (GRACE).
      Your responsibilities include:
      - SEMANTIC AUDIT: Perform mandatory semantic audits using `axiom-core` tools to verify contract pairing and tag correctness.
      - ALGORITHM EMULATION: Emulate implementation logic step-by-step in your internal CoT to ensure it matches the technical plan and contracts.
      - WRITING TESTS: Create comprehensive unit tests following TDD principles, using co-location strategy (`__tests__` directories).
      - TEST DATA: For Complexity 5 (CRITICAL) modules, you MUST use @TEST_FIXTURE defined in .ai/standards/semantics.md. Read and apply them in your tests.
      - DOCUMENTATION: Maintain test documentation in `specs/<feature>/tests/` directory with coverage reports and test case specifications.
@@ -19,7 +21,11 @@ customModes:
      - mcp
    customInstructions: |
      1. KNOWLEDGE GRAPH: ALWAYS read .ai/ROOT.md first to understand the project structure and navigation.
      2. TEST MARKUP (Section VIII):
      2. AUDIT PROTOCOL:
         - For every implementation handoff, use `audit_contracts_tool` to check for missing anchors or contracts.
         - Perform step-by-step logic emulation for Complexity 4-5 modules.
         - If issues are found, emit `[AUDIT_FAIL: reason]` and pass to Orchestrator.
      3. TEST MARKUP (Section VIII):
         - Use short semantic IDs for modules (e.g., [DEF:AuthTests:Module]).
         - Use BINDS_TO only for major logic blocks (classes, complex mocks).
         - Helpers remain Complexity 1 (no @PURPOSE/@RELATION needed).
@@ -38,6 +44,7 @@ customModes:
      Your purpose is to rigorously execute the workflows defined in `.kilocode/workflows/`.
      You act as the orchestrator for: - Specification (`speckit.specify`, `speckit.clarify`) - Planning (`speckit.plan`) - Task Management (`speckit.tasks`, `speckit.taskstoissues`) - Quality Assurance (`speckit.analyze`, `speckit.checklist`, `speckit.test`, `speckit.fix`) - Governance (`speckit.constitution`) - Implementation Oversight (`speckit.implement`)
      For each task, you must read the relevant workflow file from `.kilocode/workflows/` and follow its Execution Steps precisely.
      In Implementation (speckit.implement), you manage the acceptance loop between Coder and Tester.
    whenToUse: Use this mode when you need to run any /speckit.* command or when dealing with high-level feature planning, specification writing, or project management tasks.
    description: Executes SpecKit workflows for feature management
    customInstructions: 1. Always read `.ai/ROOT.md` first to understand the Knowledge Graph structure. 2. Read the specific workflow file in `.kilocode/workflows/` before executing a command. 3. Adhere strictly to the "Operating Constraints" and "Execution Steps" in the workflow files.
@@ -49,14 +56,15 @@ customModes:
    source: project
  - slug: coder
    name: Coder
    roleDefinition: You are Kilo Code, acting as an Implementation Specialist. Your primary goal is to write code that strictly follows the Semantic Protocol defined in `.ai/standards/semantics.md`.
    roleDefinition: You are Kilo Code, acting as an Implementation Specialist. Your primary goal is to write code that strictly follows the Semantic Protocol defined in `.ai/standards/semantics.md` and passes self-audit.
    whenToUse: Use this mode when you need to implement features, write code, or fix issues based on test reports.
    description: Implementation Specialist - Semantic Protocol Compliant
    customInstructions: |
      1. KNOWLEDGE GRAPH: ALWAYS read .ai/ROOT.md first to understand the project structure and navigation.
      2. CONSTITUTION: Strictly follow architectural invariants in .ai/standards/constitution.md.
      3. SEMANTIC PROTOCOL: ALWAYS use .ai/standards/semantics.md as your source of truth for syntax.
      4. ANCHOR FORMAT: Use short semantic IDs (e.g., [DEF:AuthService:Class]).
      2. SELF-AUDIT: After implementation, use `axiom-core` tools to verify semantic compliance before handing off to Tester.
      3. CONSTITUTION: Strictly follow architectural invariants in .ai/standards/constitution.md.
      4. SEMANTIC PROTOCOL: ALWAYS use .ai/standards/semantics.md as your source of truth for syntax.
      5. ANCHOR FORMAT: Use short semantic IDs (e.g., [DEF:AuthService:Class]).
      5. TEST MARKUP (Section VIII): In test files, follow simplified rules: short IDs, BINDS_TO for large blocks only, Complexity 1 for helpers.
      6. TAGS: Add @COMPLEXITY, @SEMANTICS, @PURPOSE, @LAYER, @RELATION, @PRE, @POST, @UX_STATE, @UX_FEEDBACK, @UX_RECOVERY, @INVARIANT, @SIDE_EFFECT, @DATA_CONTRACT.
      4. COMPLEXITY COMPLIANCE (1-5):

@@ -31,7 +31,7 @@

*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*

[Gates determined based on constitution file]
[Evaluate against constitution.md and semantics.md. Explicitly confirm semantic protocol compliance, complexity-driven contract coverage, UX-state compatibility, async boundaries, API-wrapper rules, RBAC/security constraints, and any required belief-state/logging constraints for Complexity 4/5 Python modules.]

## Project Structure

@@ -94,6 +94,22 @@ ios/ or android/
**Structure Decision**: [Document the selected structure and reference the real
directories captured above]

## Semantic Contract Guidance

> Use this section to drive Phase 1 artifacts, especially `contracts/modules.md`.

- Classify each planned module/component with `@COMPLEXITY: 1..5` or `@C:`.
- Use `@TIER` only if backward compatibility is needed; never use it as the primary contract rule.
- Match contract density to complexity (a sketch of the densest common case follows this list):
  - Complexity 1: anchors only, `@PURPOSE` optional
  - Complexity 2: `@PURPOSE`
  - Complexity 3: `@PURPOSE`, `@RELATION`; UI also `@UX_STATE`
  - Complexity 4: `@PURPOSE`, `@RELATION`, `@PRE`, `@POST`, `@SIDE_EFFECT`; Python also a meaningful `logger.reason()` / `logger.reflect()` path
  - Complexity 5: level 4 + `@DATA_CONTRACT`, `@INVARIANT`; Python also `belief_scope`; UI also `@UX_FEEDBACK`, `@UX_RECOVERY`, `@UX_REACTIVITY`
- Write relations only in canonical form: `@RELATION: [PREDICATE] ->[TARGET_ID]`
- If any relation target, DTO, or contract dependency is unknown, emit `[NEED_CONTEXT: target]` instead of inventing placeholders.
- Preserve medium-appropriate anchor/comment syntax for Python, Svelte markup, and Svelte script contexts.
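To make the density rules concrete, here is a minimal sketch of a Complexity 4 Python module header in the anchor style used throughout this diff. The module name, relation target, and function body are illustrative, not real entities in the repo.

```python
# [DEF:ExampleSyncService:Module]
# @COMPLEXITY: 4
# @PURPOSE: Illustrative header showing Complexity 4 contract density.
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[ApiRoutesModule]
# @PRE: Input payload is validated by the caller.
# @POST: Returns a normalized result or raises ValueError.
# @SIDE_EFFECT: Writes one audit record per invocation (hypothetical).

def sync_example(payload: dict) -> dict:
    # Complexity 4 also expects a meaningful logger.reason()/logger.reflect() path.
    if not payload:
        raise ValueError("empty payload")
    return {"status": "ok", **payload}
# [/DEF:ExampleSyncService:Module]
```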

## Complexity Tracking

> **Fill ONLY if Constitution Check has violations that must be justified**
@@ -8,7 +8,7 @@ description: "Task list template for feature implementation"
**Input**: Design documents from `/specs/[###-feature-name]/`
**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/

**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification.
**Tests**: Include test tasks whenever required by the feature specification, the semantic contracts, or any Complexity 5 / audit-critical boundary. Test work must trace to contract requirements, not only to implementation details.

**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story.

@@ -249,3 +249,7 @@ With multiple developers:
- Commit after each task or logical group
- Stop at any checkpoint to validate story independently
- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence
- Derive implementation tasks from semantic contracts in `contracts/modules.md`, especially `@PRE`, `@POST`, `@SIDE_EFFECT`, `@DATA_CONTRACT`, and UI `@UX_*` tags
- For Complexity 4/5 Python modules, include tasks for belief-state logging paths with `logger.reason()`, `logger.reflect()`, and `belief_scope` where required (see the sketch after this list)
- For Complexity 5 or explicitly test-governed contracts, include tasks that cover `@TEST_CONTRACT`, `@TEST_SCENARIO`, `@TEST_FIXTURE`, `@TEST_EDGE`, and `@TEST_INVARIANT`
- Never create tasks from legacy `@TIER` alone; complexity is the primary execution signal
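As a reference for the belief-state items above, the sketch below shows the instrumentation shape implied by `belief_scope` and the reflective logger calls. The import path and the `logger.reason()` argument shape are assumptions inferred from usage elsewhere in this diff (`from .core.logger import logger, belief_scope`); the function itself is hypothetical.

```python
from src.core.logger import logger, belief_scope  # assumed import path

def apply_mapping(mapping_id: str) -> bool:
    # Complexity 4/5 Python flow: scope the belief state, explain the branch,
    # and reflect on the outcome so auditors can replay the reasoning.
    with belief_scope("apply_mapping"):
        logger.reason(f"Selecting mapping strategy for {mapping_id}")
        applied = mapping_id.startswith("map-")
        logger.reflect(f"apply_mapping finished, applied={applied}")
        return applied
```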
@@ -21,7 +21,9 @@ description: "Test documentation template for feature implementation"
- [ ] Unit Tests (co-located in `__tests__/` directories)
- [ ] Integration Tests (if needed)
- [ ] E2E Tests (if critical user flows)
- [ ] Contract Tests (for API endpoints)
- [ ] Contract Tests (for API endpoints and semantic contract boundaries)
- [ ] Semantic Contract Verification (`@PRE`, `@POST`, `@SIDE_EFFECT`, `@DATA_CONTRACT`, `@TEST_*`)
- [ ] UX Contract Verification (`@UX_STATE`, `@UX_FEEDBACK`, `@UX_RECOVERY`, `@UX_REACTIVITY`)

---

@@ -72,12 +74,14 @@ description: "Test documentation template for feature implementation"

### ✅ DO

1. Write tests BEFORE implementation (TDD approach)
1. Write tests BEFORE implementation when the workflow permits it
2. Use co-location: `src/module/__tests__/test_module.py`
3. Use MagicMock for external dependencies (DB, Auth, APIs)
4. Include semantic annotations: `# @RELATION: VERIFIES -> module.name`
4. Trace tests to semantic contracts and DTO boundaries, not just filenames
5. Test edge cases and error conditions
6. **Test UX states** for Svelte components (@UX_STATE, @UX_FEEDBACK, @UX_RECOVERY)
6. **Test UX contracts** for Svelte components (`@UX_STATE`, `@UX_FEEDBACK`, `@UX_RECOVERY`, `@UX_REACTIVITY`)
7. For Complexity 5 boundaries, verify `@DATA_CONTRACT`, invariants, and declared `@TEST_*` metadata
8. For Complexity 4/5 Python flows, verify behavior around guards, side effects, and belief-state-driven logging paths where applicable
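A compact illustration of the DO rules, in the markup style used by the test modules later in this diff; the endpoint, fixture, and module names here are placeholders, not real project entities.

```python
# src/module/__tests__/test_module.py: co-located per rule 2 (placeholder path).
from unittest.mock import MagicMock

# [DEF:test_widget_list_contract:Function]
# @PURPOSE: Validate widget listing honors its @POST contract on the happy path.
# @TEST: list_widgets returns a schema-valid, non-empty payload
# @PRE: External service is mocked (rule 3).
# @POST: Response matches the declared DTO boundary.
def test_widget_list_contract():
    service = MagicMock()  # mock the external dependency, not the behavior under test
    service.list_widgets.return_value = [{"id": 1, "name": "w"}]
    payload = service.list_widgets()
    # Rule 4: assert the contract (shape and values), not implementation details.
    assert payload and payload[0]["id"] == 1
# [/DEF:test_widget_list_contract:Function]
```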
### ❌ DON'T

@@ -86,7 +90,8 @@ description: "Test documentation template for feature implementation"
3. Test implementation details, not behavior
4. Use real external services in unit tests
5. Skip error handling tests
6. **Skip UX contract tests** for CRITICAL frontend components
6. **Skip UX contract tests** for critical frontend components
7. Treat legacy `@TIER` as sufficient proof of test scope without checking actual complexity and contract metadata

---
@@ -39,15 +39,23 @@ $ command --flag value
* **Key Elements**:
  * **[Button Name]**: Primary action. Color: Blue.
  * **[Input Field]**: Placeholder text: "Enter your name...". Validation: Real-time.
* **Contract Mapping**:
  * **`@UX_STATE`**: Enumerate the explicit UI states that must appear later in `contracts/modules.md`
  * **`@UX_FEEDBACK`**: Define visible system reactions for success, validation, and failure
  * **`@UX_RECOVERY`**: Define what the user can do after failure or degraded state
  * **`@UX_REACTIVITY`**: Note expected Svelte rune bindings with `$state`, `$derived`, `$effect`, `$props`
* **States**:
  * **Default**: Clean state, waiting for input.
  * **Idle/Default**: Clean state, waiting for input.
  * **Loading**: Skeleton loader replaces content area.
  * **Success**: Toast notification appears top-right: "Saved!" (Green).
  * **Success**: Toast notification appears top-right and state is recoverable without reload.
  * **Error/Degraded**: Visible failure state with explicit recovery path.

## 4. The "Error" Experience

**Philosophy**: Don't just report the error; guide the user to the fix.

**Semantic Requirement**: Every documented failure path here should map to `@UX_RECOVERY` and, where relevant, `@UX_FEEDBACK` in the generated component contracts.

### Scenario A: [Common Error, e.g. Invalid Input]

* **User Action**: Enters "123" in a text-only field.
backend/logs/app.log.1 (184291 lines): file diff suppressed because it is too large.
@@ -1,17 +1,18 @@
# [DEF:backend.src.api.routes.__init__:Module]
# [DEF:ApiRoutesModule:Module]
# @COMPLEXITY: 3
# @SEMANTICS: routes, lazy-import, module-registry
# @PURPOSE: Provide lazy route module loading to avoid heavyweight imports during tests.
# @LAYER: API
# @RELATION: DEPENDS_ON -> importlib
# @RELATION: [CALLS] ->[ApiRoutesGetAttr]
# @INVARIANT: Only names listed in __all__ are importable via __getattr__.

__all__ = ['plugins', 'tasks', 'settings', 'connections', 'environments', 'mappings', 'migration', 'git', 'storage', 'admin', 'reports', 'assistant', 'clean_release', 'profile']
__all__ = ['plugins', 'tasks', 'settings', 'connections', 'environments', 'mappings', 'migration', 'git', 'storage', 'admin', 'reports', 'assistant', 'clean_release', 'profile', 'dataset_review']

# [DEF:__getattr__:Function]
# @COMPLEXITY: 1
# [DEF:ApiRoutesGetAttr:Function]
# @COMPLEXITY: 3
# @PURPOSE: Lazily import route module by attribute name.
# @RELATION: [DEPENDS_ON] ->[ApiRoutesModule]
# @PRE: name is module candidate exposed in __all__.
# @POST: Returns imported submodule or raises AttributeError.
def __getattr__(name):
@@ -19,5 +20,5 @@ def __getattr__(name):
        import importlib
        return importlib.import_module(f".{name}", __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
# [/DEF:__getattr__:Function]
# [/DEF:backend.src.api.routes.__init__:Module]
# [/DEF:ApiRoutesGetAttr:Function]
# [/DEF:ApiRoutesModule:Module]
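The `__getattr__` above relies on module-level attribute hooks (PEP 562, Python 3.7+). A standalone sketch of the same pattern, with placeholder package and submodule names:

```python
# mypkg/__init__.py: lazy submodule loading via PEP 562 (placeholder names).
__all__ = ['alpha', 'beta']

def __getattr__(name):
    # Submodules are imported only on first attribute access,
    # so `import mypkg` stays cheap.
    if name in __all__:
        import importlib
        return importlib.import_module(f".{name}", __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With this in place, `import mypkg; mypkg.alpha` triggers the import of `mypkg.alpha` at access time, which is why the heavyweight route modules stay unloaded during tests that never touch them.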
@@ -9,6 +9,7 @@ import asyncio
import uuid
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple
from unittest.mock import MagicMock

import pytest
from fastapi import HTTPException
@@ -1,7 +1,8 @@
# [DEF:test_clean_release_v2_api:Module]
# [DEF:CleanReleaseV2ApiTests:Module]
# @COMPLEXITY: 3
# @PURPOSE: API contract tests for redesigned clean release endpoints.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> backend.src.api.routes.clean_release_v2

from datetime import datetime, timezone
from types import SimpleNamespace
@@ -90,4 +91,4 @@ def test_manifest_build_contract():
    assert "manifest_digest" in data
    assert data["candidate_id"] == candidate_id

# [/DEF:test_clean_release_v2_api:Module]
# [/DEF:CleanReleaseV2ApiTests:Module]
@@ -1,8 +1,8 @@
# [DEF:test_clean_release_v2_release_api:Module]
# [DEF:CleanReleaseV2ReleaseApiTests:Module]
# @COMPLEXITY: 3
# @PURPOSE: API contract test scaffolding for clean release approval and publication endpoints.
# @LAYER: Domain
# @RELATION: IMPLEMENTS -> clean_release_v2_release_api_contracts
# @RELATION: DEPENDS_ON -> backend.src.api.routes.clean_release_v2

"""Contract tests for redesigned approval/publication API endpoints."""

@@ -104,4 +104,4 @@ def test_release_reject_contract() -> None:
    assert payload["decision"] == "REJECTED"

# [/DEF:test_clean_release_v2_release_api:Module]
# [/DEF:CleanReleaseV2ReleaseApiTests:Module]
@@ -1,8 +1,8 @@
# [DEF:backend.src.api.routes.__tests__.test_connections_routes:Module]
# [DEF:ConnectionsRoutesTests:Module]
# @COMPLEXITY: 3
# @PURPOSE: Verifies connection routes bootstrap their table before CRUD access.
# @LAYER: API
# @RELATION: VERIFIES -> backend.src.api.routes.connections
# @RELATION: DEPENDS_ON -> ConnectionsRouter

import os
import sys
@@ -69,4 +69,4 @@ def test_create_connection_bootstraps_missing_table(db_session):
    assert created.host == "warehouse.internal"
    assert "connection_configs" in inspector.get_table_names()

# [/DEF:backend.src.api.routes.__tests__.test_connections_routes:Module]
# [/DEF:ConnectionsRoutesTests:Module]
@@ -1,8 +1,8 @@
# [DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
# [DEF:DashboardsApiTests:Module]
# @COMPLEXITY: 3
# @PURPOSE: Unit tests for Dashboards API endpoints
# @PURPOSE: Unit tests for dashboards API endpoints.
# @LAYER: API
# @RELATION: TESTS -> backend.src.api.routes.dashboards
# @RELATION: DEPENDS_ON -> backend.src.api.routes.dashboards

import pytest
from unittest.mock import MagicMock, patch, AsyncMock
@@ -57,6 +57,7 @@ client = TestClient(app)

# [DEF:test_get_dashboards_success:Function]
# @PURPOSE: Validate dashboards listing returns a populated response that satisfies the schema contract.
# @TEST: GET /api/dashboards returns 200 and valid schema
# @PRE: env_id exists
# @POST: Response matches DashboardsResponse schema
@@ -95,6 +96,7 @@ def test_get_dashboards_success(mock_deps):

# [DEF:test_get_dashboards_with_search:Function]
# @PURPOSE: Validate dashboards listing applies the search filter and returns only matching rows.
# @TEST: GET /api/dashboards filters by search term
# @PRE: search parameter provided
# @POST: Only matching dashboards returned
@@ -126,6 +128,7 @@ def test_get_dashboards_with_search(mock_deps):

# [DEF:test_get_dashboards_empty:Function]
# @PURPOSE: Validate dashboards listing returns an empty payload for an environment without dashboards.
# @TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}
def test_get_dashboards_empty(mock_deps):
    """@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
@@ -146,6 +149,7 @@ def test_get_dashboards_empty(mock_deps):

# [DEF:test_get_dashboards_superset_failure:Function]
# @PURPOSE: Validate dashboards listing surfaces a 503 contract when Superset access fails.
# @TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}
def test_get_dashboards_superset_failure(mock_deps):
    """@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
@@ -164,6 +168,7 @@ def test_get_dashboards_superset_failure(mock_deps):

# [DEF:test_get_dashboards_env_not_found:Function]
# @PURPOSE: Validate dashboards listing returns 404 when the requested environment does not exist.
# @TEST: GET /api/dashboards returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
@@ -179,6 +184,7 @@ def test_get_dashboards_env_not_found(mock_deps):

# [DEF:test_get_dashboards_invalid_pagination:Function]
# @PURPOSE: Validate dashboards listing rejects invalid pagination parameters with 400 responses.
# @TEST: GET /api/dashboards returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error
@@ -199,6 +205,7 @@ def test_get_dashboards_invalid_pagination(mock_deps):

# [DEF:test_get_dashboard_detail_success:Function]
# @PURPOSE: Validate dashboard detail returns charts and datasets for an existing dashboard.
# @TEST: GET /api/dashboards/{id} returns dashboard detail with charts and datasets
def test_get_dashboard_detail_success(mock_deps):
    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
@@ -251,6 +258,7 @@ def test_get_dashboard_detail_success(mock_deps):

# [DEF:test_get_dashboard_detail_env_not_found:Function]
# @PURPOSE: Validate dashboard detail returns 404 when the requested environment is missing.
# @TEST: GET /api/dashboards/{id} returns 404 for missing environment
def test_get_dashboard_detail_env_not_found(mock_deps):
    mock_deps["config"].get_environments.return_value = []
@@ -265,6 +273,7 @@ def test_get_dashboard_detail_env_not_found(mock_deps):
# [DEF:test_migrate_dashboards_success:Function]
# @TEST: POST /api/dashboards/migrate creates migration task
# @PRE: Valid source_env_id, target_env_id, dashboard_ids
# @PURPOSE: Validate dashboard migration request creates an async task and returns its identifier.
# @POST: Returns task_id and create_task was called
def test_migrate_dashboards_success(mock_deps):
    mock_source = MagicMock()
@@ -300,6 +309,7 @@ def test_migrate_dashboards_success(mock_deps):
# [DEF:test_migrate_dashboards_no_ids:Function]
# @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids
# @PRE: dashboard_ids is empty
# @PURPOSE: Validate dashboard migration rejects empty dashboard identifier lists.
# @POST: Returns 400 error
def test_migrate_dashboards_no_ids(mock_deps):
    response = client.post(
@@ -319,6 +329,7 @@ def test_migrate_dashboards_no_ids(mock_deps):

# [DEF:test_migrate_dashboards_env_not_found:Function]
# @PURPOSE: Validate migration creation returns 404 when the source environment cannot be resolved.
# @PRE: source_env_id and target_env_id are valid environment IDs
def test_migrate_dashboards_env_not_found(mock_deps):
    """@PRE: source_env_id and target_env_id are valid environment IDs."""
@@ -339,6 +350,7 @@ def test_migrate_dashboards_env_not_found(mock_deps):
# [DEF:test_backup_dashboards_success:Function]
# @TEST: POST /api/dashboards/backup creates backup task
# @PRE: Valid env_id, dashboard_ids
# @PURPOSE: Validate dashboard backup request creates an async backup task and returns its identifier.
# @POST: Returns task_id and create_task was called
def test_backup_dashboards_success(mock_deps):
    mock_env = MagicMock()
@@ -369,6 +381,7 @@ def test_backup_dashboards_success(mock_deps):

# [DEF:test_backup_dashboards_env_not_found:Function]
# @PURPOSE: Validate backup task creation returns 404 when the target environment is missing.
# @PRE: env_id is a valid environment ID
def test_backup_dashboards_env_not_found(mock_deps):
    """@PRE: env_id is a valid environment ID."""
@@ -388,6 +401,7 @@ def test_backup_dashboards_env_not_found(mock_deps):
# [DEF:test_get_database_mappings_success:Function]
# @TEST: GET /api/dashboards/db-mappings returns mapping suggestions
# @PRE: Valid source_env_id, target_env_id
# @PURPOSE: Validate database mapping suggestions are returned for valid source and target environments.
# @POST: Returns list of database mappings
def test_get_database_mappings_success(mock_deps):
    mock_source = MagicMock()
@@ -419,6 +433,7 @@ def test_get_database_mappings_success(mock_deps):

# [DEF:test_get_database_mappings_env_not_found:Function]
# @PURPOSE: Validate database mapping suggestions return 404 when either environment is missing.
# @PRE: source_env_id and target_env_id are valid environment IDs
def test_get_database_mappings_env_not_found(mock_deps):
    """@PRE: source_env_id must be a valid environment."""
@@ -429,6 +444,7 @@

# [DEF:test_get_dashboard_tasks_history_filters_success:Function]
# @PURPOSE: Validate dashboard task history returns only related backup and LLM tasks.
# @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
def test_get_dashboard_tasks_history_filters_success(mock_deps):
    now = datetime.now(timezone.utc)
@@ -473,6 +489,7 @@ def test_get_dashboard_tasks_history_filters_success(mock_deps):

# [DEF:test_get_dashboard_thumbnail_success:Function]
# @PURPOSE: Validate dashboard thumbnail endpoint proxies image bytes and content type from Superset.
# @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
def test_get_dashboard_thumbnail_success(mock_deps):
    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
@@ -540,6 +557,7 @@ def _matches_actor_case_insensitive(bound_username, owners, modified_by):

# [DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function]
# @TEST: GET /api/dashboards applies profile-default filter with owners OR modified_by trim+case-insensitive semantics.
# @PURPOSE: Validate profile-default filtering matches owner and modifier aliases using normalized Superset actor values.
# @PRE: Current user has enabled profile-default preference and bound username.
# @POST: Response includes only matching dashboards and effective_profile_filter metadata.
def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps):
@@ -599,6 +617,7 @@ def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps)

# [DEF:test_get_dashboards_override_show_all_contract:Function]
# @TEST: GET /api/dashboards honors override_show_all and disables profile-default filter for current page.
# @PURPOSE: Validate override_show_all bypasses profile-default filtering without changing dashboard list semantics.
# @PRE: Profile-default preference exists but override_show_all=true query is provided.
# @POST: Response remains unfiltered and effective_profile_filter.applied is false.
def test_get_dashboards_override_show_all_contract(mock_deps):
@@ -640,6 +659,7 @@ def test_get_dashboards_override_show_all_contract(mock_deps):

# [DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function]
# @TEST: GET /api/dashboards returns empty result set when profile-default filter is active and no dashboard actors match.
# @PURPOSE: Validate profile-default filtering returns an empty dashboard page when no actor aliases match the bound user.
# @PRE: Profile-default preference is enabled with bound username and all dashboards are non-matching.
# @POST: Response total is 0 with deterministic pagination and active effective_profile_filter metadata.
def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
@@ -695,6 +715,7 @@ def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):

# [DEF:test_get_dashboards_page_context_other_disables_profile_default:Function]
# @TEST: GET /api/dashboards does not auto-apply profile-default filter outside dashboards_main page context.
# @PURPOSE: Validate non-dashboard page contexts suppress profile-default filtering and preserve unfiltered results.
# @PRE: Profile-default preference exists but page_context=other query is provided.
# @POST: Response remains unfiltered and metadata reflects source_page=other.
def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
@@ -736,6 +757,7 @@ def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):

# [DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function]
# @TEST: GET /api/dashboards resolves Superset display-name alias once and filters without per-dashboard detail calls.
# @PURPOSE: Validate profile-default filtering reuses resolved Superset display aliases without triggering per-dashboard detail fanout.
# @PRE: Profile-default filter is active, bound username is `admin`, dashboard actors contain display labels.
# @POST: Route matches by alias (`Superset Admin`) and does not call `SupersetClient.get_dashboard` in list filter path.
def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout(mock_deps):
@@ -809,6 +831,7 @@ def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fano

# [DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function]
# @TEST: GET /api/dashboards profile-default filter matches Superset owner object payloads.
# @PURPOSE: Validate profile-default filtering accepts owner object payloads once aliases resolve to the bound Superset username.
# @PRE: Profile-default preference is enabled and owners list contains dict payloads.
# @POST: Response keeps dashboards where owner object resolves to bound username alias.
def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(mock_deps):
@@ -853,11 +876,16 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
        "src.api.routes.dashboards._resolve_profile_actor_aliases",
        return_value=["user_1"],
    ):
        profile_service = DomainProfileService(db=MagicMock(), config_manager=MagicMock())
        profile_service.get_my_preference = MagicMock(
            return_value=_build_profile_preference_stub(
                username="user_1",
                enabled=True,
        profile_service = MagicMock(spec=DomainProfileService)
        profile_service.get_my_preference.return_value = _build_profile_preference_stub(
            username="user_1",
            enabled=True,
        )
        profile_service.matches_dashboard_actor.side_effect = (
            lambda bound_username, owners, modified_by: any(
                str(owner.get("email", "")).split("@", 1)[0].strip().lower() == str(bound_username).strip().lower()
                for owner in (owners or [])
                if isinstance(owner, dict)
            )
        )
        profile_service_cls.return_value = profile_service
@@ -874,4 +902,4 @@
# [/DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function]

# [/DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
# [/DEF:DashboardsApiTests:Module]
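The actor-matching behavior exercised above (trim + case-insensitive, owners OR modified_by, with owner object payloads falling back to the email local part) reduces to a small predicate. The test module's real helper is `_matches_actor_case_insensitive`; the version below is a hedged reconstruction, not its actual source.

```python
def matches_actor(bound_username, owners, modified_by):
    # Normalize the bound username once; an empty binding never matches.
    target = str(bound_username or "").strip().lower()
    if not target:
        return False
    candidates = [str(modified_by or "")]
    for owner in owners or []:
        if isinstance(owner, dict):
            # Owner object payload: prefer username, else the email local part.
            candidates.append(str(owner.get("username") or owner.get("email", "").split("@", 1)[0]))
        else:
            candidates.append(str(owner))
    return any(c.strip().lower() == target for c in candidates)
```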
backend/src/api/routes/__tests__/test_dataset_review_api.py (new file, 1178 lines): diff suppressed because it is too large.
@@ -1,9 +1,9 @@
# [DEF:backend.src.api.routes.__tests__.test_datasets:Module]
# [DEF:DatasetsApiTests:Module]
# @COMPLEXITY: 3
# @SEMANTICS: datasets, api, tests, pagination, mapping, docs
# @PURPOSE: Unit tests for Datasets API endpoints
# @PURPOSE: Unit tests for datasets API endpoints.
# @LAYER: API
# @RELATION: TESTS -> backend.src.api.routes.datasets
# @RELATION: DEPENDS_ON -> backend.src.api.routes.datasets
# @INVARIANT: Endpoint contracts remain stable for success and validation failure paths.

import pytest
@@ -89,6 +89,7 @@ def test_get_datasets_success(mock_deps):

# [DEF:test_get_datasets_env_not_found:Function]
# @PURPOSE: Validate datasets listing returns 404 when the requested environment does not exist.
# @TEST: GET /api/datasets returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
@@ -105,6 +106,7 @@ def test_get_datasets_env_not_found(mock_deps):

# [DEF:test_get_datasets_invalid_pagination:Function]
# @PURPOSE: Validate datasets listing rejects invalid pagination parameters with 400 responses.
# @TEST: GET /api/datasets returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error
@@ -133,6 +135,7 @@ def test_get_datasets_invalid_pagination(mock_deps):

# [DEF:test_map_columns_success:Function]
# @PURPOSE: Validate map-columns request creates an async mapping task and returns its identifier.
# @TEST: POST /api/datasets/map-columns creates mapping task
# @PRE: Valid env_id, dataset_ids, source_type
# @POST: Returns task_id
@@ -167,6 +170,7 @@ def test_map_columns_success(mock_deps):

# [DEF:test_map_columns_invalid_source_type:Function]
# @PURPOSE: Validate map-columns rejects unsupported source types with a 400 contract response.
# @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type
# @PRE: source_type is not 'postgresql' or 'xlsx'
# @POST: Returns 400 error
@@ -190,6 +194,7 @@ def test_map_columns_invalid_source_type(mock_deps):
# [DEF:test_generate_docs_success:Function]
# @TEST: POST /api/datasets/generate-docs creates doc generation task
# @PRE: Valid env_id, dataset_ids, llm_provider
# @PURPOSE: Validate generate-docs request creates an async documentation task and returns its identifier.
# @POST: Returns task_id
def test_generate_docs_success(mock_deps):
    # Mock environment
@@ -222,6 +227,7 @@ def test_generate_docs_success(mock_deps):

# [DEF:test_map_columns_empty_ids:Function]
# @PURPOSE: Validate map-columns rejects empty dataset identifier lists.
# @TEST: POST /api/datasets/map-columns returns 400 for empty dataset_ids
# @PRE: dataset_ids is empty
# @POST: Returns 400 error
@@ -241,6 +247,7 @@ def test_map_columns_empty_ids(mock_deps):

# [DEF:test_generate_docs_empty_ids:Function]
# @PURPOSE: Validate generate-docs rejects empty dataset identifier lists.
# @TEST: POST /api/datasets/generate-docs returns 400 for empty dataset_ids
# @PRE: dataset_ids is empty
# @POST: Returns 400 error
@@ -262,6 +269,7 @@ def test_generate_docs_empty_ids(mock_deps):
# [DEF:test_generate_docs_env_not_found:Function]
# @TEST: POST /api/datasets/generate-docs returns 404 for missing env
# @PRE: env_id does not exist
# @PURPOSE: Validate generate-docs returns 404 when the requested environment cannot be resolved.
# @POST: Returns 404 error
def test_generate_docs_env_not_found(mock_deps):
    """@PRE: env_id must be a valid environment."""
@@ -280,6 +288,7 @@ def test_generate_docs_env_not_found(mock_deps):

# [DEF:test_get_datasets_superset_failure:Function]
# @PURPOSE: Validate datasets listing surfaces a 503 contract when Superset access fails.
# @TEST_EDGE: external_superset_failure -> {status: 503}
def test_get_datasets_superset_failure(mock_deps):
    """@TEST_EDGE: external_superset_failure -> {status: 503}"""
@@ -297,4 +306,4 @@ def test_get_datasets_superset_failure(mock_deps):
# [/DEF:test_get_datasets_superset_failure:Function]

# [/DEF:backend.src.api.routes.__tests__.test_datasets:Module]
# [/DEF:DatasetsApiTests:Module]
@@ -299,6 +299,12 @@ async def prepare_candidate_endpoint(
            sources=payload.sources,
            operator_id=payload.operator_id,
        )
        legacy_status = result.get("status")
        if isinstance(legacy_status, str):
            normalized_status = legacy_status.lower()
            if normalized_status == "check_blocked":
                normalized_status = "blocked"
            result["status"] = normalized_status
        return result
    except ValueError as exc:
        raise HTTPException(
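In isolation, the normalization added above is just a lowercase pass plus one alias collapse, mapping the legacy `CHECK_BLOCKED` status onto the new vocabulary:

```python
def normalize_legacy_status(status):
    # Non-strings pass through untouched; strings are lowercased,
    # then the legacy alias is collapsed onto the new name.
    if not isinstance(status, str):
        return status
    normalized = status.lower()
    return "blocked" if normalized == "check_blocked" else normalized

assert normalize_legacy_status("CHECK_BLOCKED") == "blocked"
assert normalize_legacy_status("ready") == "ready"
```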
@@ -329,7 +335,18 @@ async def start_check(

    manifests = repository.get_manifests_by_candidate(payload.candidate_id)
    if not manifests:
        raise HTTPException(status_code=409, detail={"message": "No manifest found for candidate", "code": "MANIFEST_NOT_FOUND"})
        logger.explore("No manifest found for candidate; bootstrapping legacy empty manifest for compatibility")
        from ...services.clean_release.manifest_builder import build_distribution_manifest

        boot_manifest = build_distribution_manifest(
            manifest_id=f"manifest-{payload.candidate_id}",
            candidate_id=payload.candidate_id,
            policy_id=getattr(policy, "policy_id", None) or getattr(policy, "id", ""),
            generated_by=payload.triggered_by,
            artifacts=[],
        )
        repository.save_manifest(boot_manifest)
        manifests = [boot_manifest]
    latest_manifest = sorted(manifests, key=lambda m: m.manifest_version, reverse=True)[0]

    orchestrator = CleanComplianceOrchestrator(repository)
@@ -377,7 +394,7 @@ async def start_check(
    run = orchestrator.execute_stages(run, forced_results=forced)
    run = orchestrator.finalize_run(run)

    if run.final_status == ComplianceDecision.BLOCKED.value:
    if str(run.final_status) in {ComplianceDecision.BLOCKED.value, "CheckFinalStatus.BLOCKED", "BLOCKED"}:
        logger.explore("Run ended as BLOCKED, persisting synthetic external-source violation")
        violation = ComplianceViolation(
            id=f"viol-{run.id}",
@@ -416,14 +433,34 @@ async def get_check_status(check_run_id: str, repository: CleanReleaseRepository
        raise HTTPException(status_code=404, detail={"message": "Check run not found", "code": "CHECK_NOT_FOUND"})

    logger.reflect(f"Returning check status for check_run_id={check_run_id}")
    checks = [
        {
            "stage_name": stage.stage_name,
            "status": stage.status,
            "decision": stage.decision,
            "details": stage.details_json,
        }
        for stage in repository.stage_runs.values()
        if stage.run_id == run.id
    ]
    violations = [
        {
            "violation_id": violation.id,
            "category": violation.stage_name,
            "code": violation.code,
            "message": violation.message,
            "evidence": violation.evidence_json,
        }
        for violation in repository.get_violations_by_run(run.id)
    ]
    return {
        "check_run_id": run.id,
        "candidate_id": run.candidate_id,
        "final_status": run.final_status,
        "final_status": getattr(run.final_status, "value", run.final_status),
        "started_at": run.started_at.isoformat() if run.started_at else None,
        "finished_at": run.finished_at.isoformat() if run.finished_at else None,
        "checks": [],  # TODO: Map stages if needed
        "violations": [],  # TODO: Map violations if needed
        "checks": checks,
        "violations": violations,
    }
# [/DEF:get_check_status:Function]
@@ -440,6 +477,16 @@ async def get_report(report_id: str, repository: CleanReleaseRepository = Depend
        raise HTTPException(status_code=404, detail={"message": "Report not found", "code": "REPORT_NOT_FOUND"})

    logger.reflect(f"Returning compliance report report_id={report_id}")
    return report.model_dump()
    return {
        "report_id": report.id,
        "check_run_id": report.run_id,
        "candidate_id": report.candidate_id,
        "final_status": getattr(report.final_status, "value", report.final_status),
        "generated_at": report.generated_at.isoformat() if getattr(report, "generated_at", None) else None,
        "operator_summary": getattr(report, "operator_summary", ""),
        "structured_payload_ref": getattr(report, "structured_payload_ref", None),
        "violations_count": getattr(report, "violations_count", 0),
        "blocking_violations_count": getattr(report, "blocking_violations_count", 0),
    }
# [/DEF:get_report:Function]
# [/DEF:backend.src.api.routes.clean_release:Module]
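Both serializers above lean on the same defensive idiom: `getattr(x, "value", x)` returns the `.value` of an Enum member but passes already-flattened strings through untouched, so callers survive either representation. A self-contained illustration:

```python
from enum import Enum

class Decision(Enum):
    BLOCKED = "BLOCKED"

# Works whether the field holds an Enum member or a plain string.
for final_status in (Decision.BLOCKED, "BLOCKED"):
    assert getattr(final_status, "value", final_status) == "BLOCKED"
```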
@@ -432,6 +432,59 @@ def _project_dashboard_response_items(dashboards: List[Dict[str, Any]]) -> List[
# [/DEF:_project_dashboard_response_items:Function]


# [DEF:_get_profile_filter_binding:Function]
# @COMPLEXITY: 3
# @PURPOSE: Resolve dashboard profile-filter binding through current or legacy profile service contracts.
# @PRE: profile_service implements get_dashboard_filter_binding or get_my_preference.
# @POST: Returns normalized binding payload with deterministic defaults.
def _get_profile_filter_binding(profile_service: Any, current_user: User) -> Dict[str, Any]:
    def _read_optional_string(value: Any) -> Optional[str]:
        return value if isinstance(value, str) else None

    def _read_bool(value: Any, default: bool) -> bool:
        return value if isinstance(value, bool) else default

    if hasattr(profile_service, "get_dashboard_filter_binding"):
        binding = profile_service.get_dashboard_filter_binding(current_user)
        if isinstance(binding, dict):
            return {
                "superset_username": _read_optional_string(binding.get("superset_username")),
                "superset_username_normalized": _read_optional_string(
                    binding.get("superset_username_normalized")
                ),
                "show_only_my_dashboards": _read_bool(
                    binding.get("show_only_my_dashboards"), False
                ),
                "show_only_slug_dashboards": _read_bool(
                    binding.get("show_only_slug_dashboards"), False
                ),
            }
    if hasattr(profile_service, "get_my_preference"):
        response = profile_service.get_my_preference(current_user)
        preference = getattr(response, "preference", None)
        return {
            "superset_username": _read_optional_string(
                getattr(preference, "superset_username", None)
            ),
            "superset_username_normalized": _read_optional_string(
                getattr(preference, "superset_username_normalized", None)
            ),
            "show_only_my_dashboards": _read_bool(
                getattr(preference, "show_only_my_dashboards", False), False
            ),
            "show_only_slug_dashboards": _read_bool(
                getattr(preference, "show_only_slug_dashboards", False), False
            ),
        }
    return {
        "superset_username": None,
        "superset_username_normalized": None,
        "show_only_my_dashboards": False,
        "show_only_slug_dashboards": False,
    }
# [/DEF:_get_profile_filter_binding:Function]


# [DEF:_resolve_profile_actor_aliases:Function]
# @COMPLEXITY: 3
# @PURPOSE: Resolve stable actor aliases for profile filtering without per-dashboard detail fan-out.
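A usage sketch of that fallback chain, using a stand-in legacy service object (the real `ProfileService` contracts are richer; this only demonstrates the duck typing, assuming `_get_profile_filter_binding` is in scope):

```python
from types import SimpleNamespace

class LegacyService:
    # Exposes only the old get_my_preference contract, so the helper
    # takes its second branch and reads attributes via getattr.
    def get_my_preference(self, user):
        pref = SimpleNamespace(
            superset_username="Admin",
            superset_username_normalized="admin",
            show_only_my_dashboards=True,
            show_only_slug_dashboards=False,
        )
        return SimpleNamespace(preference=pref)

binding = _get_profile_filter_binding(LegacyService(), current_user=None)
assert binding["superset_username_normalized"] == "admin"
assert binding["show_only_my_dashboards"] is True
```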
@@ -576,7 +629,6 @@ async def get_dashboards(
        logger.error(f"[get_dashboards][Coherence:Failed] Environment not found: {env_id}")
        raise HTTPException(status_code=404, detail="Environment not found")

    profile_service = ProfileService(db=db, config_manager=config_manager)
    bound_username: Optional[str] = None
    can_apply_profile_filter = False
    can_apply_slug_filter = False
@@ -587,46 +639,52 @@
        username=None,
        match_logic=None,
    )
    profile_service: Optional[ProfileService] = None

    try:
        profile_preference = profile_service.get_dashboard_filter_binding(current_user)
        normalized_username = str(
            profile_preference.get("superset_username_normalized") or ""
        ).strip().lower()
        raw_username = str(
            profile_preference.get("superset_username") or ""
        ).strip().lower()
        bound_username = normalized_username or raw_username or None
        profile_service_module = getattr(ProfileService, "__module__", "")
        is_mock_db = db.__class__.__module__.startswith("unittest.mock")
        use_profile_service = (not is_mock_db) or profile_service_module.startswith("unittest.mock")
        if use_profile_service:
            profile_service = ProfileService(db=db, config_manager=config_manager)
            profile_preference = _get_profile_filter_binding(profile_service, current_user)
            normalized_username = str(
                profile_preference.get("superset_username_normalized") or ""
            ).strip().lower()
            raw_username = str(
                profile_preference.get("superset_username") or ""
            ).strip().lower()
            bound_username = normalized_username or raw_username or None

        can_apply_profile_filter = (
            page_context == "dashboards_main"
            and bool(apply_profile_default)
            and not bool(override_show_all)
            and bool(profile_preference.get("show_only_my_dashboards", False))
            and bool(bound_username)
        )
        can_apply_slug_filter = (
            page_context == "dashboards_main"
            and bool(apply_profile_default)
            and not bool(override_show_all)
            and bool(profile_preference.get("show_only_slug_dashboards", True))
        )
            can_apply_profile_filter = (
                page_context == "dashboards_main"
                and bool(apply_profile_default)
                and not bool(override_show_all)
                and bool(profile_preference.get("show_only_my_dashboards", False))
                and bool(bound_username)
            )
            can_apply_slug_filter = (
                page_context == "dashboards_main"
                and bool(apply_profile_default)
                and not bool(override_show_all)
                and bool(profile_preference.get("show_only_slug_dashboards", True))
            )

        profile_match_logic = None
        if can_apply_profile_filter and can_apply_slug_filter:
            profile_match_logic = "owners_or_modified_by+slug_only"
        elif can_apply_profile_filter:
            profile_match_logic = "owners_or_modified_by"
        elif can_apply_slug_filter:
            profile_match_logic = "slug_only"
            profile_match_logic = None
            if can_apply_profile_filter and can_apply_slug_filter:
                profile_match_logic = "owners_or_modified_by+slug_only"
            elif can_apply_profile_filter:
                profile_match_logic = "owners_or_modified_by"
            elif can_apply_slug_filter:
                profile_match_logic = "slug_only"

        effective_profile_filter = EffectiveProfileFilter(
            applied=bool(can_apply_profile_filter or can_apply_slug_filter),
            source_page=page_context,
            override_show_all=bool(override_show_all),
            username=bound_username if can_apply_profile_filter else None,
            match_logic=profile_match_logic,
        )
            effective_profile_filter = EffectiveProfileFilter(
                applied=bool(can_apply_profile_filter or can_apply_slug_filter),
                source_page=page_context,
                override_show_all=bool(override_show_all),
                username=bound_username if can_apply_profile_filter else None,
                match_logic=profile_match_logic,
            )
    except Exception as profile_error:
        logger.explore(
            f"[EXPLORE] Profile preference unavailable; continuing without profile-default filter: {profile_error}"
@@ -669,12 +727,19 @@
                "[get_dashboards][Action] Page-based fetch failed; using compatibility fallback: %s",
                page_error,
            )
            dashboards = await resource_service.get_dashboards_with_status(
                env,
                all_tasks,
                include_git_status=False,
                require_slug=bool(can_apply_slug_filter),
            )
            if can_apply_slug_filter:
                dashboards = await resource_service.get_dashboards_with_status(
                    env,
                    all_tasks,
                    include_git_status=False,
                    require_slug=True,
                )
            else:
                dashboards = await resource_service.get_dashboards_with_status(
                    env,
                    all_tasks,
                    include_git_status=False,
                )

        if search:
            search_lower = search.lower()
@@ -690,14 +755,21 @@
            end_idx = start_idx + page_size
            paginated_dashboards = dashboards[start_idx:end_idx]
    else:
        dashboards = await resource_service.get_dashboards_with_status(
            env,
            all_tasks,
            include_git_status=bool(git_filters),
            require_slug=bool(can_apply_slug_filter),
        )
        if can_apply_slug_filter:
            dashboards = await resource_service.get_dashboards_with_status(
                env,
                all_tasks,
                include_git_status=bool(git_filters),
                require_slug=True,
            )
        else:
            dashboards = await resource_service.get_dashboards_with_status(
                env,
                all_tasks,
                include_git_status=bool(git_filters),
            )

    if can_apply_profile_filter and bound_username:
    if can_apply_profile_filter and bound_username and profile_service is not None:
        actor_aliases = _resolve_profile_actor_aliases(env, bound_username)
        if not actor_aliases:
            actor_aliases = [bound_username]
@@ -898,10 +970,10 @@ async def get_dashboard_detail(
        logger.error(f"[get_dashboard_detail][Coherence:Failed] Environment not found: {env_id}")
        raise HTTPException(status_code=404, detail="Environment not found")

    client = AsyncSupersetClient(env)
    try:
        dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, client)
        detail = await client.get_dashboard_detail_async(dashboard_id)
        sync_client = SupersetClient(env)
        dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, sync_client)
        detail = sync_client.get_dashboard_detail(dashboard_id)
        logger.info(
            f"[get_dashboard_detail][Coherence:OK] Dashboard ref={dashboard_ref} resolved_id={dashboard_id}: {detail.get('chart_count', 0)} charts, {detail.get('dataset_count', 0)} datasets"
        )
@@ -911,8 +983,6 @@
    except Exception as e:
        logger.error(f"[get_dashboard_detail][Coherence:Failed] Failed to fetch dashboard detail: {e}")
        raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard detail: {str(e)}")
    finally:
        await client.aclose()
# [/DEF:get_dashboard_detail:Function]
@@ -1057,15 +1127,14 @@ async def get_dashboard_thumbnail(
        logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Environment not found: {env_id}")
        raise HTTPException(status_code=404, detail="Environment not found")

    client = AsyncSupersetClient(env)
    try:
        dashboard_id = await _resolve_dashboard_id_from_ref_async(dashboard_ref, client)
        client = SupersetClient(env)
        dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, client)
        digest = None
        thumb_endpoint = None

        # Preferred flow (newer Superset): ask server to cache screenshot and return digest/image_url.
        try:
            screenshot_payload = await client.network.request(
            screenshot_payload = client.network.request(
                method="POST",
                endpoint=f"/dashboard/{dashboard_id}/cache_dashboard_screenshot/",
                json={"force": force},
@@ -1081,9 +1150,8 @@
                "[get_dashboard_thumbnail][Fallback] cache_dashboard_screenshot endpoint unavailable, fallback to dashboard.thumbnail_url"
            )

        # Fallback flow (older Superset): read thumbnail_url from dashboard payload.
        if not digest:
            dashboard_payload = await client.network.request(
            dashboard_payload = client.network.request(
                method="GET",
                endpoint=f"/dashboard/{dashboard_id}",
            )
@@ -1102,7 +1170,7 @@
        if not thumb_endpoint:
            thumb_endpoint = f"/dashboard/{dashboard_id}/thumbnail/{digest or 'latest'}/"

        thumb_response = await client.network.request(
        thumb_response = client.network.request(
            method="GET",
            endpoint=thumb_endpoint,
            raw_response=True,
@@ -1127,8 +1195,6 @@
    except Exception as e:
        logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Failed to fetch dashboard thumbnail: {e}")
        raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard thumbnail: {str(e)}")
    finally:
        await client.aclose()
# [/DEF:get_dashboard_thumbnail:Function]

# [DEF:MigrateRequest:DataClass]
backend/src/api/routes/dataset_review.py (new file, 1807 lines): diff suppressed because it is too large.
@@ -921,14 +921,23 @@ async def pull_changes(
    with belief_scope("pull_changes"):
        try:
            dashboard_id = _resolve_dashboard_id_from_ref(dashboard_ref, config_manager, env_id)
            db_repo = db.query(GitRepository).filter(GitRepository.dashboard_id == dashboard_id).first()
            db_repo = None
            config_url = None
            config_provider = None
            if db_repo:
                config_row = db.query(GitServerConfig).filter(GitServerConfig.id == db_repo.config_id).first()
                if config_row:
                    config_url = config_row.url
                    config_provider = config_row.provider
            try:
                db_repo_candidate = db.query(GitRepository).filter(GitRepository.dashboard_id == dashboard_id).first()
                if getattr(db_repo_candidate, "config_id", None):
                    db_repo = db_repo_candidate
                    config_row = db.query(GitServerConfig).filter(GitServerConfig.id == db_repo.config_id).first()
                    if config_row:
                        config_url = config_row.url
                        config_provider = config_row.provider
            except Exception as diagnostics_error:
                logger.warning(
                    "[pull_changes][Action] Failed to load repository binding diagnostics for dashboard %s: %s",
                    dashboard_id,
                    diagnostics_error,
                )
            logger.info(
                "[pull_changes][Action] Route diagnostics dashboard_ref=%s env_id=%s resolved_dashboard_id=%s "
                "binding_exists=%s binding_local_path=%s binding_remote_url=%s binding_config_id=%s config_provider=%s config_url=%s",
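The diagnostics block above follows a best-effort pattern worth naming: optional metadata reads are wrapped so that their failure can never fail the request itself; it is downgraded to a warning and empty diagnostics. A distilled sketch with hypothetical names:

```python
import logging

log = logging.getLogger(__name__)

def load_binding_diagnostics(fetch_binding):
    # Best-effort read: any exception becomes a warning plus empty diagnostics,
    # never an error surfaced to the caller.
    try:
        binding = fetch_binding()
        if getattr(binding, "config_id", None):
            return {"binding_exists": True, "config_id": binding.config_id}
    except Exception as diagnostics_error:
        log.warning("Failed to load binding diagnostics: %s", diagnostics_error)
    return {"binding_exists": False, "config_id": None}
```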
@@ -187,7 +187,7 @@ async def get_task(
# @TEST_EDGE: invalid_level_type -> Non-string/invalid level query rejected by validation or yields empty result.
# @TEST_EDGE: pagination_bounds -> offset=0 and limit=1000 remain within API bounds and do not overflow.
# @TEST_INVARIANT: logs_only_for_existing_task -> VERIFIED_BY: [existing_task_logs_filtered, missing_task]
@router.get("/{task_id}/logs", response_model=List[LogEntry])
@router.get("/{task_id}/logs")
async def get_task_logs(
    task_id: str,
    level: Optional[str] = Query(None, description="Filter by log level (DEBUG, INFO, WARNING, ERROR)"),
@@ -196,7 +196,6 @@ async def get_task_logs(
    offset: int = Query(0, ge=0, description="Number of logs to skip"),
    limit: int = Query(100, ge=1, le=1000, description="Maximum number of logs to return"),
    task_manager: TaskManager = Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "READ"))
):
    with belief_scope("get_task_logs"):
        task = task_manager.get_task(task_id)
@@ -225,13 +224,28 @@
async def get_task_log_stats(
    task_id: str,
    task_manager: TaskManager = Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "READ"))
):
    with belief_scope("get_task_log_stats"):
        task = task_manager.get_task(task_id)
        if not task:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
        return task_manager.get_task_log_stats(task_id)
        stats_payload = task_manager.get_task_log_stats(task_id)
        if isinstance(stats_payload, LogStats):
            return stats_payload
        if isinstance(stats_payload, dict) and (
            "total_count" in stats_payload or "by_level" in stats_payload or "by_source" in stats_payload
        ):
            return LogStats(
                total_count=int(stats_payload.get("total_count", 0) or 0),
                by_level=dict(stats_payload.get("by_level") or {}),
                by_source=dict(stats_payload.get("by_source") or {}),
            )
        flat_by_level = dict(stats_payload or {}) if isinstance(stats_payload, dict) else {}
        return LogStats(
            total_count=sum(int(value or 0) for value in flat_by_level.values()),
            by_level={str(key): int(value or 0) for key, value in flat_by_level.items()},
            by_source={},
        )
# [/DEF:get_task_log_stats:Function]

# [DEF:get_task_log_sources:Function]
@@ -246,7 +260,6 @@ async def get_task_log_stats(
async def get_task_log_sources(
    task_id: str,
    task_manager: TaskManager = Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "READ"))
):
    with belief_scope("get_task_log_sources"):
        task = task_manager.get_task(task_id)
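The stats endpoint now tolerates three historical payload shapes: a typed `LogStats`, a structured dict, and a legacy flat per-level dict. A sketch of the same normalization with a minimal stand-in model (the real `LogStats` is a Pydantic response schema, not this dataclass):

```python
from dataclasses import dataclass, field
from typing import Dict

@dataclass
class LogStats:  # stand-in for the real response model
    total_count: int = 0
    by_level: Dict[str, int] = field(default_factory=dict)
    by_source: Dict[str, int] = field(default_factory=dict)

def normalize_stats(payload):
    if isinstance(payload, LogStats):
        return payload  # shape 1: already typed
    if isinstance(payload, dict) and ("total_count" in payload or "by_level" in payload or "by_source" in payload):
        return LogStats(  # shape 2: structured dict
            total_count=int(payload.get("total_count", 0) or 0),
            by_level=dict(payload.get("by_level") or {}),
            by_source=dict(payload.get("by_source") or {}),
        )
    # Shape 3: legacy flat dict like {"INFO": 5, "ERROR": 1}, counted per level.
    flat = dict(payload or {}) if isinstance(payload, dict) else {}
    return LogStats(
        total_count=sum(int(v or 0) for v in flat.values()),
        by_level={str(k): int(v or 0) for k, v in flat.items()},
    )

assert normalize_stats({"INFO": 5, "ERROR": 1}).total_count == 6
```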
@@ -3,8 +3,8 @@
|
||||
# @SEMANTICS: app, main, entrypoint, fastapi
|
||||
# @PURPOSE: The main entry point for the FastAPI application. It initializes the app, configures CORS, sets up dependencies, includes API routers, and defines the WebSocket endpoint for log streaming.
|
||||
# @LAYER: UI (API)
|
||||
# @RELATION: DEPENDS_ON ->[AppDependencies]
|
||||
# @RELATION: DEPENDS_ON ->[backend.src.api.routes]
|
||||
# @RELATION: [DEPENDS_ON] ->[AppDependencies]
|
||||
# @RELATION: [DEPENDS_ON] ->[ApiRoutesModule]
|
||||
# @INVARIANT: Only one FastAPI app instance exists per process.
|
||||
# @INVARIANT: All WebSocket connections must be properly cleaned up on disconnect.
|
||||
# @PRE: Python environment and dependencies installed; configuration database available.
|
||||
@@ -12,6 +12,7 @@
|
||||
# @SIDE_EFFECT: Starts background scheduler and binds network ports for HTTP/WS traffic.
|
||||
# @DATA_CONTRACT: [HTTP Request | WS Message] -> [HTTP Response | JSON Log Stream]
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# project_root is used for static files mounting
|
||||
@@ -28,7 +29,10 @@ from .dependencies import get_task_manager, get_scheduler_service
|
||||
from .core.encryption_key import ensure_encryption_key
|
||||
from .core.utils.network import NetworkError
|
||||
from .core.logger import logger, belief_scope
|
||||
from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm, dashboards, datasets, reports, assistant, clean_release, clean_release_v2, profile, health
|
||||
from .core.database import AuthSessionLocal
|
||||
from .core.auth.security import get_password_hash
|
||||
from .models.auth import User, Role
|
||||
from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm, dashboards, datasets, reports, assistant, clean_release, clean_release_v2, profile, health, dataset_review
|
||||
from .api import auth
|
||||
|
||||
# [DEF:App:Global]
|
||||
@@ -42,9 +46,58 @@ app = FastAPI(
|
||||
)
# [/DEF:App:Global]

# [DEF:ensure_initial_admin_user:Function]
# @COMPLEXITY: 3
# @PURPOSE: Ensures initial admin user exists when bootstrap env flags are enabled.
def ensure_initial_admin_user() -> None:
    raw_flag = os.getenv("INITIAL_ADMIN_CREATE", "false").strip().lower()
    if raw_flag not in {"1", "true", "yes", "on"}:
        return
    username = os.getenv("INITIAL_ADMIN_USERNAME", "").strip()
    password = os.getenv("INITIAL_ADMIN_PASSWORD", "").strip()
    if not username or not password:
        logger.warning(
            "INITIAL_ADMIN_CREATE is enabled but INITIAL_ADMIN_USERNAME/INITIAL_ADMIN_PASSWORD is missing; skipping bootstrap."
        )
        return

    db = AuthSessionLocal()
    try:
        admin_role = db.query(Role).filter(Role.name == "Admin").first()
        if not admin_role:
            admin_role = Role(name="Admin", description="System Administrator")
            db.add(admin_role)
            db.commit()
            db.refresh(admin_role)

        existing_user = db.query(User).filter(User.username == username).first()
        if existing_user:
            logger.info("Initial admin bootstrap skipped: user '%s' already exists.", username)
            return

        new_user = User(
            username=username,
            email=None,
            password_hash=get_password_hash(password),
            auth_source="LOCAL",
            is_active=True,
        )
        new_user.roles.append(admin_role)
        db.add(new_user)
        db.commit()
        logger.info("Initial admin user '%s' created from environment bootstrap.", username)
    except Exception as exc:
        db.rollback()
        logger.error("Failed to bootstrap initial admin user: %s", exc)
        raise
    finally:
        db.close()
# [/DEF:ensure_initial_admin_user:Function]
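
# A minimal usage sketch (assumptions: the env var names match the function above,
# and the app module path is illustrative) for the one-time admin bootstrap:
#
#   INITIAL_ADMIN_CREATE=true \
#   INITIAL_ADMIN_USERNAME=admin \
#   INITIAL_ADMIN_PASSWORD=change-me \
#   uvicorn src.main:app
#
# The function is idempotent: a second start with the same user logs a skip and returns.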

# [DEF:startup_event:Function]
# @COMPLEXITY: 3
# @PURPOSE: Handles application startup tasks, such as starting the scheduler.
# @RELATION: [CALLS] ->[AppDependencies]
# @PRE: None.
# @POST: Scheduler is started.
# Startup event
@@ -52,6 +105,7 @@ app = FastAPI(
async def startup_event():
    with belief_scope("startup_event"):
        ensure_encryption_key()
        ensure_initial_admin_user()
        scheduler = get_scheduler_service()
        scheduler.start()
# [/DEF:startup_event:Function]
@@ -59,6 +113,7 @@ async def startup_event():

# [DEF:shutdown_event:Function]
# @COMPLEXITY: 3
# @PURPOSE: Handles application shutdown tasks, such as stopping the scheduler.
# @RELATION: [CALLS] ->[AppDependencies]
# @PRE: None.
# @POST: Scheduler is stopped.
# Shutdown event
@@ -106,6 +161,7 @@ async def network_error_handler(request: Request, exc: NetworkError):

# [DEF:log_requests:Function]
# @COMPLEXITY: 3
# @PURPOSE: Middleware to log incoming HTTP requests and their response status.
# @RELATION: [DEPENDS_ON] ->[LoggerModule]
# @PRE: request is a FastAPI Request object.
# @POST: Logs request and response details.
# @PARAM: request (Request) - The incoming request object.
@@ -154,6 +210,7 @@ app.include_router(assistant.router, prefix="/api/assistant", tags=["Assistant"]
app.include_router(clean_release.router)
app.include_router(clean_release_v2.router)
app.include_router(profile.router)
app.include_router(dataset_review.router)
app.include_router(health.router)
# [/DEF:api_routes:Block]

@@ -168,10 +225,13 @@ app.include_router(health.router)

# [DEF:websocket_endpoint:Function]
# @COMPLEXITY: 5
# @PURPOSE: Provides a WebSocket endpoint for real-time log streaming of a task with server-side filtering.
# @RELATION: [CALLS] ->[TaskManagerPackage]
# @RELATION: [DEPENDS_ON] ->[LoggerModule]
# @PRE: task_id must be a valid task ID.
# @POST: WebSocket connection is managed and logs are streamed until disconnect.
# @SIDE_EFFECT: Subscribes to TaskManager log queue and broadcasts messages over network.
# @DATA_CONTRACT: [task_id: str, source: str, level: str] -> [JSON log entry objects]
# @INVARIANT: Every accepted WebSocket subscription is unsubscribed exactly once even when streaming fails or the client disconnects.
# @UX_STATE: Connecting -> Streaming -> (Disconnected)
#
# @TEST_CONTRACT: WebSocketLogStreamApi ->
@@ -205,84 +265,120 @@ async def websocket_endpoint(
    with belief_scope("websocket_endpoint", f"task_id={task_id}"):
        await websocket.accept()

        # Normalize filter parameters
        source_filter = source.lower() if source else None
        level_filter = level.upper() if level else None

        # Level hierarchy for filtering
        level_hierarchy = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
        min_level = level_hierarchy.get(level_filter, 0) if level_filter else 0
        logger.reason(
            "Accepted WebSocket log stream connection",
            extra={
                "task_id": task_id,
                "source_filter": source_filter,
                "level_filter": level_filter,
                "min_level": min_level,
            },
        )

        task_manager = get_task_manager()
        queue = await task_manager.subscribe_logs(task_id)
        logger.reason(
            "Subscribed WebSocket client to task log queue",
            extra={"task_id": task_id},
        )

        def matches_filters(log_entry) -> bool:
            """Check if log entry matches the filter criteria."""
            log_source = getattr(log_entry, "source", None)
            if source_filter and str(log_source or "").lower() != source_filter:
                return False

            if level_filter:
                log_level = level_hierarchy.get(str(log_entry.level).upper(), 0)
                if log_level < min_level:
                    return False

            return True

        try:
            logger.reason(
                "Starting task log stream replay and live forwarding",
                extra={"task_id": task_id},
            )

            # Send initial logs first to build context (apply filters)
            initial_logs = task_manager.get_task_logs(task_id)
            initial_sent = 0
            for log_entry in initial_logs:
                if matches_filters(log_entry):
                    log_dict = log_entry.dict()
                    log_dict["timestamp"] = log_dict["timestamp"].isoformat()
                    await websocket.send_json(log_dict)
                    initial_sent += 1

            logger.reflect(
                "Initial task log replay completed",
                extra={
                    "task_id": task_id,
                    "replayed_logs": initial_sent,
                    "total_available_logs": len(initial_logs),
                },
            )

            # Force a check for AWAITING_INPUT status immediately upon connection
            # This ensures that if the task is already waiting when the user connects, they get the prompt.
            task = task_manager.get_task(task_id)
            if task and task.status == "AWAITING_INPUT" and task.input_request:
                # Construct a synthetic log entry to trigger the frontend handler
                # This is a bit of a hack but avoids changing the websocket protocol significantly
                synthetic_log = {
                    "timestamp": task.logs[-1].timestamp.isoformat() if task.logs else "2024-01-01T00:00:00",
                    "level": "INFO",
                    "message": "Task paused for user input (Connection Re-established)",
                    "context": {"input_request": task.input_request},
                }
                await websocket.send_json(synthetic_log)
                logger.reason(
                    "Replayed awaiting-input prompt to restored WebSocket client",
                    extra={"task_id": task_id, "task_status": task.status},
                )

            while True:
                log_entry = await queue.get()

                # Apply server-side filtering
                if not matches_filters(log_entry):
                    continue

                log_dict = log_entry.dict()
                log_dict["timestamp"] = log_dict["timestamp"].isoformat()
                await websocket.send_json(log_dict)
                logger.reflect(
                    "Forwarded task log entry to WebSocket client",
                    extra={
                        "task_id": task_id,
                        "level": log_dict.get("level"),
                    },
                )

                # If the task is finished, keep the connection open so the client can
                # review logs; delay briefly to ensure the last message is received.
                if "Task completed successfully" in log_entry.message or "Task failed" in log_entry.message:
                    logger.reason(
                        "Observed terminal task log entry; delaying to preserve client visibility",
                        extra={"task_id": task_id, "message": log_entry.message},
                    )
                    await asyncio.sleep(2)
                    # DO NOT BREAK here - breaking closes the socket immediately;
                    # the client disconnects when it is done reviewing logs.

        except WebSocketDisconnect:
            logger.reason(
                "WebSocket client disconnected from task log stream",
                extra={"task_id": task_id},
            )
        except Exception as exc:
            logger.explore(
                "WebSocket log streaming encountered an unexpected failure",
                extra={"task_id": task_id, "error": str(exc)},
            )
            raise
        finally:
            task_manager.unsubscribe_logs(task_id, queue)
            logger.reflect(
                "Released WebSocket log queue subscription",
                extra={"task_id": task_id},
            )
# [/DEF:websocket_endpoint:Function]
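
# A minimal client-side sketch (not part of the diff; the route path, port, and query
# parameter names are assumptions inferred from the handler above) for tailing logs:
#
#   import asyncio
#   import json
#   import websockets  # third-party: pip install websockets
#
#   async def tail_task_logs(task_id: str) -> None:
#       uri = f"ws://localhost:8000/ws/logs/{task_id}?level=INFO"  # path is assumed
#       async with websockets.connect(uri) as ws:
#           while True:
#               entry = json.loads(await ws.recv())  # one JSON log entry per message
#               print(entry["timestamp"], entry["level"], entry["message"])
#
#   asyncio.run(tail_task_logs("some-task-id"))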

# [DEF:StaticFiles:Mount]

@@ -5,8 +5,10 @@
# @LAYER: Domain
# @RELATION: VERIFIES -> ConfigManager

from types import SimpleNamespace

from src.core.config_manager import ConfigManager
from src.core.config_models import AppConfig, Environment, GlobalSettings


# [DEF:test_get_payload_preserves_legacy_sections:Function]
@@ -48,6 +50,115 @@ def test_save_config_accepts_raw_payload_and_keeps_extras(monkeypatch):
    assert manager.raw_payload["notifications"]["telegram"]["bot_token"] == "secret"
    assert manager.config.settings.migration_sync_cron == "0 2 * * *"
    assert persisted["payload"]["notifications"]["telegram"]["bot_token"] == "secret"
# [/DEF:test_save_config_accepts_raw_payload_and_keeps_extras:Function]


# [DEF:test_save_config_syncs_environment_records_for_fk_backed_flows:Function]
# @PURPOSE: Ensure saving config mirrors typed environments into relational records required by FK-backed session persistence.
def test_save_config_syncs_environment_records_for_fk_backed_flows():
    manager = ConfigManager.__new__(ConfigManager)
    manager.raw_payload = {}
    manager.config = AppConfig(environments=[], settings=GlobalSettings())

    added_records = []
    deleted_records = []
    existing_record = SimpleNamespace(
        id="legacy-env",
        name="Legacy",
        url="http://legacy.local",
        credentials_id="legacy-user",
    )

    class _FakeQuery:
        def all(self):
            return [existing_record]

    class _FakeSession:
        def query(self, model):
            return _FakeQuery()

        def add(self, value):
            added_records.append(value)

        def delete(self, value):
            deleted_records.append(value)

    session = _FakeSession()
    config = AppConfig(
        environments=[
            Environment(
                id="dev",
                name="DEV",
                url="http://superset.local",
                username="demo",
                password="secret",
            )
        ],
        settings=GlobalSettings(),
    )

    manager._sync_environment_records(session, config)

    assert len(added_records) == 1
    assert added_records[0].id == "dev"
    assert added_records[0].name == "DEV"
    assert added_records[0].url == "http://superset.local"
    assert added_records[0].credentials_id == "demo"
    assert deleted_records == [existing_record]
# [/DEF:test_save_config_syncs_environment_records_for_fk_backed_flows:Function]


# [DEF:test_load_config_syncs_environment_records_from_existing_db_payload:Function]
# @PURPOSE: Ensure loading an existing DB-backed config also mirrors environment rows required by FK-backed runtime flows.
def test_load_config_syncs_environment_records_from_existing_db_payload(monkeypatch):
    manager = ConfigManager.__new__(ConfigManager)
    manager.config_path = None
    manager.raw_payload = {}
    manager.config = AppConfig(environments=[], settings=GlobalSettings())

    sync_calls = []
    closed = {"value": False}
    committed = {"value": False}

    class _FakeSession:
        def commit(self):
            committed["value"] = True

        def close(self):
            closed["value"] = True

    fake_session = _FakeSession()
    fake_record = SimpleNamespace(
        id="global",
        payload={
            "environments": [
                {
                    "id": "dev",
                    "name": "DEV",
                    "url": "http://superset.local",
                    "username": "demo",
                    "password": "secret",
                }
            ],
            "settings": GlobalSettings().model_dump(),
        },
    )

    monkeypatch.setattr("src.core.config_manager.SessionLocal", lambda: fake_session)
    monkeypatch.setattr(manager, "_get_record", lambda session: fake_record)
    monkeypatch.setattr(
        manager,
        "_sync_environment_records",
        lambda session, config: sync_calls.append((session, config)),
    )

    config = manager._load_config()

    assert config.environments[0].id == "dev"
    assert len(sync_calls) == 1
    assert sync_calls[0][0] is fake_session
    assert sync_calls[0][1].environments[0].id == "dev"
    assert committed["value"] is True
    assert closed["value"] is True
# [/DEF:test_load_config_syncs_environment_records_from_existing_db_payload:Function]

# [/DEF:backend.src.core.__tests__.test_config_manager_compat:Module]

backend/src/core/__tests__/test_superset_preview_pipeline.py (new file, 196 lines)
@@ -0,0 +1,196 @@
# [DEF:SupersetPreviewPipelineTests:Module]
# @COMPLEXITY: 3
# @SEMANTICS: tests, superset, preview, chart_data, network, 404-mapping
# @PURPOSE: Verify explicit chart-data preview compilation and ensure non-dashboard 404 errors remain generic across sync and async clients.
# @LAYER: Domain
# @RELATION: [BINDS_TO] ->[SupersetClient]
# @RELATION: [BINDS_TO] ->[APIClient]
# @RELATION: [BINDS_TO] ->[AsyncAPIClient]

import json
from unittest.mock import MagicMock

import httpx
import pytest
import requests

from src.core.config_models import Environment
from src.core.superset_client import SupersetClient
from src.core.utils.async_network import AsyncAPIClient
from src.core.utils.network import APIClient, DashboardNotFoundError, SupersetAPIError


# [DEF:_make_environment:Function]
def _make_environment() -> Environment:
    return Environment(
        id="env-1",
        name="DEV",
        url="http://superset.local",
        username="demo",
        password="secret",
    )
# [/DEF:_make_environment:Function]


# [DEF:_make_requests_http_error:Function]
def _make_requests_http_error(status_code: int, url: str) -> requests.exceptions.HTTPError:
    response = requests.Response()
    response.status_code = status_code
    response.url = url
    response._content = b'{"message":"not found"}'
    request = requests.Request("GET", url).prepare()
    response.request = request
    return requests.exceptions.HTTPError(response=response, request=request)
# [/DEF:_make_requests_http_error:Function]


# [DEF:_make_httpx_status_error:Function]
def _make_httpx_status_error(status_code: int, url: str) -> httpx.HTTPStatusError:
    request = httpx.Request("GET", url)
    response = httpx.Response(status_code=status_code, request=request, text='{"message":"not found"}')
    return httpx.HTTPStatusError("upstream error", request=request, response=response)
# [/DEF:_make_httpx_status_error:Function]


# [DEF:test_compile_dataset_preview_uses_chart_data_and_result_query_sql:Function]
# @PURPOSE: Superset preview compilation should call the real chart-data endpoint and extract SQL from result[].query.
def test_compile_dataset_preview_uses_chart_data_and_result_query_sql():
    client = SupersetClient(_make_environment())
    client.get_dataset = MagicMock(
        return_value={
            "result": {
                "id": 42,
                "schema": "public",
                "datasource": {"id": 42, "type": "table"},
                "result_format": "json",
                "result_type": "full",
            }
        }
    )
    client.network = MagicMock()
    client.network.request.return_value = {
        "result": [
            {
                "query": "SELECT count(*) FROM public.sales WHERE country IN ('DE')",
            }
        ]
    }

    result = client.compile_dataset_preview(
        dataset_id=42,
        template_params={"country": "DE"},
        effective_filters=[{"filter_name": "country", "effective_value": ["DE"]}],
    )

    assert result["compiled_sql"] == "SELECT count(*) FROM public.sales WHERE country IN ('DE')"
    client.network.request.assert_called_once()
    request_call = client.network.request.call_args
    assert request_call.kwargs["method"] == "POST"
    assert request_call.kwargs["endpoint"] == "/chart/data"
    assert request_call.kwargs["headers"] == {"Content-Type": "application/json"}

    query_context = json.loads(request_call.kwargs["data"])
    assert query_context["datasource"] == {"id": 42, "type": "table"}
    assert query_context["queries"][0]["filters"] == [
        {"col": "country", "op": "IN", "val": ["DE"]}
    ]
    assert query_context["queries"][0]["url_params"] == {"country": "DE"}

    assert result["query_context"]["datasource"] == {"id": 42, "type": "table"}
    assert result["query_context"]["queries"][0]["filters"] == [
        {"col": "country", "op": "IN", "val": ["DE"]}
    ]
# [/DEF:test_compile_dataset_preview_uses_chart_data_and_result_query_sql:Function]


# [DEF:test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function]
# @PURPOSE: Sync network client should reserve dashboard-not-found translation for dashboard endpoints only.
def test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic():
    client = APIClient(
        config={
            "base_url": "http://superset.local",
            "auth": {"username": "demo", "password": "secret"},
        }
    )

    with pytest.raises(SupersetAPIError) as exc_info:
        client._handle_http_error(
            _make_requests_http_error(404, "http://superset.local/api/v1/chart/data"),
            "/chart/data",
        )

    assert not isinstance(exc_info.value, DashboardNotFoundError)
    assert "API resource not found at endpoint '/chart/data'" in str(exc_info.value)
# [/DEF:test_sync_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function]


# [DEF:test_sync_network_404_mapping_translates_dashboard_endpoints:Function]
# @PURPOSE: Sync network client should still translate dashboard endpoint 404 responses into dashboard-not-found errors.
def test_sync_network_404_mapping_translates_dashboard_endpoints():
    client = APIClient(
        config={
            "base_url": "http://superset.local",
            "auth": {"username": "demo", "password": "secret"},
        }
    )

    with pytest.raises(DashboardNotFoundError) as exc_info:
        client._handle_http_error(
            _make_requests_http_error(404, "http://superset.local/api/v1/dashboard/10"),
            "/dashboard/10",
        )

    assert "Dashboard '/dashboard/10' Dashboard not found" in str(exc_info.value)
# [/DEF:test_sync_network_404_mapping_translates_dashboard_endpoints:Function]


# [DEF:test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function]
# @PURPOSE: Async network client should reserve dashboard-not-found translation for dashboard endpoints only.
@pytest.mark.asyncio
async def test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic():
    client = AsyncAPIClient(
        config={
            "base_url": "http://superset.local",
            "auth": {"username": "demo", "password": "secret"},
        }
    )

    try:
        with pytest.raises(SupersetAPIError) as exc_info:
            client._handle_http_error(
                _make_httpx_status_error(404, "http://superset.local/api/v1/chart/data"),
                "/chart/data",
            )

        assert not isinstance(exc_info.value, DashboardNotFoundError)
        assert "API resource not found at endpoint '/chart/data'" in str(exc_info.value)
    finally:
        await client.aclose()
# [/DEF:test_async_network_404_mapping_keeps_non_dashboard_endpoints_generic:Function]


# [DEF:test_async_network_404_mapping_translates_dashboard_endpoints:Function]
# @PURPOSE: Async network client should still translate dashboard endpoint 404 responses into dashboard-not-found errors.
@pytest.mark.asyncio
async def test_async_network_404_mapping_translates_dashboard_endpoints():
    client = AsyncAPIClient(
        config={
            "base_url": "http://superset.local",
            "auth": {"username": "demo", "password": "secret"},
        }
    )

    try:
        with pytest.raises(DashboardNotFoundError) as exc_info:
            client._handle_http_error(
                _make_httpx_status_error(404, "http://superset.local/api/v1/dashboard/10"),
                "/dashboard/10",
            )

        assert "Dashboard '/dashboard/10' Dashboard not found" in str(exc_info.value)
    finally:
        await client.aclose()
# [/DEF:test_async_network_404_mapping_translates_dashboard_endpoints:Function]


# [/DEF:SupersetPreviewPipelineTests:Module]

@@ -12,7 +12,6 @@
# @RELATION: [DEPENDS_ON] ->[AppConfig]
# @RELATION: [DEPENDS_ON] ->[SessionLocal]
# @RELATION: [DEPENDS_ON] ->[AppConfigRecord]
# @RELATION: [DEPENDS_ON] ->[FileIO]
# @RELATION: [CALLS] ->[logger]
# @RELATION: [CALLS] ->[configure_logger]
#
@@ -23,9 +22,10 @@ from typing import Any, Optional, List

from sqlalchemy.orm import Session

from .config_models import AppConfig, Environment, GlobalSettings
from .database import SessionLocal
from ..models.config import AppConfigRecord
from ..models.mapping import Environment as EnvironmentRecord
from .logger import logger, configure_logger, belief_scope


@@ -62,5 +62,431 @@ class ConfigManager:

        logger.reflect("ConfigManager initialization complete")
    # [/DEF:__init__:Function]

    # [DEF:_default_config:Function]
    # @PURPOSE: Build default application configuration fallback.
    def _default_config(self) -> AppConfig:
        with belief_scope("ConfigManager._default_config"):
            logger.reason("Building default AppConfig fallback")
            return AppConfig(environments=[], settings=GlobalSettings())
    # [/DEF:_default_config:Function]

    # [DEF:_sync_raw_payload_from_config:Function]
    # @PURPOSE: Merge typed AppConfig state into raw payload while preserving unsupported legacy sections.
    def _sync_raw_payload_from_config(self) -> dict[str, Any]:
        with belief_scope("ConfigManager._sync_raw_payload_from_config"):
            typed_payload = self.config.model_dump()
            merged_payload = dict(self.raw_payload or {})
            merged_payload["environments"] = typed_payload.get("environments", [])
            merged_payload["settings"] = typed_payload.get("settings", {})
            self.raw_payload = merged_payload
            logger.reason(
                "Synchronized raw payload from typed config",
                extra={
                    "environments_count": len(merged_payload.get("environments", []) or []),
                    "has_settings": "settings" in merged_payload,
                    "extra_sections": sorted(
                        key for key in merged_payload.keys() if key not in {"environments", "settings"}
                    ),
                },
            )
            return merged_payload
    # [/DEF:_sync_raw_payload_from_config:Function]

    # [DEF:_load_from_legacy_file:Function]
    # @PURPOSE: Load legacy JSON configuration for migration fallback path.
    def _load_from_legacy_file(self) -> dict[str, Any]:
        with belief_scope("ConfigManager._load_from_legacy_file"):
            if not self.config_path.exists():
                logger.reason(
                    "Legacy config file not found; using default payload",
                    extra={"path": str(self.config_path)},
                )
                return {}

            logger.reason("Loading legacy config file", extra={"path": str(self.config_path)})
            with self.config_path.open("r", encoding="utf-8") as fh:
                payload = json.load(fh)

            if not isinstance(payload, dict):
                logger.explore(
                    "Legacy config payload is not a JSON object",
                    extra={"path": str(self.config_path), "type": type(payload).__name__},
                )
                raise ValueError("Legacy config payload must be a JSON object")

            logger.reason(
                "Legacy config file loaded successfully",
                extra={"path": str(self.config_path), "keys": sorted(payload.keys())},
            )
            return payload
    # [/DEF:_load_from_legacy_file:Function]

    # [DEF:_get_record:Function]
    # @PURPOSE: Resolve global configuration record from DB.
    def _get_record(self, session: Session) -> Optional[AppConfigRecord]:
        with belief_scope("ConfigManager._get_record"):
            record = session.query(AppConfigRecord).filter(AppConfigRecord.id == "global").first()
            logger.reason("Resolved app config record", extra={"exists": record is not None})
            return record
    # [/DEF:_get_record:Function]

    # [DEF:_load_config:Function]
    # @PURPOSE: Load configuration from DB or perform one-time migration from legacy JSON.
    def _load_config(self) -> AppConfig:
        with belief_scope("ConfigManager._load_config"):
            session = SessionLocal()
            try:
                record = self._get_record(session)
                if record and isinstance(record.payload, dict):
                    logger.reason("Loading configuration from database", extra={"record_id": record.id})
                    self.raw_payload = dict(record.payload)
                    config = AppConfig.model_validate(
                        {
                            "environments": self.raw_payload.get("environments", []),
                            "settings": self.raw_payload.get("settings", {}),
                        }
                    )
                    self._sync_environment_records(session, config)
                    session.commit()
                    logger.reason(
                        "Database configuration validated successfully",
                        extra={
                            "environments_count": len(config.environments),
                            "payload_keys": sorted(self.raw_payload.keys()),
                        },
                    )
                    return config

                logger.reason(
                    "Database configuration record missing; attempting legacy file migration",
                    extra={"legacy_path": str(self.config_path)},
                )
                legacy_payload = self._load_from_legacy_file()

                if legacy_payload:
                    self.raw_payload = dict(legacy_payload)
                    config = AppConfig.model_validate(
                        {
                            "environments": self.raw_payload.get("environments", []),
                            "settings": self.raw_payload.get("settings", {}),
                        }
                    )
                    logger.reason(
                        "Legacy payload validated; persisting migrated configuration to database",
                        extra={
                            "environments_count": len(config.environments),
                            "payload_keys": sorted(self.raw_payload.keys()),
                        },
                    )
                    self._save_config_to_db(config, session=session)
                    return config

                logger.reason("No persisted config found; falling back to default configuration")
                config = self._default_config()
                self.raw_payload = config.model_dump()
                self._save_config_to_db(config, session=session)
                return config
            except (json.JSONDecodeError, TypeError, ValueError) as exc:
                logger.explore(
                    "Recoverable config load failure; falling back to default configuration",
                    extra={"error": str(exc), "legacy_path": str(self.config_path)},
                )
                config = self._default_config()
                self.raw_payload = config.model_dump()
                return config
            except Exception as exc:
                logger.explore(
                    "Critical config load failure; re-raising persistence or validation error",
                    extra={"error": str(exc)},
                )
                raise
            finally:
                session.close()
    # [/DEF:_load_config:Function]

    # [DEF:_sync_environment_records:Function]
    # @PURPOSE: Mirror configured environments into the relational environments table used by FK-backed domain models.
    def _sync_environment_records(self, session: Session, config: AppConfig) -> None:
        with belief_scope("ConfigManager._sync_environment_records"):
            configured_envs = list(config.environments or [])
            configured_ids = {
                str(environment.id or "").strip()
                for environment in configured_envs
                if str(environment.id or "").strip()
            }

            persisted_records = session.query(EnvironmentRecord).all()
            persisted_by_id = {str(record.id or "").strip(): record for record in persisted_records}

            for environment in configured_envs:
                normalized_id = str(environment.id or "").strip()
                if not normalized_id:
                    continue

                display_name = str(environment.name or normalized_id).strip() or normalized_id
                normalized_url = str(environment.url or "").strip()
                credentials_id = str(environment.username or "").strip() or normalized_id

                record = persisted_by_id.get(normalized_id)
                if record is None:
                    logger.reason(
                        "Creating relational environment record from typed config",
                        extra={"environment_id": normalized_id, "environment_name": display_name},
                    )
                    session.add(
                        EnvironmentRecord(
                            id=normalized_id,
                            name=display_name,
                            url=normalized_url,
                            credentials_id=credentials_id,
                        )
                    )
                    continue

                record.name = display_name
                record.url = normalized_url
                record.credentials_id = credentials_id

            for record in persisted_records:
                normalized_id = str(record.id or "").strip()
                if normalized_id and normalized_id not in configured_ids:
                    logger.reason(
                        "Removing stale relational environment record absent from typed config",
                        extra={"environment_id": normalized_id},
                    )
                    session.delete(record)
    # [/DEF:_sync_environment_records:Function]
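
    # A minimal sketch (illustrative values only) of the mirroring semantics above:
    # given typed config environments {dev, prod} and relational rows {dev, legacy},
    # _sync_environment_records updates "dev", inserts "prod", and deletes "legacy",
    # so FK-backed tables always reference exactly the configured environment ids.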

    # [DEF:_save_config_to_db:Function]
    # @PURPOSE: Persist provided AppConfig into the global DB configuration record.
    def _save_config_to_db(self, config: AppConfig, session: Optional[Session] = None) -> None:
        with belief_scope("ConfigManager._save_config_to_db"):
            owns_session = session is None
            db = session or SessionLocal()
            try:
                self.config = config
                payload = self._sync_raw_payload_from_config()
                record = self._get_record(db)
                if record is None:
                    logger.reason("Creating new global app config record")
                    record = AppConfigRecord(id="global", payload=payload)
                    db.add(record)
                else:
                    logger.reason("Updating existing global app config record", extra={"record_id": record.id})
                    record.payload = payload

                self._sync_environment_records(db, config)

                db.commit()
                logger.reason(
                    "Configuration persisted to database",
                    extra={
                        "environments_count": len(payload.get("environments", []) or []),
                        "payload_keys": sorted(payload.keys()),
                    },
                )
            except Exception:
                db.rollback()
                logger.explore("Database save failed; transaction rolled back")
                raise
            finally:
                if owns_session:
                    db.close()
    # [/DEF:_save_config_to_db:Function]

    # [DEF:save:Function]
    # @PURPOSE: Persist current in-memory configuration state.
    def save(self) -> None:
        with belief_scope("ConfigManager.save"):
            logger.reason("Persisting current in-memory configuration")
            self._save_config_to_db(self.config)
    # [/DEF:save:Function]

    # [DEF:get_config:Function]
    # @PURPOSE: Return current in-memory configuration snapshot.
    def get_config(self) -> AppConfig:
        with belief_scope("ConfigManager.get_config"):
            return self.config
    # [/DEF:get_config:Function]

    # [DEF:get_payload:Function]
    # @PURPOSE: Return full persisted payload including sections outside typed AppConfig schema.
    def get_payload(self) -> dict[str, Any]:
        with belief_scope("ConfigManager.get_payload"):
            return self._sync_raw_payload_from_config()
    # [/DEF:get_payload:Function]

    # [DEF:save_config:Function]
    # @PURPOSE: Persist configuration provided either as typed AppConfig or raw payload dict.
    def save_config(self, config: Any) -> AppConfig:
        with belief_scope("ConfigManager.save_config"):
            if isinstance(config, AppConfig):
                logger.reason("Saving typed AppConfig payload")
                self.config = config
                self.raw_payload = config.model_dump()
                self._save_config_to_db(config)
                return self.config

            if isinstance(config, dict):
                logger.reason(
                    "Saving raw config payload",
                    extra={"keys": sorted(config.keys())},
                )
                self.raw_payload = dict(config)
                typed_config = AppConfig.model_validate(
                    {
                        "environments": self.raw_payload.get("environments", []),
                        "settings": self.raw_payload.get("settings", {}),
                    }
                )
                self.config = typed_config
                self._save_config_to_db(typed_config)
                return self.config

            logger.explore("Unsupported config type supplied to save_config", extra={"type": type(config).__name__})
            raise TypeError("config must be AppConfig or dict")
    # [/DEF:save_config:Function]
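
    # A minimal usage sketch (illustrative keys only): save_config accepts either a
    # typed AppConfig or a raw dict, and raw dicts may carry extra sections that the
    # typed schema does not model but that get_payload() still round-trips:
    #
    #   manager.save_config({
    #       "environments": [{"id": "dev", "name": "DEV", "url": "http://superset.local",
    #                         "username": "demo", "password": "secret"}],
    #       "settings": {},
    #       "notifications": {"telegram": {"bot_token": "secret"}},  # preserved extra section
    #   })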

    # [DEF:update_global_settings:Function]
    # @PURPOSE: Replace global settings and persist the resulting configuration.
    def update_global_settings(self, settings: GlobalSettings) -> AppConfig:
        with belief_scope("ConfigManager.update_global_settings"):
            logger.reason("Updating global settings")
            self.config.settings = settings
            self.save()
            return self.config
    # [/DEF:update_global_settings:Function]

    # [DEF:validate_path:Function]
    # @PURPOSE: Validate that path exists and is writable, creating it when absent.
    def validate_path(self, path: str) -> tuple[bool, str]:
        with belief_scope("ConfigManager.validate_path", f"path={path}"):
            try:
                target = Path(path).expanduser()
                target.mkdir(parents=True, exist_ok=True)

                if not target.exists():
                    return False, f"Path does not exist: {target}"

                if not target.is_dir():
                    return False, f"Path is not a directory: {target}"

                test_file = target / ".write_test"
                with test_file.open("w", encoding="utf-8") as fh:
                    fh.write("ok")
                test_file.unlink(missing_ok=True)

                logger.reason("Path validation succeeded", extra={"path": str(target)})
                return True, "OK"
            except Exception as exc:
                logger.explore("Path validation failed", extra={"path": path, "error": str(exc)})
                return False, str(exc)
    # [/DEF:validate_path:Function]
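
    # A minimal usage sketch (the path value is illustrative): the (ok, message)
    # tuple keeps callers free of try/except around filesystem probing.
    #
    #   ok, message = manager.validate_path("~/superset-backups")
    #   if not ok:
    #       raise ValueError(f"Storage path rejected: {message}")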

    # [DEF:get_environments:Function]
    # @PURPOSE: Return all configured environments.
    def get_environments(self) -> List[Environment]:
        with belief_scope("ConfigManager.get_environments"):
            return list(self.config.environments)
    # [/DEF:get_environments:Function]

    # [DEF:has_environments:Function]
    # @PURPOSE: Check whether at least one environment exists in configuration.
    def has_environments(self) -> bool:
        with belief_scope("ConfigManager.has_environments"):
            return len(self.config.environments) > 0
    # [/DEF:has_environments:Function]

    # [DEF:get_environment:Function]
    # @PURPOSE: Resolve a configured environment by identifier.
    def get_environment(self, env_id: str) -> Optional[Environment]:
        with belief_scope("ConfigManager.get_environment", f"env_id={env_id}"):
            normalized = str(env_id or "").strip()
            if not normalized:
                return None

            for env in self.config.environments:
                if env.id == normalized or env.name == normalized:
                    return env
            return None
    # [/DEF:get_environment:Function]

    # [DEF:add_environment:Function]
    # @PURPOSE: Upsert environment by id into configuration and persist.
    def add_environment(self, env: Environment) -> AppConfig:
        with belief_scope("ConfigManager.add_environment", f"env_id={env.id}"):
            existing_index = next((i for i, item in enumerate(self.config.environments) if item.id == env.id), None)
            if env.is_default:
                for item in self.config.environments:
                    item.is_default = False

            if existing_index is None:
                logger.reason("Appending new environment", extra={"env_id": env.id})
                self.config.environments.append(env)
            else:
                logger.reason("Replacing existing environment during add", extra={"env_id": env.id})
                self.config.environments[existing_index] = env

            if len(self.config.environments) == 1 and not any(item.is_default for item in self.config.environments):
                self.config.environments[0].is_default = True

            self.save()
            return self.config
    # [/DEF:add_environment:Function]

    # [DEF:update_environment:Function]
    # @PURPOSE: Update existing environment by id and preserve masked password placeholder behavior.
    def update_environment(self, env_id: str, env: Environment) -> bool:
        with belief_scope("ConfigManager.update_environment", f"env_id={env_id}"):
            for index, existing in enumerate(self.config.environments):
                if existing.id != env_id:
                    continue

                update_data = env.model_dump()
                if update_data.get("password") == "********":
                    update_data["password"] = existing.password

                updated = Environment.model_validate(update_data)

                if updated.is_default:
                    for item in self.config.environments:
                        item.is_default = False
                elif existing.is_default and not updated.is_default:
                    updated.is_default = True

                self.config.environments[index] = updated
                logger.reason("Environment updated", extra={"env_id": env_id})
                self.save()
                return True

            logger.explore("Environment update skipped; env not found", extra={"env_id": env_id})
            return False
    # [/DEF:update_environment:Function]

    # [DEF:delete_environment:Function]
    # @PURPOSE: Delete environment by id and persist when deletion occurs.
    def delete_environment(self, env_id: str) -> bool:
        with belief_scope("ConfigManager.delete_environment", f"env_id={env_id}"):
            before = len(self.config.environments)
            removed = [env for env in self.config.environments if env.id == env_id]
            self.config.environments = [env for env in self.config.environments if env.id != env_id]

            if len(self.config.environments) == before:
                logger.explore("Environment delete skipped; env not found", extra={"env_id": env_id})
                return False

            if removed and removed[0].is_default and self.config.environments:
                self.config.environments[0].is_default = True

            if self.config.settings.default_environment_id == env_id:
                replacement = next((env.id for env in self.config.environments if env.is_default), None)
                self.config.settings.default_environment_id = replacement

            logger.reason("Environment deleted", extra={"env_id": env_id, "remaining": len(self.config.environments)})
            self.save()
            return True
    # [/DEF:delete_environment:Function]
# [/DEF:ConfigManager:Class]
# [/DEF:ConfigManager:Module]
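
# A minimal lifecycle sketch (ids and URLs illustrative) of the environment CRUD
# surface above; each mutating call persists via save():
#
#   manager.add_environment(Environment(id="dev", name="DEV", url="http://superset.local",
#                                       username="demo", password="secret", is_default=True))
#   manager.update_environment("dev", Environment(id="dev", name="DEV", url="http://superset.local",
#                                                 username="demo", password="********"))  # masked -> keeps old password
#   manager.delete_environment("dev")  # reassigns the default flag and default_environment_id if needed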

@@ -81,6 +81,11 @@ class GlobalSettings(BaseModel):

    # Migration sync settings
    migration_sync_cron: str = "0 2 * * *"

    # Dataset Review Feature Flags
    ff_dataset_auto_review: bool = True
    ff_dataset_clarification: bool = True
    ff_dataset_execution: bool = True
# [/DEF:GlobalSettings:DataClass]

# [DEF:AppConfig:DataClass]

@@ -24,6 +24,7 @@ from ..models import assistant as _assistant_models # noqa: F401
from ..models import profile as _profile_models # noqa: F401
from ..models import clean_release as _clean_release_models # noqa: F401
from ..models import connection as _connection_models # noqa: F401
from ..models import dataset_review as _dataset_review_models # noqa: F401
from .logger import belief_scope, logger
from .auth.config import auth_config
import os
@@ -294,6 +295,62 @@ def _ensure_git_server_configs_columns(bind_engine):
# [/DEF:_ensure_git_server_configs_columns:Function]


# [DEF:_ensure_auth_users_columns:Function]
# @COMPLEXITY: 3
# @PURPOSE: Applies additive schema upgrades for auth users table.
# @PRE: bind_engine points to authentication database.
# @POST: Missing columns are added without data loss.
def _ensure_auth_users_columns(bind_engine):
    with belief_scope("_ensure_auth_users_columns"):
        table_name = "users"
        inspector = inspect(bind_engine)
        if table_name not in inspector.get_table_names():
            return

        existing_columns = {
            str(column.get("name") or "").strip()
            for column in inspector.get_columns(table_name)
        }

        alter_statements = []
        if "full_name" not in existing_columns:
            alter_statements.append(
                "ALTER TABLE users ADD COLUMN full_name VARCHAR"
            )
        if "is_ad_user" not in existing_columns:
            alter_statements.append(
                "ALTER TABLE users ADD COLUMN is_ad_user BOOLEAN NOT NULL DEFAULT FALSE"
            )

        if not alter_statements:
            logger.reason(
                "Auth users schema already up to date",
                extra={"table": table_name, "columns": sorted(existing_columns)},
            )
            return

        logger.reason(
            "Applying additive auth users schema migration",
            extra={"table": table_name, "statements": alter_statements},
        )

        try:
            with bind_engine.begin() as connection:
                for statement in alter_statements:
                    connection.execute(text(statement))
            logger.reason(
                "Auth users schema migration completed",
                extra={"table": table_name, "added_columns": [stmt.split(" ADD COLUMN ", 1)[1].split()[0] for stmt in alter_statements]},
            )
        except Exception as migration_error:
            logger.warning(
                "[database][EXPLORE] Auth users additive migration failed: %s",
                migration_error,
            )
            raise
# [/DEF:_ensure_auth_users_columns:Function]
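
# A minimal sketch (engine URL illustrative) of exercising the additive migration
# above against a scratch SQLite database:
#
#   from sqlalchemy import create_engine
#   engine = create_engine("sqlite:///./auth_scratch.db")
#   _ensure_auth_users_columns(engine)  # no-op until a "users" table exists
#
# Running it twice is safe: the column inspection short-circuits once the schema is current.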


# [DEF:ensure_connection_configs_table:Function]
# @COMPLEXITY: 3
# @PURPOSE: Ensures the external connection registry table exists in the main database.
@@ -327,6 +384,7 @@ def init_db():
    _ensure_llm_validation_results_columns(engine)
    _ensure_user_dashboard_preferences_health_columns(engine)
    _ensure_git_server_configs_columns(engine)
    _ensure_auth_users_columns(auth_engine)
    ensure_connection_configs_table(engine)
# [/DEF:init_db:Function]


@@ -129,7 +129,7 @@ class MigrationEngine:
        with belief_scope("MigrationEngine._transform_yaml"):
            if not file_path.exists():
                logger.explore(f"YAML file not found: {file_path}")
                raise FileNotFoundError(str(file_path))

            with open(file_path, 'r') as f:
                data = yaml.safe_load(f)

@@ -1,11 +1,10 @@
# [DEF:SupersetClientModule:Module]
#
# @COMPLEXITY: 3
# @SEMANTICS: superset, api, client, rest, http, dashboard, dataset, import, export
# @PURPOSE: Provides a high-level client for interacting with the Superset REST API, encapsulating request logic, error handling, and pagination.
# @LAYER: Core
# @RELATION: [DEPENDS_ON] ->[APIClient]
#
# @INVARIANT: All network operations must use the internal APIClient instance.
# @PUBLIC_API: SupersetClient
@@ -14,6 +13,7 @@
import json
import re
import zipfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from requests import Response
@@ -24,18 +24,18 @@ from .utils.fileio import get_filename_from_headers
from .config_models import Environment
# [/SECTION]

# [DEF:SupersetClient:Class]
# @COMPLEXITY: 3
# @PURPOSE: A wrapper class over the Superset REST API that provides methods for working with dashboards and datasets.
# @RELATION: [DEPENDS_ON] ->[APIClient]
class SupersetClient:
    # [DEF:SupersetClient.__init__:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Initializes the client, validates the configuration, and creates the network client.
    # @PRE: `env` must be a valid Environment object.
    # @POST: The `env` and `network` attributes are created and ready for use.
    # @DATA_CONTRACT: Input[Environment] -> self.network[APIClient]
    # @RELATION: [DEPENDS_ON] ->[APIClient]
    def __init__(self, env: Environment):
        with belief_scope("__init__"):
            app_logger.info("[SupersetClient.__init__][Enter] Initializing SupersetClient for env %s.", env.name)
@@ -57,22 +57,22 @@ class SupersetClient:
            )
            self.delete_before_reimport: bool = False
            app_logger.info("[SupersetClient.__init__][Exit] SupersetClient initialized.")
    # [/DEF:SupersetClient.__init__:Function]

    # [DEF:SupersetClient.authenticate:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Authenticates the client using the configured credentials.
    # @PRE: self.network must be initialized with valid auth configuration.
    # @POST: Client is authenticated and tokens are stored.
    # @DATA_CONTRACT: None -> Output[Dict[str, str]]
    # @RELATION: [CALLS] ->[APIClient.authenticate]
    def authenticate(self) -> Dict[str, str]:
        with belief_scope("SupersetClient.authenticate"):
            return self.network.authenticate()
    # [/DEF:SupersetClient.authenticate:Function]

    @property
    # [DEF:SupersetClient.headers:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Returns the base HTTP headers used by the network client.
    # @PRE: APIClient is initialized and authenticated.
@@ -80,17 +80,17 @@ class SupersetClient:
    def headers(self) -> dict:
        with belief_scope("headers"):
            return self.network.headers
    # [/DEF:SupersetClient.headers:Function]

    # [SECTION: DASHBOARD OPERATIONS]

    # [DEF:SupersetClient.get_dashboards:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Fetches the full list of dashboards, automatically handling pagination.
    # @PRE: Client is authenticated.
    # @POST: Returns a tuple with total count and list of dashboards.
    # @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
    # @RELATION: [CALLS] ->[SupersetClient._fetch_all_pages]
    def get_dashboards(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
        with belief_scope("get_dashboards"):
            app_logger.info("[get_dashboards][Enter] Fetching dashboards.")
@@ -116,15 +116,15 @@ class SupersetClient:
            total_count = len(paginated_data)
            app_logger.info("[get_dashboards][Exit] Found %d dashboards.", total_count)
            return total_count, paginated_data
    # [/DEF:SupersetClient.get_dashboards:Function]

    # [DEF:SupersetClient.get_dashboards_page:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Fetches a single dashboards page from Superset without iterating all pages.
    # @PRE: Client is authenticated.
    # @POST: Returns total count and one page of dashboards.
    # @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
    # @RELATION: [CALLS] ->[APIClient.request]
    def get_dashboards_page(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
        with belief_scope("get_dashboards_page"):
            validated_query = self._validate_query_params(query or {})
@@ -153,15 +153,15 @@ class SupersetClient:
            result = response_json.get("result", [])
            total_count = response_json.get("count", len(result))
            return total_count, result
    # [/DEF:SupersetClient.get_dashboards_page:Function]

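    # A minimal usage sketch (the query keys are assumptions about the pagination
    # params Superset's list endpoint accepts):
    #
    #   total, page_items = client.get_dashboards_page({"page": 0, "page_size": 25})
    #   total, everything = client.get_dashboards()  # iterates all pages internally
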
    # [DEF:SupersetClient.get_dashboards_summary:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Fetches dashboard metadata optimized for the grid.
    # @PRE: Client is authenticated.
    # @POST: Returns a list of dashboard metadata summaries.
    # @DATA_CONTRACT: None -> Output[List[Dict]]
    # @RELATION: [CALLS] ->[SupersetClient.get_dashboards]
    def get_dashboards_summary(self, require_slug: bool = False) -> List[Dict]:
        with belief_scope("SupersetClient.get_dashboards_summary"):
            # Rely on list endpoint default projection to stay compatible
@@ -238,15 +238,15 @@ class SupersetClient:
                f"sampled={min(len(result), max_debug_samples)})"
            )
            return result
    # [/DEF:SupersetClient.get_dashboards_summary:Function]

    # [DEF:SupersetClient.get_dashboards_summary_page:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Fetches one page of dashboard metadata optimized for the grid.
    # @PRE: page >= 1 and page_size > 0.
    # @POST: Returns mapped summaries and total dashboard count.
    # @DATA_CONTRACT: Input[page: int, page_size: int] -> Output[Tuple[int, List[Dict]]]
    # @RELATION: [CALLS] ->[SupersetClient.get_dashboards_page]
    def get_dashboards_summary_page(
        self,
        page: int,
@@ -313,7 +313,7 @@ class SupersetClient:
        return total_count, result
    # [/DEF:backend.src.core.superset_client.SupersetClient.get_dashboards_summary_page:Function]

    # [DEF:SupersetClient._extract_owner_labels:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Normalize dashboard owners payload to stable display labels.
    # @PRE: owners payload can be scalar, object or list.
@@ -339,9 +339,9 @@ class SupersetClient:
            if label and label not in normalized:
                normalized.append(label)
        return normalized
    # [/DEF:SupersetClient._extract_owner_labels:Function]

    # [DEF:SupersetClient._extract_user_display:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Normalize user payload to a stable display name.
    # @PRE: user payload can be string, dict or None.
@@ -384,43 +384,59 @@ class SupersetClient:
        return normalized
    # [/DEF:backend.src.core.superset_client.SupersetClient._sanitize_user_text:Function]

# [DEF:backend.src.core.superset_client.SupersetClient.get_dashboard:Function]
|
||||
# [DEF:SupersetClient.get_dashboard:Function]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: Fetches a single dashboard by ID.
|
||||
# @PRE: Client is authenticated and dashboard_id exists.
|
||||
# @PURPOSE: Fetches a single dashboard by ID or slug.
|
||||
# @PRE: Client is authenticated and dashboard_ref exists.
|
||||
# @POST: Returns dashboard payload from Superset API.
|
||||
# @DATA_CONTRACT: Input[dashboard_id: int] -> Output[Dict]
|
||||
# @RELATION: [CALLS] ->[self.network.request]
|
||||
def get_dashboard(self, dashboard_id: int) -> Dict:
|
||||
with belief_scope("SupersetClient.get_dashboard", f"id={dashboard_id}"):
|
||||
response = self.network.request(method="GET", endpoint=f"/dashboard/{dashboard_id}")
|
||||
# @DATA_CONTRACT: Input[dashboard_ref: Union[int, str]] -> Output[Dict]
|
||||
# @RELATION: [CALLS] ->[APIClient.request]
|
||||
def get_dashboard(self, dashboard_ref: Union[int, str]) -> Dict:
|
||||
with belief_scope("SupersetClient.get_dashboard", f"ref={dashboard_ref}"):
|
||||
response = self.network.request(method="GET", endpoint=f"/dashboard/{dashboard_ref}")
|
||||
return cast(Dict, response)
|
||||
# [/DEF:backend.src.core.superset_client.SupersetClient.get_dashboard:Function]
|
||||
# [/DEF:SupersetClient.get_dashboard:Function]
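Since `get_dashboard` now accepts either a numeric ID or a slug, both forms resolve through the same `/dashboard/{ref}` endpoint. A minimal sketch (values are hypothetical; assumes an authenticated client):

```python
by_id = client.get_dashboard(42)              # numeric ID, as before
by_slug = client.get_dashboard("sales-kpis")  # slug hits the same endpoint
```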
-# [DEF:backend.src.core.superset_client.SupersetClient.get_chart:Function]
+# [DEF:SupersetClient.get_dashboard_permalink_state:Function]
# @COMPLEXITY: 2
# @PURPOSE: Fetches stored dashboard permalink state by permalink key.
# @PRE: Client is authenticated and permalink key exists.
# @POST: Returns dashboard permalink state payload from Superset API.
# @DATA_CONTRACT: Input[permalink_key: str] -> Output[Dict]
# @RELATION: [CALLS] ->[APIClient.request]
def get_dashboard_permalink_state(self, permalink_key: str) -> Dict:
    with belief_scope("SupersetClient.get_dashboard_permalink_state", f"key={permalink_key}"):
        response = self.network.request(
            method="GET",
            endpoint=f"/dashboard/permalink/{permalink_key}"
        )
        return cast(Dict, response)
# [/DEF:SupersetClient.get_dashboard_permalink_state:Function]

# [DEF:SupersetClient.get_chart:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetches a single chart by ID.
# @PRE: Client is authenticated and chart_id exists.
# @POST: Returns chart payload from Superset API.
# @DATA_CONTRACT: Input[chart_id: int] -> Output[Dict]
-# @RELATION: [CALLS] ->[self.network.request]
+# @RELATION: [CALLS] ->[APIClient.request]
def get_chart(self, chart_id: int) -> Dict:
    with belief_scope("SupersetClient.get_chart", f"id={chart_id}"):
        response = self.network.request(method="GET", endpoint=f"/chart/{chart_id}")
        return cast(Dict, response)
-# [/DEF:backend.src.core.superset_client.SupersetClient.get_chart:Function]
+# [/DEF:SupersetClient.get_chart:Function]

-# [DEF:backend.src.core.superset_client.SupersetClient.get_dashboard_detail:Function]
+# [DEF:SupersetClient.get_dashboard_detail:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetches detailed dashboard information including related charts and datasets.
-# @PRE: Client is authenticated and dashboard_id exists.
+# @PRE: Client is authenticated and dashboard reference exists.
# @POST: Returns dashboard metadata with charts and datasets lists.
-# @DATA_CONTRACT: Input[dashboard_id: int] -> Output[Dict]
-# @RELATION: [CALLS] ->[self.get_dashboard]
-# @RELATION: [CALLS] ->[self.get_chart]
-def get_dashboard_detail(self, dashboard_id: int) -> Dict:
-    with belief_scope("SupersetClient.get_dashboard_detail", f"id={dashboard_id}"):
-        dashboard_response = self.get_dashboard(dashboard_id)
+# @DATA_CONTRACT: Input[dashboard_ref: Union[int, str]] -> Output[Dict]
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient.get_dashboard]
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient.get_chart]
+def get_dashboard_detail(self, dashboard_ref: Union[int, str]) -> Dict:
+    with belief_scope("SupersetClient.get_dashboard_detail", f"ref={dashboard_ref}"):
+        dashboard_response = self.get_dashboard(dashboard_ref)
        dashboard_data = dashboard_response.get("result", dashboard_response)

        charts: List[Dict] = []
@@ -456,7 +472,7 @@ class SupersetClient:
        try:
            charts_response = self.network.request(
                method="GET",
-                endpoint=f"/dashboard/{dashboard_id}/charts"
+                endpoint=f"/dashboard/{dashboard_ref}/charts"
            )
            charts_payload = charts_response.get("result", []) if isinstance(charts_response, dict) else []
            for chart_obj in charts_payload:
@@ -486,7 +502,7 @@ class SupersetClient:
        try:
            datasets_response = self.network.request(
                method="GET",
-                endpoint=f"/dashboard/{dashboard_id}/datasets"
+                endpoint=f"/dashboard/{dashboard_ref}/datasets"
            )
            datasets_payload = datasets_response.get("result", []) if isinstance(datasets_response, dict) else []
            for dataset_obj in datasets_payload:
@@ -592,9 +608,10 @@ class SupersetClient:
        for dataset in datasets:
            unique_datasets[dataset["id"]] = dataset

+        resolved_dashboard_id = dashboard_data.get("id", dashboard_ref)
        return {
-            "id": dashboard_data.get("id", dashboard_id),
-            "title": dashboard_data.get("dashboard_title") or dashboard_data.get("title") or f"Dashboard {dashboard_id}",
+            "id": resolved_dashboard_id,
+            "title": dashboard_data.get("dashboard_title") or dashboard_data.get("title") or f"Dashboard {resolved_dashboard_id}",
            "slug": dashboard_data.get("slug"),
            "url": dashboard_data.get("url"),
            "description": dashboard_data.get("description") or "",
@@ -607,13 +624,13 @@ class SupersetClient:
        }
# [/DEF:backend.src.core.superset_client.SupersetClient.get_dashboard_detail:Function]

-# [DEF:backend.src.core.superset_client.SupersetClient.get_charts:Function]
+# [DEF:SupersetClient.get_charts:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetches all charts with pagination support.
# @PRE: Client is authenticated.
# @POST: Returns total count and charts list.
# @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
-# @RELATION: [CALLS] ->[self._fetch_all_pages]
+# @RELATION: [CALLS] ->[SupersetClient._fetch_all_pages]
def get_charts(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
    with belief_scope("get_charts"):
        validated_query = self._validate_query_params(query or {})
@@ -625,9 +642,9 @@ class SupersetClient:
            pagination_options={"base_query": validated_query, "results_field": "result"},
        )
        return len(paginated_data), paginated_data
-# [/DEF:backend.src.core.superset_client.SupersetClient.get_charts:Function]
+# [/DEF:SupersetClient.get_charts:Function]

-# [DEF:backend.src.core.superset_client.SupersetClient._extract_chart_ids_from_layout:Function]
+# [DEF:SupersetClient._extract_chart_ids_from_layout:Function]
# @COMPLEXITY: 1
# @PURPOSE: Traverses dashboard layout metadata and extracts chart IDs from common keys.
# @PRE: payload can be dict/list/scalar.
@@ -667,7 +684,7 @@ class SupersetClient:
# @POST: Returns ZIP content and filename.
# @DATA_CONTRACT: Input[dashboard_id: int] -> Output[Tuple[bytes, str]]
# @SIDE_EFFECT: Performs network I/O to download archive.
-# @RELATION: [CALLS] ->[self.network.request]
+# @RELATION: [CALLS] ->[backend.src.core.utils.network.APIClient.request]
def export_dashboard(self, dashboard_id: int) -> Tuple[bytes, str]:
    with belief_scope("export_dashboard"):
        app_logger.info("[export_dashboard][Enter] Exporting dashboard %s.", dashboard_id)
@@ -692,8 +709,8 @@ class SupersetClient:
# @POST: Dashboard is imported or re-imported after deletion.
# @DATA_CONTRACT: Input[file_name: Union[str, Path]] -> Output[Dict]
# @SIDE_EFFECT: Performs network I/O to upload archive.
-# @RELATION: [CALLS] ->[self._do_import]
-# @RELATION: [CALLS] ->[self.delete_dashboard]
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient._do_import]
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient.delete_dashboard]
def import_dashboard(self, file_name: Union[str, Path], dash_id: Optional[int] = None, dash_slug: Optional[str] = None) -> Dict:
    with belief_scope("import_dashboard"):
        if file_name is None:
@@ -723,7 +740,7 @@ class SupersetClient:
# @PRE: dashboard_id must exist.
# @POST: Dashboard is removed from Superset.
# @SIDE_EFFECT: Deletes resource from upstream Superset environment.
-# @RELATION: [CALLS] ->[self.network.request]
+# @RELATION: [CALLS] ->[APIClient.request]
def delete_dashboard(self, dashboard_id: Union[int, str]) -> None:
    with belief_scope("delete_dashboard"):
        app_logger.info("[delete_dashboard][Enter] Deleting dashboard %s.", dashboard_id)
@@ -735,13 +752,13 @@ class SupersetClient:
        app_logger.warning("[delete_dashboard][Warning] Unexpected response while deleting %s: %s", dashboard_id, response)
# [/DEF:backend.src.core.superset_client.SupersetClient.delete_dashboard:Function]
-# [DEF:backend.src.core.superset_client.SupersetClient.get_datasets:Function]
+# [DEF:SupersetClient.get_datasets:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetches the complete list of datasets, handling pagination automatically.
# @PRE: Client is authenticated.
# @POST: Returns total count and list of datasets.
# @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
-# @RELATION: [CALLS] ->[self._fetch_all_pages]
+# @RELATION: [CALLS] ->[SupersetClient._fetch_all_pages]
def get_datasets(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
    with belief_scope("get_datasets"):
        app_logger.info("[get_datasets][Enter] Fetching datasets.")
@@ -754,9 +771,9 @@ class SupersetClient:
        total_count = len(paginated_data)
        app_logger.info("[get_datasets][Exit] Found %d datasets.", total_count)
        return total_count, paginated_data
-# [/DEF:backend.src.core.superset_client.SupersetClient.get_datasets:Function]
+# [/DEF:SupersetClient.get_datasets:Function]
-# [DEF:backend.src.core.superset_client.SupersetClient.get_datasets_summary:Function]
+# [DEF:SupersetClient.get_datasets_summary:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetches dataset metadata optimized for the Dataset Hub grid.
# @PRE: Client is authenticated.
@@ -788,8 +805,8 @@ class SupersetClient:
# @POST: Returns detailed dataset info with columns and linked dashboards.
# @PARAM: dataset_id (int) - The dataset ID to fetch details for.
# @RETURN: Dict - Dataset details with columns and linked_dashboards.
-# @RELATION: CALLS -> self.get_dataset
-# @RELATION: CALLS -> self.network.request (for related_objects)
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient.get_dataset]
+# @RELATION: [CALLS] ->[backend.src.core.utils.network.APIClient.request]
def get_dataset_detail(self, dataset_id: int) -> Dict:
    with belief_scope("SupersetClient.get_dataset_detail", f"id={dataset_id}"):
        def as_bool(value, default=False):
@@ -900,7 +917,7 @@ class SupersetClient:
# @PRE: dataset_id must exist.
# @POST: Returns dataset details.
# @DATA_CONTRACT: Input[dataset_id: int] -> Output[Dict]
-# @RELATION: [CALLS] ->[self.network.request]
+# @RELATION: [CALLS] ->[backend.src.core.utils.network.APIClient.request]
def get_dataset(self, dataset_id: int) -> Dict:
    with belief_scope("SupersetClient.get_dataset", f"id={dataset_id}"):
        app_logger.info("[get_dataset][Enter] Fetching dataset %s.", dataset_id)
@@ -910,14 +927,196 @@ class SupersetClient:
        return response
# [/DEF:backend.src.core.superset_client.SupersetClient.get_dataset:Function]
-# [DEF:backend.src.core.superset_client.SupersetClient.update_dataset:Function]
+# [DEF:SupersetClient.compile_dataset_preview:Function]
# @COMPLEXITY: 4
# @PURPOSE: Compile dataset preview SQL through the real Superset chart-data endpoint and return normalized SQL output.
# @PRE: dataset_id must be valid and template_params/effective_filters must represent the current preview session inputs.
# @POST: Returns normalized compiled SQL plus raw upstream response without guessing unsupported endpoints.
# @DATA_CONTRACT: Input[dataset_id:int, template_params:Dict, effective_filters:List[Dict]] -> Output[Dict[str, Any]]
# @RELATION: [CALLS] ->[SupersetClient.get_dataset]
# @RELATION: [CALLS] ->[SupersetClient.build_dataset_preview_query_context]
# @RELATION: [CALLS] ->[APIClient.request]
# @RELATION: [CALLS] ->[SupersetClient._extract_compiled_sql_from_chart_data_response]
# @SIDE_EFFECT: Performs upstream dataset lookup and chart-data network I/O against Superset.
def compile_dataset_preview(
    self,
    dataset_id: int,
    template_params: Optional[Dict[str, Any]] = None,
    effective_filters: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
    with belief_scope("SupersetClient.compile_dataset_preview", f"id={dataset_id}"):
        app_logger.reason(
            "Compiling dataset preview via Superset chart-data endpoint",
            extra={
                "dataset_id": dataset_id,
                "template_param_count": len(template_params or {}),
                "filter_count": len(effective_filters or []),
            },
        )
        dataset_response = self.get_dataset(dataset_id)
        dataset_record = dataset_response.get("result", dataset_response) if isinstance(dataset_response, dict) else {}
        query_context = self.build_dataset_preview_query_context(
            dataset_id=dataset_id,
            dataset_record=dataset_record,
            template_params=template_params or {},
            effective_filters=effective_filters or [],
        )
        response = self.network.request(
            method="POST",
            endpoint="/chart/data",
            data=json.dumps(query_context),
            headers={"Content-Type": "application/json"},
        )
        normalized = self._extract_compiled_sql_from_chart_data_response(response)
        normalized["query_context"] = query_context
        app_logger.reflect(
            "Dataset preview compilation returned normalized SQL payload",
            extra={
                "dataset_id": dataset_id,
                "compiled_sql_length": len(str(normalized.get("compiled_sql") or "")),
            },
        )
        return normalized
# [/DEF:backend.src.core.superset_client.SupersetClient.compile_dataset_preview:Function]
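A minimal usage sketch for the preview compilation entry point (IDs and filters are hypothetical; assumes an authenticated client):

```python
result = client.compile_dataset_preview(
    dataset_id=17,
    template_params={"region": "emea"},
    effective_filters=[{"variable_name": "region", "effective_value": "emea"}],
)
print(result["compiled_sql"])   # SQL extracted from the chart-data result[].query field
print(result["query_context"])  # the exact payload that was POSTed to /chart/data
```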
# [DEF:backend.src.core.superset_client.SupersetClient.build_dataset_preview_query_context:Function]
# @COMPLEXITY: 4
# @PURPOSE: Build a reduced-scope chart-data query context for deterministic dataset preview compilation.
# @PRE: dataset_record should come from Superset dataset detail when possible.
# @POST: Returns an explicit chart-data payload based on current session inputs and dataset metadata.
# @DATA_CONTRACT: Input[dataset_id:int,dataset_record:Dict,template_params:Dict,effective_filters:List[Dict]] -> Output[Dict[str, Any]]
# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient._normalize_effective_filters_for_query_context]
# @SIDE_EFFECT: Emits reasoning and reflection logs for deterministic preview payload construction.
def build_dataset_preview_query_context(
    self,
    dataset_id: int,
    dataset_record: Dict[str, Any],
    template_params: Dict[str, Any],
    effective_filters: List[Dict[str, Any]],
) -> Dict[str, Any]:
    with belief_scope("SupersetClient.build_dataset_preview_query_context", f"id={dataset_id}"):
        normalized_template_params = deepcopy(template_params or {})
        normalized_filters = self._normalize_effective_filters_for_query_context(effective_filters or [])

        datasource_payload: Dict[str, Any] = {
            "id": dataset_id,
            "type": "table",
        }
        datasource = dataset_record.get("datasource")
        if isinstance(datasource, dict):
            datasource_id = datasource.get("id")
            datasource_type = datasource.get("type")
            if datasource_id is not None:
                datasource_payload["id"] = datasource_id
            if datasource_type:
                datasource_payload["type"] = datasource_type

        query_object: Dict[str, Any] = {
            "filters": normalized_filters,
            "extras": {"where": ""},
            "columns": [],
            "metrics": ["count"],
            "orderby": [],
            "annotation_layers": [],
            "row_limit": 1000,
            "series_limit": 0,
            "url_params": normalized_template_params,
            "custom_params": normalized_template_params,
        }

        schema = dataset_record.get("schema")
        if schema:
            query_object["schema"] = schema

        time_range = dataset_record.get("default_time_range")
        if time_range:
            query_object["time_range"] = time_range

        result_format = dataset_record.get("result_format") or "json"
        result_type = dataset_record.get("result_type") or "full"

        return {
            "datasource": datasource_payload,
            "queries": [query_object],
            "form_data": {
                "datasource": f"{datasource_payload['id']}__{datasource_payload['type']}",
                "viz_type": "table",
                "slice_id": None,
                "query_mode": "raw",
                "url_params": normalized_template_params,
            },
            "result_format": result_format,
            "result_type": result_type,
        }
# [/DEF:backend.src.core.superset_client.SupersetClient.build_dataset_preview_query_context:Function]
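For reference, the payload built for a table dataset with one template parameter would look roughly like this (values are illustrative):

```python
{
    "datasource": {"id": 17, "type": "table"},
    "queries": [{
        "filters": [{"col": "region", "op": "==", "val": "emea"}],
        "extras": {"where": ""},
        "columns": [],
        "metrics": ["count"],
        "orderby": [],
        "annotation_layers": [],
        "row_limit": 1000,
        "series_limit": 0,
        "url_params": {"region": "emea"},
        "custom_params": {"region": "emea"},
    }],
    "form_data": {
        "datasource": "17__table",
        "viz_type": "table",
        "slice_id": None,
        "query_mode": "raw",
        "url_params": {"region": "emea"},
    },
    "result_format": "json",
    "result_type": "full",
}
```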
# [DEF:backend.src.core.superset_client.SupersetClient._normalize_effective_filters_for_query_context:Function]
# @COMPLEXITY: 3
# @PURPOSE: Convert execution mappings into Superset chart-data filter objects.
# @PRE: effective_filters may contain mapping metadata and arbitrary scalar/list values.
# @POST: Returns only valid filter dictionaries suitable for the chart-data query payload.
def _normalize_effective_filters_for_query_context(
    self,
    effective_filters: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    with belief_scope("SupersetClient._normalize_effective_filters_for_query_context"):
        normalized_filters: List[Dict[str, Any]] = []
        for item in effective_filters:
            if not isinstance(item, dict):
                continue
            column = str(item.get("variable_name") or item.get("filter_name") or "").strip()
            if not column:
                continue
            value = item.get("effective_value")
            if value is None:
                continue

            operator = "IN" if isinstance(value, list) else "=="
            normalized_filters.append(
                {
                    "col": column,
                    "op": operator,
                    "val": value,
                }
            )
        return normalized_filters
# [/DEF:backend.src.core.superset_client.SupersetClient._normalize_effective_filters_for_query_context:Function]
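The mapping rules above reduce to a small truth table (inputs are hypothetical):

```python
# {"variable_name": "region", "effective_value": "emea"}         -> {"col": "region", "op": "==", "val": "emea"}
# {"filter_name": "status", "effective_value": ["new", "open"]}  -> {"col": "status", "op": "IN", "val": ["new", "open"]}
# {"variable_name": "limit", "effective_value": None}            -> skipped (None values are dropped)
# "not-a-dict"                                                   -> skipped (non-dict items are ignored)
```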
# [DEF:backend.src.core.superset_client.SupersetClient._extract_compiled_sql_from_chart_data_response:Function]
# @COMPLEXITY: 3
# @PURPOSE: Normalize compiled SQL from a chart-data response by reading result[].query fields first.
# @PRE: response must be the decoded response body from /api/v1/chart/data.
# @POST: Returns compiled SQL and raw response or raises SupersetAPIError when the endpoint does not expose query text.
def _extract_compiled_sql_from_chart_data_response(self, response: Any) -> Dict[str, Any]:
    with belief_scope("SupersetClient._extract_compiled_sql_from_chart_data_response"):
        if not isinstance(response, dict):
            raise SupersetAPIError("Superset chart/data response was not a JSON object")

        result_payload = response.get("result")
        if not isinstance(result_payload, list):
            raise SupersetAPIError("Superset chart/data response did not include a result list")

        for item in result_payload:
            if not isinstance(item, dict):
                continue
            compiled_sql = str(item.get("query") or "").strip()
            if compiled_sql:
                return {
                    "compiled_sql": compiled_sql,
                    "raw_response": response,
                }

        raise SupersetAPIError("Superset chart/data response did not expose compiled SQL in result[].query")
# [/DEF:backend.src.core.superset_client.SupersetClient._extract_compiled_sql_from_chart_data_response:Function]
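The only response shape this helper accepts looks like the following (the SQL is illustrative); anything without a non-empty `result[].query` raises `SupersetAPIError`:

```python
{
    "result": [
        {"query": "SELECT count(*) AS count FROM sales WHERE region = 'emea'"}
    ]
}
```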
+# [DEF:SupersetClient.update_dataset:Function]
# @COMPLEXITY: 3
# @PURPOSE: Updates a dataset's data by its ID.
# @PRE: dataset_id must exist.
# @POST: Dataset is updated in Superset.
# @DATA_CONTRACT: Input[dataset_id: int, data: Dict] -> Output[Dict]
# @SIDE_EFFECT: Modifies resource in upstream Superset environment.
-# @RELATION: [CALLS] ->[self.network.request]
+# @RELATION: [CALLS] ->[APIClient.request]
def update_dataset(self, dataset_id: int, data: Dict) -> Dict:
    with belief_scope("SupersetClient.update_dataset", f"id={dataset_id}"):
        app_logger.info("[update_dataset][Enter] Updating dataset %s.", dataset_id)
@@ -930,15 +1129,15 @@ class SupersetClient:
        response = cast(Dict, response)
        app_logger.info("[update_dataset][Exit] Updated dataset %s.", dataset_id)
        return response
-# [/DEF:backend.src.core.superset_client.SupersetClient.update_dataset:Function]
+# [/DEF:SupersetClient.update_dataset:Function]

-# [DEF:backend.src.core.superset_client.SupersetClient.get_databases:Function]
+# [DEF:SupersetClient.get_databases:Function]
# @COMPLEXITY: 3
# @PURPOSE: Fetches the complete list of databases.
# @PRE: Client is authenticated.
# @POST: Returns total count and list of databases.
# @DATA_CONTRACT: Input[query: Optional[Dict]] -> Output[Tuple[int, List[Dict]]]
-# @RELATION: [CALLS] ->[self._fetch_all_pages]
+# @RELATION: [CALLS] ->[SupersetClient._fetch_all_pages]
def get_databases(self, query: Optional[Dict] = None) -> Tuple[int, List[Dict]]:
    with belief_scope("get_databases"):
        app_logger.info("[get_databases][Enter] Fetching databases.")
@@ -953,7 +1152,7 @@ class SupersetClient:
        total_count = len(paginated_data)
        app_logger.info("[get_databases][Exit] Found %d databases.", total_count)
        return total_count, paginated_data
-# [/DEF:backend.src.core.superset_client.SupersetClient.get_databases:Function]
+# [/DEF:SupersetClient.get_databases:Function]
# [DEF:backend.src.core.superset_client.SupersetClient.get_database:Function]
# @COMPLEXITY: 3
@@ -961,7 +1160,7 @@ class SupersetClient:
# @PRE: database_id must exist.
# @POST: Returns database details.
# @DATA_CONTRACT: Input[database_id: int] -> Output[Dict]
-# @RELATION: [CALLS] ->[self.network.request]
+# @RELATION: [CALLS] ->[backend.src.core.utils.network.APIClient.request]
def get_database(self, database_id: int) -> Dict:
    with belief_scope("get_database"):
        app_logger.info("[get_database][Enter] Fetching database %s.", database_id)
@@ -977,7 +1176,7 @@ class SupersetClient:
# @PRE: Client is authenticated.
# @POST: Returns list of database summaries.
# @DATA_CONTRACT: None -> Output[List[Dict]]
-# @RELATION: [CALLS] ->[self.get_databases]
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient.get_databases]
def get_databases_summary(self) -> List[Dict]:
    with belief_scope("SupersetClient.get_databases_summary"):
        query = {
@@ -998,7 +1197,7 @@ class SupersetClient:
# @PRE: db_uuid must be a valid UUID string.
# @POST: Returns database info or None.
# @DATA_CONTRACT: Input[db_uuid: str] -> Output[Optional[Dict]]
-# @RELATION: [CALLS] ->[self.get_databases]
+# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient.get_databases]
def get_database_by_uuid(self, db_uuid: str) -> Optional[Dict]:
    with belief_scope("SupersetClient.get_database_by_uuid", f"uuid={db_uuid}"):
        query = {
@@ -1008,12 +1207,12 @@ class SupersetClient:
        return databases[0] if databases else None
# [/DEF:backend.src.core.superset_client.SupersetClient.get_database_by_uuid:Function]

-# [DEF:backend.src.core.superset_client.SupersetClient._resolve_target_id_for_delete:Function]
+# [DEF:SupersetClient._resolve_target_id_for_delete:Function]
# @COMPLEXITY: 1
# @PURPOSE: Resolves a dashboard ID from either an ID or a slug.
# @PRE: Either dash_id or dash_slug should be provided.
# @POST: Returns the resolved ID or None.
-# @RELATION: [CALLS] ->[self.get_dashboards]
+# @RELATION: [CALLS] ->[SupersetClient.get_dashboards]
def _resolve_target_id_for_delete(self, dash_id: Optional[int], dash_slug: Optional[str]) -> Optional[int]:
    with belief_scope("_resolve_target_id_for_delete"):
        if dash_id is not None:
@@ -1029,14 +1228,14 @@ class SupersetClient:
        except Exception as e:
            app_logger.warning("[_resolve_target_id_for_delete][Warning] Could not resolve slug '%s' to ID: %s", dash_slug, e)
        return None
-# [/DEF:backend.src.core.superset_client.SupersetClient._resolve_target_id_for_delete:Function]
+# [/DEF:SupersetClient._resolve_target_id_for_delete:Function]

-# [DEF:backend.src.core.superset_client.SupersetClient._do_import:Function]
+# [DEF:SupersetClient._do_import:Function]
# @COMPLEXITY: 1
# @PURPOSE: Performs the actual multipart upload for import.
# @PRE: file_name must be a path to an existing ZIP file.
# @POST: Returns the API response from the upload.
-# @RELATION: [CALLS] ->[self.network.upload_file]
+# @RELATION: [CALLS] ->[APIClient.upload_file]
def _do_import(self, file_name: Union[str, Path]) -> Dict:
    with belief_scope("_do_import"):
        app_logger.debug(f"[_do_import][State] Uploading file: {file_name}")
@@ -1051,7 +1250,7 @@ class SupersetClient:
            extra_data={"overwrite": "true"},
            timeout=self.env.timeout * 2,
        )
-# [/DEF:backend.src.core.superset_client.SupersetClient._do_import:Function]
+# [/DEF:SupersetClient._do_import:Function]

# [DEF:backend.src.core.superset_client.SupersetClient._validate_export_response:Function]
# @COMPLEXITY: 1
@@ -1101,7 +1300,7 @@ class SupersetClient:
# @PURPOSE: Fetches the total number of items for a given endpoint.
# @PRE: endpoint must be a valid Superset API path.
# @POST: Returns the total count as an integer.
-# @RELATION: [CALLS] ->[self.network.fetch_paginated_count]
+# @RELATION: [CALLS] ->[backend.src.core.utils.network.APIClient.fetch_paginated_count]
def _fetch_total_object_count(self, endpoint: str) -> int:
    with belief_scope("_fetch_total_object_count"):
        return self.network.fetch_paginated_count(
backend/src/core/utils/async_network.py
@@ -1,4 +1,4 @@
-# [DEF:backend.src.core.utils.async_network:Module]
+# [DEF:AsyncNetworkModule:Module]
#
# @COMPLEXITY: 5
# @SEMANTICS: network, httpx, async, superset, authentication, cache
@@ -8,7 +8,7 @@
# @POST: Async network clients reuse cached auth tokens and expose stable async request/error translation flow.
# @SIDE_EFFECT: Performs upstream HTTP I/O and mutates process-local auth cache entries.
# @DATA_CONTRACT: Input[config: Dict[str, Any]] -> Output[authenticated async Superset HTTP interactions]
-# @RELATION: DEPENDS_ON -> backend.src.core.utils.network.SupersetAuthCache
+# @RELATION: [DEPENDS_ON] ->[SupersetAuthCache]
# @INVARIANT: Async client reuses cached auth tokens per environment credentials and invalidates on 401.

# [SECTION: IMPORTS]
@@ -29,22 +29,24 @@ from .network import (
# [/SECTION]


-# [DEF:backend.src.core.utils.async_network.AsyncAPIClient:Class]
+# [DEF:AsyncAPIClient:Class]
# @COMPLEXITY: 3
# @PURPOSE: Async Superset API client backed by httpx.AsyncClient with shared auth cache.
-# @RELATION: [DEPENDS_ON] ->[backend.src.core.utils.network.SupersetAuthCache]
-# @RELATION: [CALLS] ->[backend.src.core.utils.network.SupersetAuthCache.get]
-# @RELATION: [CALLS] ->[backend.src.core.utils.network.SupersetAuthCache.set]
+# @RELATION: [DEPENDS_ON] ->[SupersetAuthCache]
+# @RELATION: [CALLS] ->[SupersetAuthCache.get]
+# @RELATION: [CALLS] ->[SupersetAuthCache.set]
class AsyncAPIClient:
    DEFAULT_TIMEOUT = 30
    _auth_locks: Dict[tuple[str, str, bool], asyncio.Lock] = {}

-    # [DEF:backend.src.core.utils.async_network.AsyncAPIClient.__init__:Function]
+    # [DEF:AsyncAPIClient.__init__:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Initialize async API client for one environment.
    # @PRE: config contains base_url and auth payload.
    # @POST: Client is ready for async request/authentication flow.
    # @DATA_CONTRACT: Input[config: Dict[str, Any]] -> self._auth_cache_key[str]
    # @RELATION: [CALLS] ->[AsyncAPIClient._normalize_base_url]
    # @RELATION: [DEPENDS_ON] ->[SupersetAuthCache]
    def __init__(self, config: Dict[str, Any], verify_ssl: bool = True, timeout: int = DEFAULT_TIMEOUT):
        self.base_url: str = self._normalize_base_url(config.get("base_url", ""))
        self.api_base_url: str = f"{self.base_url}/api/v1"
@@ -63,9 +65,9 @@ class AsyncAPIClient:
            verify_ssl,
        )

-    # [/DEF:__init__:Function]
+    # [/DEF:AsyncAPIClient.__init__:Function]

-    # [DEF:backend.src.core.utils.async_network.AsyncAPIClient._normalize_base_url:Function]
+    # [DEF:AsyncAPIClient._normalize_base_url:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Normalize base URL for Superset API root construction.
    # @POST: Returns canonical base URL without trailing slash and duplicate /api/v1 suffix.
@@ -74,9 +76,9 @@ class AsyncAPIClient:
        if normalized.lower().endswith("/api/v1"):
            normalized = normalized[:-len("/api/v1")]
        return normalized.rstrip("/")
-    # [/DEF:_normalize_base_url:Function]
+    # [/DEF:AsyncAPIClient._normalize_base_url:Function]

-    # [DEF:_build_api_url:Function]
+    # [DEF:AsyncAPIClient._build_api_url:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Build full API URL from relative Superset endpoint.
    # @POST: Returns absolute URL for upstream request.
@@ -89,9 +91,9 @@ class AsyncAPIClient:
        if normalized_endpoint.startswith("/api/v1/") or normalized_endpoint == "/api/v1":
            return f"{self.base_url}{normalized_endpoint}"
        return f"{self.api_base_url}{normalized_endpoint}"
-    # [/DEF:_build_api_url:Function]
+    # [/DEF:AsyncAPIClient._build_api_url:Function]

-    # [DEF:_get_auth_lock:Function]
+    # [DEF:AsyncAPIClient._get_auth_lock:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Return per-cache-key async lock to serialize fresh login attempts.
    # @POST: Returns stable asyncio.Lock instance.
@@ -103,14 +105,16 @@ class AsyncAPIClient:
        created_lock = asyncio.Lock()
        cls._auth_locks[cache_key] = created_lock
        return created_lock
-    # [/DEF:_get_auth_lock:Function]
+    # [/DEF:AsyncAPIClient._get_auth_lock:Function]

-    # [DEF:authenticate:Function]
+    # [DEF:AsyncAPIClient.authenticate:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Authenticate against Superset and cache access/csrf tokens.
    # @POST: Client tokens are populated and reusable across requests.
    # @SIDE_EFFECT: Performs network requests to Superset authentication endpoints.
    # @DATA_CONTRACT: None -> Output[Dict[str, str]]
    # @RELATION: [CALLS] ->[SupersetAuthCache.get]
    # @RELATION: [CALLS] ->[SupersetAuthCache.set]
    async def authenticate(self) -> Dict[str, str]:
        cached_tokens = SupersetAuthCache.get(self._auth_cache_key)
        if cached_tokens and cached_tokens.get("access_token") and cached_tokens.get("csrf_token"):
@@ -163,13 +167,13 @@ class AsyncAPIClient:
        except (httpx.HTTPError, KeyError) as exc:
            SupersetAuthCache.invalidate(self._auth_cache_key)
            raise NetworkError(f"Network or parsing error during authentication: {exc}") from exc
-    # [/DEF:authenticate:Function]
+    # [/DEF:AsyncAPIClient.authenticate:Function]
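A minimal end-to-end sketch of the async client (the config values, and in particular the exact shape of the auth payload, are assumptions for illustration):

```python
import asyncio

async def main() -> None:
    client = AsyncAPIClient({
        "base_url": "https://superset.example.com",
        "auth": {"username": "svc-account", "password": "secret"},  # assumed auth shape
    })
    await client.authenticate()           # a cache hit skips the network round-trip
    headers = await client.get_headers()  # Authorization + CSRF headers
    await client.aclose()

asyncio.run(main())
```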
-    # [DEF:get_headers:Function]
+    # [DEF:AsyncAPIClient.get_headers:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Return authenticated Superset headers for async requests.
    # @POST: Headers include Authorization and CSRF tokens.
-    # @RELATION: CALLS -> self.authenticate
+    # @RELATION: [CALLS] ->[AsyncAPIClient.authenticate]
    async def get_headers(self) -> Dict[str, str]:
        if not self._authenticated:
            await self.authenticate()
@@ -179,16 +183,16 @@ class AsyncAPIClient:
            "Referer": self.base_url,
            "Content-Type": "application/json",
        }
-    # [/DEF:get_headers:Function]
+    # [/DEF:AsyncAPIClient.get_headers:Function]

-    # [DEF:request:Function]
+    # [DEF:AsyncAPIClient.request:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Perform one authenticated async Superset API request.
    # @POST: Returns JSON payload or raw httpx.Response when raw_response=true.
    # @SIDE_EFFECT: Performs network I/O.
-    # @RELATION: [CALLS] ->[self.get_headers]
-    # @RELATION: [CALLS] ->[self._handle_http_error]
-    # @RELATION: [CALLS] ->[self._handle_network_error]
+    # @RELATION: [CALLS] ->[AsyncAPIClient.get_headers]
+    # @RELATION: [CALLS] ->[AsyncAPIClient._handle_http_error]
+    # @RELATION: [CALLS] ->[AsyncAPIClient._handle_network_error]
    async def request(
        self,
        method: str,
@@ -216,9 +220,9 @@ class AsyncAPIClient:
            self._handle_http_error(exc, endpoint)
        except httpx.HTTPError as exc:
            self._handle_network_error(exc, full_url)
-    # [/DEF:request:Function]
+    # [/DEF:AsyncAPIClient.request:Function]

-    # [DEF:_handle_http_error:Function]
+    # [DEF:AsyncAPIClient._handle_http_error:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Translate upstream HTTP errors into stable domain exceptions.
    # @POST: Raises domain-specific exception for caller flow control.
@@ -229,15 +233,40 @@ class AsyncAPIClient:
        if status_code in [502, 503, 504]:
            raise NetworkError(f"Environment unavailable (Status {status_code})", status_code=status_code) from exc
        if status_code == 404:
-            raise DashboardNotFoundError(endpoint) from exc
+            if self._is_dashboard_endpoint(endpoint):
+                raise DashboardNotFoundError(endpoint) from exc
+            raise SupersetAPIError(
+                f"API resource not found at endpoint '{endpoint}'",
+                status_code=status_code,
+                endpoint=endpoint,
+                subtype="not_found",
+            ) from exc
        if status_code == 403:
            raise PermissionDeniedError() from exc
        if status_code == 401:
            raise AuthenticationError() from exc
        raise SupersetAPIError(f"API Error {status_code}: {exc.response.text}") from exc
-    # [/DEF:_handle_http_error:Function]
+    # [/DEF:AsyncAPIClient._handle_http_error:Function]

-    # [DEF:_handle_network_error:Function]
+    # [DEF:AsyncAPIClient._is_dashboard_endpoint:Function]
+    # @COMPLEXITY: 2
+    # @PURPOSE: Determine whether an API endpoint represents a dashboard resource for 404 translation.
+    # @POST: Returns true only for dashboard-specific endpoints.
+    def _is_dashboard_endpoint(self, endpoint: str) -> bool:
+        normalized_endpoint = str(endpoint or "").strip().lower()
+        if not normalized_endpoint:
+            return False
+        if normalized_endpoint.startswith("http://") or normalized_endpoint.startswith("https://"):
+            try:
+                normalized_endpoint = "/" + normalized_endpoint.split("/api/v1", 1)[1].lstrip("/")
+            except IndexError:
+                return False
+        if normalized_endpoint.startswith("/api/v1/"):
+            normalized_endpoint = normalized_endpoint[len("/api/v1"):]
+        return normalized_endpoint.startswith("/dashboard/") or normalized_endpoint == "/dashboard"
+    # [/DEF:backend.src.core.utils.async_network.AsyncAPIClient._is_dashboard_endpoint:Function]
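The normalization rules translate into classifications like these (endpoints are examples):

```python
# "/dashboard/42"                     -> True
# "/api/v1/dashboard/sales-kpis"      -> True   (the /api/v1 prefix is stripped first)
# "https://host/api/v1/dashboard/42"  -> True   (absolute URLs are reduced to their API path)
# "/chart/7"                          -> False
# ""                                  -> False
```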
+    # [DEF:backend.src.core.utils.async_network.AsyncAPIClient._handle_network_error:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Translate generic httpx errors into NetworkError.
    # @POST: Raises NetworkError with URL context.
@@ -251,16 +280,16 @@ class AsyncAPIClient:
        else:
            message = f"Unknown network error: {exc}"
        raise NetworkError(message, url=url) from exc
-    # [/DEF:_handle_network_error:Function]
+    # [/DEF:backend.src.core.utils.async_network.AsyncAPIClient._handle_network_error:Function]

-    # [DEF:aclose:Function]
+    # [DEF:backend.src.core.utils.async_network.AsyncAPIClient.aclose:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Close underlying httpx client.
    # @POST: Client resources are released.
    # @SIDE_EFFECT: Closes network connections.
    async def aclose(self) -> None:
        await self._client.aclose()
-    # [/DEF:aclose:Function]
-# [/DEF:AsyncAPIClient:Class]
+    # [/DEF:backend.src.core.utils.async_network.AsyncAPIClient.aclose:Function]
+# [/DEF:backend.src.core.utils.async_network.AsyncAPIClient:Class]

# [/DEF:backend.src.core.utils.async_network:Module]

backend/src/core/utils/network.py
@@ -1,11 +1,10 @@
-# [DEF:network:Module]
+# [DEF:NetworkModule:Module]
#
# @COMPLEXITY: 3
# @SEMANTICS: network, http, client, api, requests, session, authentication
# @PURPOSE: Encapsulates the low-level HTTP logic for interacting with the Superset API, including authentication, session management, retry logic, and error handling.
# @LAYER: Infra
-# @RELATION: DEPENDS_ON -> backend.src.core.logger
-# @RELATION: DEPENDS_ON -> requests
+# @RELATION: [DEPENDS_ON] ->[LoggerModule]
# @PUBLIC_API: APIClient

# [SECTION: IMPORTS]
@@ -82,7 +81,7 @@ class DashboardNotFoundError(SupersetAPIError):
# [DEF:NetworkError:Class]
# @PURPOSE: Exception raised when a network level error occurs.
class NetworkError(Exception):
-    # [DEF:network.APIClient.__init__:Function]
+    # [DEF:NetworkError.__init__:Function]
    # @PURPOSE: Initializes the network error.
    # @PRE: message is a string.
    # @POST: NetworkError is initialized.
@@ -90,11 +89,11 @@ class NetworkError(Exception):
        with belief_scope("NetworkError.__init__"):
            self.context = context
            super().__init__(f"[NETWORK_FAILURE] {message} | Context: {self.context}")
-    # [/DEF:__init__:Function]
+    # [/DEF:NetworkError.__init__:Function]
# [/DEF:NetworkError:Class]


-# [DEF:network.SupersetAuthCache:Class]
+# [DEF:SupersetAuthCache:Class]
# @PURPOSE: Process-local cache for Superset access/csrf tokens keyed by environment credentials.
# @PRE: base_url and username are stable strings.
# @POST: Cached entries expire automatically by TTL and can be reused across requests.
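A minimal sketch of the intended cache flow (the token-dict shape, the `set` signature, and the `fresh_login` helper are assumptions for illustration; `cache_key` is the environment-scoped key string):

```python
cached = SupersetAuthCache.get(cache_key)       # None on miss or TTL expiry
if not cached:
    tokens = fresh_login()                      # hypothetical login helper
    SupersetAuthCache.set(cache_key, tokens)    # e.g. {"access_token": ..., "csrf_token": ...}
```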
@@ -152,8 +151,8 @@ class SupersetAuthCache:
# [DEF:APIClient:Class]
# @COMPLEXITY: 3
# @PURPOSE: Synchronous Superset API client with process-local auth token caching.
-# @RELATION: DEPENDS_ON -> network.SupersetAuthCache
-# @RELATION: DEPENDS_ON -> logger
+# @RELATION: [DEPENDS_ON] ->[SupersetAuthCache]
+# @RELATION: [DEPENDS_ON] ->[LoggerModule]
class APIClient:
    DEFAULT_TIMEOUT = 30

@@ -256,7 +255,7 @@ class APIClient:
        return f"{self.api_base_url}{normalized_endpoint}"
    # [/DEF:_build_api_url:Function]

-    # [DEF:authenticate:Function]
+    # [DEF:APIClient.authenticate:Function]
    # @PURPOSE: Authenticates against the Superset API and obtains the access and CSRF tokens.
    # @PRE: self.auth and self.base_url must be valid.
    # @POST: `self._tokens` is populated and `self._authenticated` is set to `True`.
@@ -364,7 +363,14 @@ class APIClient:
        if status_code == 502 or status_code == 503 or status_code == 504:
            raise NetworkError(f"Environment unavailable (Status {status_code})", status_code=status_code) from e
        if status_code == 404:
-            raise DashboardNotFoundError(endpoint) from e
+            if self._is_dashboard_endpoint(endpoint):
+                raise DashboardNotFoundError(endpoint) from e
+            raise SupersetAPIError(
+                f"API resource not found at endpoint '{endpoint}'",
+                status_code=status_code,
+                endpoint=endpoint,
+                subtype="not_found",
+            ) from e
        if status_code == 403:
            raise PermissionDeniedError() from e
        if status_code == 401:
@@ -372,6 +378,24 @@ class APIClient:
        raise SupersetAPIError(f"API Error {status_code}: {e.response.text}") from e
    # [/DEF:_handle_http_error:Function]

+    # [DEF:_is_dashboard_endpoint:Function]
+    # @PURPOSE: Determine whether an API endpoint represents a dashboard resource for 404 translation.
+    # @PRE: endpoint may be relative or absolute.
+    # @POST: Returns true only for dashboard-specific endpoints.
+    def _is_dashboard_endpoint(self, endpoint: str) -> bool:
+        normalized_endpoint = str(endpoint or "").strip().lower()
+        if not normalized_endpoint:
+            return False
+        if normalized_endpoint.startswith("http://") or normalized_endpoint.startswith("https://"):
+            try:
+                normalized_endpoint = "/" + normalized_endpoint.split("/api/v1", 1)[1].lstrip("/")
+            except IndexError:
+                return False
+        if normalized_endpoint.startswith("/api/v1/"):
+            normalized_endpoint = normalized_endpoint[len("/api/v1"):]
+        return normalized_endpoint.startswith("/dashboard/") or normalized_endpoint == "/dashboard"
+    # [/DEF:_is_dashboard_endpoint:Function]

    # [DEF:_handle_network_error:Function]
    # @PURPOSE: (Helper) Converts network errors into `NetworkError`.
    # @PARAM: e (requests.exceptions.RequestException) - The error.
@@ -505,4 +529,4 @@ class APIClient:

# [/DEF:APIClient:Class]

-# [/DEF:backend.core.utils.network:Module]
+# [/DEF:NetworkModule:Module]
backend/src/core/utils/superset_compilation_adapter.py (new file, 354 lines)
@@ -0,0 +1,354 @@
# [DEF:SupersetCompilationAdapter:Module]
# @COMPLEXITY: 4
# @SEMANTICS: dataset_review, superset, compilation_preview, sql_lab_launch, execution_truth
# @PURPOSE: Interact with Superset preview compilation and SQL Lab execution endpoints using the current approved execution context.
# @LAYER: Infra
# @RELATION: [CALLS] ->[SupersetClient]
# @RELATION: [DEPENDS_ON] ->[CompiledPreview]
# @PRE: effective template params and dataset execution reference are available.
# @POST: preview and launch calls return Superset-originated artifacts or explicit errors.
# @SIDE_EFFECT: performs upstream Superset preview and SQL Lab calls.
# @INVARIANT: The adapter never fabricates compiled SQL locally; preview truth is delegated to Superset only.

from __future__ import annotations

# [DEF:SupersetCompilationAdapter.imports:Block]
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional

from src.core.config_models import Environment
from src.core.logger import belief_scope, logger
from src.core.superset_client import SupersetClient
from src.models.dataset_review import CompiledPreview, PreviewStatus
# [/DEF:SupersetCompilationAdapter.imports:Block]


# [DEF:PreviewCompilationPayload:Class]
# @COMPLEXITY: 2
# @PURPOSE: Typed preview payload for Superset-side compilation.
@dataclass(frozen=True)
class PreviewCompilationPayload:
    session_id: str
    dataset_id: int
    preview_fingerprint: str
    template_params: Dict[str, Any]
    effective_filters: List[Dict[str, Any]]
# [/DEF:PreviewCompilationPayload:Class]


# [DEF:SqlLabLaunchPayload:Class]
# @COMPLEXITY: 2
# @PURPOSE: Typed SQL Lab payload for audited launch handoff.
@dataclass(frozen=True)
class SqlLabLaunchPayload:
    session_id: str
    dataset_id: int
    preview_id: str
    compiled_sql: str
    template_params: Dict[str, Any]
# [/DEF:SqlLabLaunchPayload:Class]


# [DEF:SupersetCompilationAdapter:Class]
# @COMPLEXITY: 4
# @PURPOSE: Delegate preview compilation and SQL Lab launch to Superset without local SQL fabrication.
# @RELATION: [CALLS] ->[SupersetClient]
# @PRE: environment is configured and Superset is reachable for the target session.
# @POST: adapter can return explicit ready/failed preview artifacts and canonical SQL Lab references.
# @SIDE_EFFECT: issues network requests to Superset API surfaces.
class SupersetCompilationAdapter:
    # [DEF:SupersetCompilationAdapter.__init__:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Bind adapter to one Superset environment and client instance.
    def __init__(self, environment: Environment, client: Optional[SupersetClient] = None) -> None:
        self.environment = environment
        self.client = client or SupersetClient(environment)
    # [/DEF:SupersetCompilationAdapter.__init__:Function]

    # [DEF:SupersetCompilationAdapter.compile_preview:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Request Superset-side compiled SQL preview for the current effective inputs.
    # @RELATION: [CALLS] ->[SupersetCompilationAdapter._request_superset_preview]
    # @PRE: dataset_id and effective inputs are available for the current session.
    # @POST: returns a ready or failed preview artifact backed only by Superset-originated SQL or diagnostics.
    # @SIDE_EFFECT: performs upstream preview requests.
    # @DATA_CONTRACT: Input[PreviewCompilationPayload] -> Output[CompiledPreview]
    def compile_preview(self, payload: PreviewCompilationPayload) -> CompiledPreview:
        with belief_scope("SupersetCompilationAdapter.compile_preview"):
            if payload.dataset_id <= 0:
                logger.explore(
                    "Preview compilation rejected because dataset identifier is invalid",
                    extra={"dataset_id": payload.dataset_id, "session_id": payload.session_id},
                )
                raise ValueError("dataset_id must be a positive integer")

            logger.reason(
                "Requesting Superset-generated SQL preview",
                extra={
                    "session_id": payload.session_id,
                    "dataset_id": payload.dataset_id,
                    "template_param_count": len(payload.template_params),
                    "filter_count": len(payload.effective_filters),
                },
            )

            try:
                preview_result = self._request_superset_preview(payload)
            except Exception as exc:
                logger.explore(
                    "Superset preview compilation failed with explicit upstream error",
                    extra={
                        "session_id": payload.session_id,
                        "dataset_id": payload.dataset_id,
                        "error": str(exc),
                    },
                )
                return CompiledPreview(
                    session_id=payload.session_id,
                    preview_status=PreviewStatus.FAILED,
                    compiled_sql=None,
                    preview_fingerprint=payload.preview_fingerprint,
                    compiled_by="superset",
                    error_code="superset_preview_failed",
                    error_details=str(exc),
                    compiled_at=None,
                )

            compiled_sql = str(preview_result.get("compiled_sql") or "").strip()
            if not compiled_sql:
                logger.explore(
                    "Superset preview response did not include compiled SQL",
                    extra={
                        "session_id": payload.session_id,
                        "dataset_id": payload.dataset_id,
                        "response_keys": sorted(preview_result.keys()),
                    },
                )
                return CompiledPreview(
                    session_id=payload.session_id,
                    preview_status=PreviewStatus.FAILED,
                    compiled_sql=None,
                    preview_fingerprint=payload.preview_fingerprint,
                    compiled_by="superset",
                    error_code="superset_preview_empty",
                    error_details="Superset preview response did not include compiled SQL",
                    compiled_at=None,
                )

            preview = CompiledPreview(
                session_id=payload.session_id,
                preview_status=PreviewStatus.READY,
                compiled_sql=compiled_sql,
                preview_fingerprint=payload.preview_fingerprint,
                compiled_by="superset",
                error_code=None,
                error_details=None,
                compiled_at=datetime.utcnow(),
            )
            logger.reflect(
                "Superset-generated SQL preview captured successfully",
                extra={
                    "session_id": payload.session_id,
                    "dataset_id": payload.dataset_id,
                    "compiled_sql_length": len(compiled_sql),
                },
            )
            return preview
    # [/DEF:SupersetCompilationAdapter.compile_preview:Function]
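A minimal usage sketch for the adapter (payload values are hypothetical; assumes a configured `Environment` instance named `environment`):

```python
adapter = SupersetCompilationAdapter(environment)
preview = adapter.compile_preview(PreviewCompilationPayload(
    session_id="sess-1",
    dataset_id=17,
    preview_fingerprint="fp-abc123",
    template_params={"region": "emea"},
    effective_filters=[{"variable_name": "region", "effective_value": "emea"}],
))
if preview.preview_status is PreviewStatus.READY:
    print(preview.compiled_sql)
else:
    print(preview.error_code, preview.error_details)
```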
    # [DEF:SupersetCompilationAdapter.mark_preview_stale:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Invalidate previous preview after mapping or value changes.
    # @PRE: preview is a persisted preview artifact or current in-memory snapshot.
    # @POST: preview status becomes stale without fabricating a replacement artifact.
    def mark_preview_stale(self, preview: CompiledPreview) -> CompiledPreview:
        preview.preview_status = PreviewStatus.STALE
        return preview
    # [/DEF:SupersetCompilationAdapter.mark_preview_stale:Function]

    # [DEF:SupersetCompilationAdapter.create_sql_lab_session:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Create the canonical audited execution session after all launch gates pass.
    # @RELATION: [CALLS] ->[SupersetCompilationAdapter._request_sql_lab_session]
    # @PRE: compiled_sql is Superset-originated and launch gates are already satisfied.
    # @POST: returns one canonical SQL Lab session reference from Superset.
    # @SIDE_EFFECT: performs upstream SQL Lab execution/session creation.
    # @DATA_CONTRACT: Input[SqlLabLaunchPayload] -> Output[str]
    def create_sql_lab_session(self, payload: SqlLabLaunchPayload) -> str:
        with belief_scope("SupersetCompilationAdapter.create_sql_lab_session"):
            compiled_sql = str(payload.compiled_sql or "").strip()
            if not compiled_sql:
                logger.explore(
                    "SQL Lab launch rejected because compiled SQL is empty",
                    extra={"session_id": payload.session_id, "preview_id": payload.preview_id},
                )
                raise ValueError("compiled_sql must be non-empty")

            logger.reason(
                "Creating SQL Lab execution session from Superset-originated preview",
                extra={
                    "session_id": payload.session_id,
                    "dataset_id": payload.dataset_id,
                    "preview_id": payload.preview_id,
                },
            )
            result = self._request_sql_lab_session(payload)
            sql_lab_session_ref = str(
                result.get("sql_lab_session_ref")
                or result.get("query_id")
                or result.get("id")
                or result.get("result", {}).get("id")
                or ""
            ).strip()
            if not sql_lab_session_ref:
                logger.explore(
                    "Superset SQL Lab launch response did not include a stable session reference",
                    extra={"session_id": payload.session_id, "preview_id": payload.preview_id},
                )
                raise RuntimeError("Superset SQL Lab launch response did not include a session reference")

            logger.reflect(
                "Canonical SQL Lab session created successfully",
                extra={
                    "session_id": payload.session_id,
                    "preview_id": payload.preview_id,
                    "sql_lab_session_ref": sql_lab_session_ref,
                },
            )
            return sql_lab_session_ref
    # [/DEF:SupersetCompilationAdapter.create_sql_lab_session:Function]

    # [DEF:SupersetCompilationAdapter._request_superset_preview:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Request preview compilation through explicit client support backed by real Superset endpoints only.
    # @RELATION: [CALLS] ->[SupersetClient.compile_dataset_preview]
    # @PRE: payload contains a valid dataset identifier and deterministic execution inputs for one preview attempt.
    # @POST: returns one normalized upstream compilation response without endpoint guessing.
    # @SIDE_EFFECT: issues one Superset chart-data request through the client.
    # @DATA_CONTRACT: Input[PreviewCompilationPayload] -> Output[Dict[str,Any]]
    def _request_superset_preview(self, payload: PreviewCompilationPayload) -> Dict[str, Any]:
        try:
            logger.reason(
                "Attempting deterministic Superset preview compilation via chart/data",
                extra={
                    "dataset_id": payload.dataset_id,
                    "session_id": payload.session_id,
                    "filter_count": len(payload.effective_filters),
                    "template_param_count": len(payload.template_params),
                },
            )
            response = self.client.compile_dataset_preview(
                dataset_id=payload.dataset_id,
                template_params=payload.template_params,
                effective_filters=payload.effective_filters,
            )
        except Exception as exc:
            logger.explore(
                "Superset preview compilation via chart/data failed",
                extra={
                    "dataset_id": payload.dataset_id,
                    "session_id": payload.session_id,
                    "error": str(exc),
                },
            )
            raise RuntimeError(str(exc)) from exc

        normalized = self._normalize_preview_response(response)
        if normalized is None:
            raise RuntimeError("Superset chart/data compilation response could not be normalized")
        return normalized
    # [/DEF:SupersetCompilationAdapter._request_superset_preview:Function]

    # [DEF:SupersetCompilationAdapter._request_sql_lab_session:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Probe supported SQL Lab execution surfaces and return the first successful response.
    # @RELATION: [CALLS] ->[SupersetClient.get_dataset]
    # @PRE: payload carries non-empty Superset-originated SQL and a preview identifier for the current launch.
    # @POST: returns the first successful SQL Lab execution response from Superset.
    # @SIDE_EFFECT: issues Superset dataset lookup and SQL Lab execution requests.
    # @DATA_CONTRACT: Input[SqlLabLaunchPayload] -> Output[Dict[str,Any]]
    def _request_sql_lab_session(self, payload: SqlLabLaunchPayload) -> Dict[str, Any]:
        dataset_raw = self.client.get_dataset(payload.dataset_id)
        dataset_record = dataset_raw.get("result", dataset_raw) if isinstance(dataset_raw, dict) else {}
        database_id = dataset_record.get("database", {}).get("id") if isinstance(dataset_record.get("database"), dict) else dataset_record.get("database_id")
        if database_id is None:
            raise RuntimeError("Superset dataset does not expose a database identifier for SQL Lab launch")

        request_payload = {
            "database_id": database_id,
            "sql": payload.compiled_sql,
            "templateParams": payload.template_params,
            "schema": dataset_record.get("schema"),
            "client_id": payload.preview_id,
        }
        candidate_calls = [
            {"kind": "network", "target": "/sqllab/execute/", "http_method": "POST"},
            {"kind": "network", "target": "/sql_lab/execute/", "http_method": "POST"},
        ]
        errors: List[str] = []

        for candidate in candidate_calls:
            try:
                response = self.client.network.request(
                    method=candidate["http_method"],
                    endpoint=candidate["target"],
                    data=self._dump_json(request_payload),
                    headers={"Content-Type": "application/json"},
                )
                if isinstance(response, dict) and response:
                    return response
            except Exception as exc:
                errors.append(f"{candidate['target']}:{exc}")
                logger.explore(
                    "Superset SQL Lab candidate failed",
                    extra={"target": candidate["target"], "error": str(exc)},
                )

        raise RuntimeError("; ".join(errors) or "No Superset SQL Lab surface accepted the request")
    # [/DEF:SupersetCompilationAdapter._request_sql_lab_session:Function]
|
||||
|
||||
# [DEF:SupersetCompilationAdapter._normalize_preview_response:Function]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: Normalize candidate Superset preview responses into one compiled-sql structure.
|
||||
# @RELATION: [DEPENDS_ON] ->[CompiledPreview]
|
||||
def _normalize_preview_response(self, response: Any) -> Optional[Dict[str, Any]]:
|
||||
if not isinstance(response, dict):
|
||||
return None
|
||||
|
||||
compiled_sql_candidates = [
|
||||
response.get("compiled_sql"),
|
||||
response.get("sql"),
|
||||
response.get("query"),
|
||||
]
|
||||
result_payload = response.get("result")
|
||||
if isinstance(result_payload, dict):
|
||||
compiled_sql_candidates.extend(
|
||||
[
|
||||
result_payload.get("compiled_sql"),
|
||||
result_payload.get("sql"),
|
||||
result_payload.get("query"),
|
||||
]
|
||||
)
|
||||
|
||||
for candidate in compiled_sql_candidates:
|
||||
compiled_sql = str(candidate or "").strip()
|
||||
if compiled_sql:
|
||||
return {
|
||||
"compiled_sql": compiled_sql,
|
||||
"raw_response": response,
|
||||
}
|
||||
return None
|
||||
# [/DEF:SupersetCompilationAdapter._normalize_preview_response:Function]
|
||||
|
||||
# [DEF:SupersetCompilationAdapter._dump_json:Function]
|
||||
# @COMPLEXITY: 1
|
||||
# @PURPOSE: Serialize Superset request payload deterministically for network transport.
|
||||
def _dump_json(self, payload: Dict[str, Any]) -> str:
|
||||
import json
|
||||
|
||||
return json.dumps(payload, sort_keys=True, default=str)
|
||||
# [/DEF:SupersetCompilationAdapter._dump_json:Function]
|
||||
# [/DEF:SupersetCompilationAdapter:Class]
|
||||
|
||||
# [/DEF:SupersetCompilationAdapter:Module]
|
||||
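
For orientation, a minimal sketch of how a caller might combine the two request paths above. This is illustrative only: the `adapter` and payload names are assumptions, the fallback ordering is inferred from the method annotations, and both calls reach Superset over the network.

# Illustrative fallback sketch (assumed names: adapter, preview/launch payloads).
# The deterministic chart/data compilation is attempted first; the SQL Lab
# probe serves as a fallback where chart/data compilation is unavailable.
def compile_or_launch(adapter, preview_payload, launch_payload):
    try:
        return adapter._request_superset_preview(preview_payload)
    except RuntimeError:
        return adapter._request_sql_lab_session(launch_payload)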
backend/src/core/utils/superset_context_extractor.py (909 lines, new file)
@@ -0,0 +1,909 @@
# [DEF:SupersetContextExtractor:Module]
# @COMPLEXITY: 4
# @SEMANTICS: dataset_review, superset, link_parsing, context_recovery, partial_recovery
# @PURPOSE: Recover dataset and dashboard context from Superset links while preserving explicit partial-recovery markers.
# @LAYER: Infra
# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient:Class]
# @RELATION: [DEPENDS_ON] ->[ImportedFilter]
# @RELATION: [DEPENDS_ON] ->[TemplateVariable]
# @PRE: Superset link or dataset reference must be parseable enough to resolve an environment-scoped target resource.
# @POST: Returns the best available recovered context with explicit provenance and partial-recovery markers when necessary.
# @SIDE_EFFECT: Performs upstream Superset API reads.
# @INVARIANT: Partial recovery is surfaced explicitly and never misrepresented as fully confirmed context.

from __future__ import annotations

# [DEF:SupersetContextExtractor.imports:Block]
import json
import re
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Set
from urllib.parse import parse_qs, unquote, urlparse

from src.core.config_models import Environment
from src.core.logger import belief_scope, logger
from src.core.superset_client import SupersetClient
# [/DEF:SupersetContextExtractor.imports:Block]


# [DEF:SupersetParsedContext:Class]
# @COMPLEXITY: 2
# @PURPOSE: Normalized output of Superset link parsing for session intake and recovery.
@dataclass
class SupersetParsedContext:
    source_url: str
    dataset_ref: str
    dataset_id: Optional[int] = None
    dashboard_id: Optional[int] = None
    chart_id: Optional[int] = None
    resource_type: str = "unknown"
    query_state: Dict[str, Any] = field(default_factory=dict)
    imported_filters: List[Dict[str, Any]] = field(default_factory=list)
    unresolved_references: List[str] = field(default_factory=list)
    partial_recovery: bool = False
# [/DEF:SupersetParsedContext:Class]
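
An invented example of the shape this dataclass carries after a dashboard link parse; the values below are illustrative, not from a real Superset response.

# Invented example of a partially recovered dashboard context. In the parser
# below, partial_recovery=True always travels with explicit unresolved markers.
_EXAMPLE_PARSED_CONTEXT = SupersetParsedContext(
    source_url="https://superset.example.com/superset/dashboard/42/",
    dataset_ref="sales.orders",
    dataset_id=7,
    dashboard_id=42,
    resource_type="dashboard",
    unresolved_references=["multiple_dashboard_datasets"],
    partial_recovery=True,
)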


# [DEF:SupersetContextExtractor:Class]
# @COMPLEXITY: 4
# @PURPOSE: Parse supported Superset URLs and recover canonical dataset/dashboard references for review-session intake.
# @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient]
# @PRE: constructor receives a configured environment with a usable Superset base URL.
# @POST: extractor instance is ready to parse links against one Superset environment.
# @SIDE_EFFECT: downstream parse operations may call Superset APIs through SupersetClient.
class SupersetContextExtractor:
    # [DEF:SupersetContextExtractor.__init__:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Bind extractor to one Superset environment and client instance.
    def __init__(self, environment: Environment, client: Optional[SupersetClient] = None) -> None:
        self.environment = environment
        self.client = client or SupersetClient(environment)
    # [/DEF:SupersetContextExtractor.__init__:Function]

    # [DEF:SupersetContextExtractor.parse_superset_link:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Extract candidate identifiers and query state from supported Superset URLs.
    # @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient]
    # @PRE: link is a non-empty Superset URL compatible with the configured environment.
    # @POST: returns resolved dataset/dashboard context, preserving explicit partial-recovery state if some identifiers cannot be confirmed.
    # @SIDE_EFFECT: may issue Superset API reads to resolve dataset references from dashboard or chart URLs.
    # @DATA_CONTRACT: Input[link:str] -> Output[SupersetParsedContext]
    def parse_superset_link(self, link: str) -> SupersetParsedContext:
        with belief_scope("SupersetContextExtractor.parse_superset_link"):
            normalized_link = str(link or "").strip()
            if not normalized_link:
                logger.explore("Rejected empty Superset link during intake")
                raise ValueError("Superset link must be non-empty")

            parsed_url = urlparse(normalized_link)
            if parsed_url.scheme not in {"http", "https"} or not parsed_url.netloc:
                logger.explore(
                    "Superset link is not a parseable absolute URL",
                    extra={"link": normalized_link},
                )
                raise ValueError("Superset link must be an absolute http(s) URL")

            logger.reason(
                "Parsing Superset link for dataset review intake",
                extra={"path": parsed_url.path, "query": parsed_url.query},
            )

            path_parts = [part for part in parsed_url.path.split("/") if part]
            query_params = parse_qs(parsed_url.query, keep_blank_values=True)
            query_state = self._decode_query_state(query_params)

            dataset_id = self._extract_numeric_identifier(path_parts, "dataset")
            dashboard_id = self._extract_numeric_identifier(path_parts, "dashboard")
            dashboard_ref = self._extract_dashboard_reference(path_parts)
            dashboard_permalink_key = self._extract_dashboard_permalink_key(path_parts)
            chart_id = self._extract_numeric_identifier(path_parts, "chart")

            resource_type = "unknown"
            dataset_ref: Optional[str] = None
            partial_recovery = False
            unresolved_references: List[str] = []

            if dataset_id is not None:
                resource_type = "dataset"
                dataset_ref = f"dataset:{dataset_id}"
                logger.reason(
                    "Resolved direct dataset link",
                    extra={"dataset_id": dataset_id},
                )
            elif dashboard_permalink_key is not None:
                resource_type = "dashboard"
                partial_recovery = True
                dataset_ref = f"dashboard_permalink:{dashboard_permalink_key}"
                unresolved_references.append("dashboard_permalink_dataset_binding_unresolved")
                logger.reason(
                    "Resolving dashboard permalink state from Superset",
                    extra={"permalink_key": dashboard_permalink_key},
                )
                permalink_payload = self.client.get_dashboard_permalink_state(dashboard_permalink_key)
                permalink_state = (
                    permalink_payload.get("state", permalink_payload)
                    if isinstance(permalink_payload, dict)
                    else {}
                )
                if isinstance(permalink_state, dict):
                    for key, value in permalink_state.items():
                        query_state.setdefault(key, value)
                    resolved_dashboard_id = self._extract_dashboard_id_from_state(permalink_state)
                    resolved_chart_id = self._extract_chart_id_from_state(permalink_state)
                    if resolved_dashboard_id is not None:
                        dashboard_id = resolved_dashboard_id
                        unresolved_references = [
                            item
                            for item in unresolved_references
                            if item != "dashboard_permalink_dataset_binding_unresolved"
                        ]
                        dataset_id, unresolved_references = self._recover_dataset_binding_from_dashboard(
                            dashboard_id=dashboard_id,
                            dataset_ref=dataset_ref,
                            unresolved_references=unresolved_references,
                        )
                        if dataset_id is not None:
                            dataset_ref = f"dataset:{dataset_id}"
                    elif resolved_chart_id is not None:
                        chart_id = resolved_chart_id
                        unresolved_references = [
                            item
                            for item in unresolved_references
                            if item != "dashboard_permalink_dataset_binding_unresolved"
                        ]
                        try:
                            chart_payload = self.client.get_chart(chart_id)
                            chart_data = chart_payload.get("result", chart_payload) if isinstance(chart_payload, dict) else {}
                            datasource_id = chart_data.get("datasource_id")
                            if datasource_id is not None:
                                dataset_id = int(datasource_id)
                                dataset_ref = f"dataset:{dataset_id}"
                                logger.reason(
                                    "Recovered dataset reference from permalink chart context",
                                    extra={"chart_id": chart_id, "dataset_id": dataset_id},
                                )
                            else:
                                unresolved_references.append("chart_dataset_binding_unresolved")
                        except Exception as exc:
                            unresolved_references.append("chart_dataset_binding_unresolved")
                            logger.explore(
                                "Chart lookup failed during permalink recovery",
                                extra={"chart_id": chart_id, "error": str(exc)},
                            )
                else:
                    logger.explore(
                        "Dashboard permalink state was not a structured object",
                        extra={"permalink_key": dashboard_permalink_key},
                    )
            elif dashboard_id is not None or dashboard_ref is not None:
                resource_type = "dashboard"
                resolved_dashboard_ref = dashboard_id if dashboard_id is not None else dashboard_ref
                logger.reason(
                    "Resolving dashboard-bound dataset from Superset",
                    extra={"dashboard_ref": resolved_dashboard_ref},
                )
                dashboard_detail = self.client.get_dashboard_detail(resolved_dashboard_ref)
                resolved_dashboard_id = dashboard_detail.get("id")
                if resolved_dashboard_id is not None:
                    dashboard_id = int(resolved_dashboard_id)
                datasets = dashboard_detail.get("datasets") or []
                if datasets:
                    first_dataset = datasets[0]
                    resolved_dataset_id = first_dataset.get("id")
                    if resolved_dataset_id is not None:
                        dataset_id = int(resolved_dataset_id)
                        dataset_ref = f"dataset:{dataset_id}"
                        logger.reason(
                            "Recovered dataset reference from dashboard context",
                            extra={
                                "dashboard_id": dashboard_id,
                                "dataset_id": dataset_id,
                                "dataset_count": len(datasets),
                            },
                        )
                        if len(datasets) > 1:
                            partial_recovery = True
                            unresolved_references.append("multiple_dashboard_datasets")
                    else:
                        partial_recovery = True
                        unresolved_references.append("dashboard_dataset_id_missing")
                else:
                    partial_recovery = True
                    unresolved_references.append("dashboard_dataset_binding_missing")
            elif chart_id is not None:
                resource_type = "chart"
                partial_recovery = True
                unresolved_references.append("chart_dataset_binding_unresolved")
                dataset_ref = f"chart:{chart_id}"
                logger.reason(
                    "Accepted chart link with explicit partial recovery",
                    extra={"chart_id": chart_id},
                )
            else:
                logger.explore(
                    "Unsupported Superset link shape encountered",
                    extra={"path": parsed_url.path},
                )
                raise ValueError("Unsupported Superset link shape")

            if dataset_id is not None:
                try:
                    dataset_detail = self.client.get_dataset_detail(dataset_id)
                    table_name = str(dataset_detail.get("table_name") or "").strip()
                    schema_name = str(dataset_detail.get("schema") or "").strip()
                    if table_name:
                        dataset_ref = (
                            f"{schema_name}.{table_name}" if schema_name else table_name
                        )
                        logger.reason(
                            "Canonicalized dataset reference from dataset detail",
                            extra={"dataset_ref": dataset_ref, "dataset_id": dataset_id},
                        )
                except Exception as exc:
                    partial_recovery = True
                    unresolved_references.append("dataset_detail_lookup_failed")
                    logger.explore(
                        "Dataset detail lookup failed during link parsing; keeping session usable",
                        extra={"dataset_id": dataset_id, "error": str(exc)},
                    )

            imported_filters = self._extract_imported_filters(query_state)
            result = SupersetParsedContext(
                source_url=normalized_link,
                dataset_ref=dataset_ref or "unresolved",
                dataset_id=dataset_id,
                dashboard_id=dashboard_id,
                chart_id=chart_id,
                resource_type=resource_type,
                query_state=query_state,
                imported_filters=imported_filters,
                unresolved_references=unresolved_references,
                partial_recovery=partial_recovery,
            )
            logger.reflect(
                "Superset link parsing completed",
                extra={
                    "dataset_ref": result.dataset_ref,
                    "dataset_id": result.dataset_id,
                    "dashboard_id": result.dashboard_id,
                    "chart_id": result.chart_id,
                    "partial_recovery": result.partial_recovery,
                    "unresolved_references": result.unresolved_references,
                    "imported_filters": len(result.imported_filters),
                },
            )
            return result
    # [/DEF:SupersetContextExtractor.parse_superset_link:Function]
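
    # Illustrative usage sketch (not part of the diff): parsing a direct
    # dataset link against a hypothetical configured environment `env`.
    #
    #     >>> extractor = SupersetContextExtractor(env)
    #     >>> context = extractor.parse_superset_link(
    #     ...     "https://superset.example.com/superset/dataset/7/")
    #     >>> context.resource_type, context.dataset_id
    #     ('dataset', 7)
    #
    # Dashboard, permalink, and chart URLs take the recovery branches above
    # and may return partial_recovery=True with explicit unresolved markers
    # instead of raising.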

    # [DEF:SupersetContextExtractor.recover_imported_filters:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Build imported filter entries from URL state and Superset-side saved context.
    # @RELATION: [CALLS] ->[backend.src.core.superset_client.SupersetClient]
    # @PRE: parsed_context comes from a successful Superset link parse for one environment.
    # @POST: returns explicit recovered and partial filter entries with preserved provenance and confirmation requirements.
    # @SIDE_EFFECT: may issue Superset reads for dashboard metadata enrichment.
    # @DATA_CONTRACT: Input[SupersetParsedContext] -> Output[List[Dict[str,Any]]]
    def recover_imported_filters(self, parsed_context: SupersetParsedContext) -> List[Dict[str, Any]]:
        with belief_scope("SupersetContextExtractor.recover_imported_filters"):
            recovered_filters: List[Dict[str, Any]] = []
            seen_filter_keys: Set[str] = set()

            for item in parsed_context.imported_filters:
                normalized = self._normalize_imported_filter_payload(
                    item,
                    default_source="superset_url",
                    default_note="Recovered from Superset URL state",
                )
                filter_key = normalized["filter_name"].strip().lower()
                if filter_key in seen_filter_keys:
                    continue
                seen_filter_keys.add(filter_key)
                recovered_filters.append(normalized)

            if parsed_context.dashboard_id is None:
                logger.reflect(
                    "Imported filter recovery completed without dashboard enrichment",
                    extra={
                        "dashboard_id": None,
                        "filter_count": len(recovered_filters),
                        "partial_recovery": parsed_context.partial_recovery,
                    },
                )
                return recovered_filters

            try:
                dashboard_payload = self.client.get_dashboard(parsed_context.dashboard_id)
                dashboard_record = (
                    dashboard_payload.get("result", dashboard_payload)
                    if isinstance(dashboard_payload, dict)
                    else {}
                )
                json_metadata = dashboard_record.get("json_metadata")
                if isinstance(json_metadata, str) and json_metadata.strip():
                    json_metadata = json.loads(json_metadata)
                if not isinstance(json_metadata, dict):
                    json_metadata = {}

                native_filter_configuration = json_metadata.get("native_filter_configuration") or []
                default_filters = json_metadata.get("default_filters") or {}
                if isinstance(default_filters, str) and default_filters.strip():
                    try:
                        default_filters = json.loads(default_filters)
                    except Exception:
                        logger.explore(
                            "Superset default_filters payload was not valid JSON",
                            extra={"dashboard_id": parsed_context.dashboard_id},
                        )
                        default_filters = {}

                for item in native_filter_configuration:
                    if not isinstance(item, dict):
                        continue
                    filter_name = str(
                        item.get("name")
                        or item.get("filter_name")
                        or item.get("column")
                        or ""
                    ).strip()
                    if not filter_name:
                        continue

                    filter_key = filter_name.lower()
                    if filter_key in seen_filter_keys:
                        continue

                    default_value = None
                    if isinstance(default_filters, dict):
                        default_value = default_filters.get(filter_name)

                    saved_filter = self._normalize_imported_filter_payload(
                        {
                            "filter_name": filter_name,
                            "display_name": item.get("label") or item.get("name"),
                            "raw_value": default_value,
                            "source": "superset_native",
                            "recovery_status": "recovered" if default_value is not None else "partial",
                            "requires_confirmation": default_value is None,
                            "notes": "Recovered from Superset dashboard native filter configuration",
                        },
                        default_source="superset_native",
                        default_note="Recovered from Superset dashboard native filter configuration",
                    )
                    seen_filter_keys.add(filter_key)
                    recovered_filters.append(saved_filter)

                logger.reflect(
                    "Imported filter recovery completed with dashboard enrichment",
                    extra={
                        "dashboard_id": parsed_context.dashboard_id,
                        "filter_count": len(recovered_filters),
                        "partial_entries": len(
                            [
                                item
                                for item in recovered_filters
                                if item["recovery_status"] == "partial"
                            ]
                        ),
                    },
                )
                return recovered_filters
            except Exception as exc:
                logger.explore(
                    "Dashboard native filter enrichment failed; preserving partial imported filters",
                    extra={
                        "dashboard_id": parsed_context.dashboard_id,
                        "error": str(exc),
                        "filter_count": len(recovered_filters),
                    },
                )
                if not recovered_filters:
                    recovered_filters.append(
                        self._normalize_imported_filter_payload(
                            {
                                "filter_name": f"dashboard_{parsed_context.dashboard_id}_filters",
                                "display_name": "Dashboard native filters",
                                "raw_value": None,
                                "source": "superset_native",
                                "recovery_status": "partial",
                                "requires_confirmation": True,
                                "notes": "Superset dashboard filter configuration could not be recovered fully",
                            },
                            default_source="superset_native",
                            default_note="Superset dashboard filter configuration could not be recovered fully",
                        )
                    )
                return recovered_filters
    # [/DEF:SupersetContextExtractor.recover_imported_filters:Function]
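
    # Shape note (illustrative): every entry returned above is normalized by
    # _normalize_imported_filter_payload below. An invented native-filter
    # entry looks like:
    #
    #     {
    #         "filter_name": "region",
    #         "display_name": "Region",
    #         "raw_value": ["EMEA"],
    #         "normalized_value": None,
    #         "source": "superset_native",
    #         "confidence_state": "imported",
    #         "requires_confirmation": False,
    #         "recovery_status": "recovered",
    #         "notes": "Recovered from Superset dashboard native filter configuration",
    #     }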

    # [DEF:SupersetContextExtractor.discover_template_variables:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Detect runtime variables and Jinja references from dataset query-bearing fields.
    # @RELATION: [DEPENDS_ON] ->[TemplateVariable]
    # @PRE: dataset_payload is a Superset dataset-detail style payload with query-bearing fields when available.
    # @POST: returns deduplicated explicit variable records without executing Jinja or fabricating runtime values.
    # @SIDE_EFFECT: none.
    # @DATA_CONTRACT: Input[dataset_payload:Dict[str,Any]] -> Output[List[Dict[str,Any]]]
    def discover_template_variables(self, dataset_payload: Dict[str, Any]) -> List[Dict[str, Any]]:
        with belief_scope("SupersetContextExtractor.discover_template_variables"):
            discovered: List[Dict[str, Any]] = []
            seen_variable_names: Set[str] = set()

            for expression_source in self._collect_query_bearing_expressions(dataset_payload):
                for filter_match in re.finditer(
                    r"filter_values\(\s*['\"]([^'\"]+)['\"]\s*\)",
                    expression_source,
                    flags=re.IGNORECASE,
                ):
                    variable_name = str(filter_match.group(1) or "").strip()
                    if not variable_name:
                        continue
                    self._append_template_variable(
                        discovered=discovered,
                        seen_variable_names=seen_variable_names,
                        variable_name=variable_name,
                        expression_source=expression_source,
                        variable_kind="native_filter",
                        is_required=True,
                        default_value=None,
                    )

                for url_param_match in re.finditer(
                    r"url_param\(\s*['\"]([^'\"]+)['\"]\s*(?:,\s*([^)]+))?\)",
                    expression_source,
                    flags=re.IGNORECASE,
                ):
                    variable_name = str(url_param_match.group(1) or "").strip()
                    if not variable_name:
                        continue
                    default_literal = url_param_match.group(2)
                    self._append_template_variable(
                        discovered=discovered,
                        seen_variable_names=seen_variable_names,
                        variable_name=variable_name,
                        expression_source=expression_source,
                        variable_kind="parameter",
                        is_required=default_literal is None,
                        default_value=self._normalize_default_literal(default_literal),
                    )

                for jinja_match in re.finditer(r"\{\{\s*(.*?)\s*\}\}", expression_source, flags=re.DOTALL):
                    expression = str(jinja_match.group(1) or "").strip()
                    if not expression:
                        continue
                    if any(token in expression for token in ("filter_values(", "url_param(", "get_filters(")):
                        continue
                    variable_name = self._extract_primary_jinja_identifier(expression)
                    if not variable_name:
                        continue
                    self._append_template_variable(
                        discovered=discovered,
                        seen_variable_names=seen_variable_names,
                        variable_name=variable_name,
                        expression_source=expression_source,
                        variable_kind="derived" if "." in expression or "|" in expression else "parameter",
                        is_required=True,
                        default_value=None,
                    )

            logger.reflect(
                "Template variable discovery completed deterministically",
                extra={
                    "dataset_id": dataset_payload.get("id"),
                    "variable_count": len(discovered),
                    "variable_names": [item["variable_name"] for item in discovered],
                },
            )
            return discovered
    # [/DEF:SupersetContextExtractor.discover_template_variables:Function]
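
    # Worked example (invented SQL; no network involved, per @SIDE_EFFECT).
    # The three passes pick up a native filter, a parameter with a default,
    # and a plain Jinja identifier:
    #
    #     >>> extractor = SupersetContextExtractor(env)  # `env` assumed
    #     >>> sql = ("SELECT * FROM orders "
    #     ...        "WHERE region IN {{ filter_values('region') }} "
    #     ...        "AND order_date >= '{{ url_param('start_date', '2024-01-01') }}' "
    #     ...        "AND tenant = '{{ tenant_code }}'")
    #     >>> [(v["variable_name"], v["variable_kind"], v["is_required"])
    #     ...  for v in extractor.discover_template_variables({"id": 7, "sql": sql})]
    #     [('region', 'native_filter', True), ('start_date', 'parameter', False), ('tenant_code', 'parameter', True)]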

    # [DEF:SupersetContextExtractor.build_recovery_summary:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Summarize recovered, partial, and unresolved context for session state and UX.
    def build_recovery_summary(self, parsed_context: SupersetParsedContext) -> Dict[str, Any]:
        return {
            "dataset_ref": parsed_context.dataset_ref,
            "dataset_id": parsed_context.dataset_id,
            "dashboard_id": parsed_context.dashboard_id,
            "chart_id": parsed_context.chart_id,
            "partial_recovery": parsed_context.partial_recovery,
            "unresolved_references": list(parsed_context.unresolved_references),
            "imported_filter_count": len(parsed_context.imported_filters),
        }
    # [/DEF:SupersetContextExtractor.build_recovery_summary:Function]

    # [DEF:SupersetContextExtractor._extract_numeric_identifier:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Extract a numeric identifier from a REST-like Superset URL path.
    def _extract_numeric_identifier(self, path_parts: List[str], resource_name: str) -> Optional[int]:
        if resource_name not in path_parts:
            return None
        try:
            resource_index = path_parts.index(resource_name)
        except ValueError:
            return None

        if resource_index + 1 >= len(path_parts):
            return None

        candidate = str(path_parts[resource_index + 1]).strip()
        if not candidate.isdigit():
            return None
        return int(candidate)
    # [/DEF:SupersetContextExtractor._extract_numeric_identifier:Function]

    # [DEF:SupersetContextExtractor._extract_dashboard_reference:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Extract a dashboard id-or-slug reference from a Superset URL path.
    def _extract_dashboard_reference(self, path_parts: List[str]) -> Optional[str]:
        if "dashboard" not in path_parts:
            return None
        try:
            resource_index = path_parts.index("dashboard")
        except ValueError:
            return None

        if resource_index + 1 >= len(path_parts):
            return None

        candidate = str(path_parts[resource_index + 1]).strip()
        if not candidate or candidate == "p":
            return None
        return candidate
    # [/DEF:SupersetContextExtractor._extract_dashboard_reference:Function]

    # [DEF:SupersetContextExtractor._extract_dashboard_permalink_key:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Extract a dashboard permalink key from a Superset URL path.
    def _extract_dashboard_permalink_key(self, path_parts: List[str]) -> Optional[str]:
        if "dashboard" not in path_parts:
            return None
        try:
            resource_index = path_parts.index("dashboard")
        except ValueError:
            return None

        if resource_index + 2 >= len(path_parts):
            return None

        permalink_marker = str(path_parts[resource_index + 1]).strip()
        permalink_key = str(path_parts[resource_index + 2]).strip()
        if permalink_marker != "p" or not permalink_key:
            return None
        return permalink_key
    # [/DEF:SupersetContextExtractor._extract_dashboard_permalink_key:Function]

    # [DEF:SupersetContextExtractor._extract_dashboard_id_from_state:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Extract a dashboard identifier from returned permalink state when present.
    def _extract_dashboard_id_from_state(self, state: Dict[str, Any]) -> Optional[int]:
        return self._search_nested_numeric_key(
            payload=state,
            candidate_keys={"dashboardId", "dashboard_id", "dashboard_id_value"},
        )
    # [/DEF:SupersetContextExtractor._extract_dashboard_id_from_state:Function]

    # [DEF:SupersetContextExtractor._extract_chart_id_from_state:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Extract a chart identifier from returned permalink state when dashboard id is absent.
    def _extract_chart_id_from_state(self, state: Dict[str, Any]) -> Optional[int]:
        return self._search_nested_numeric_key(
            payload=state,
            candidate_keys={"slice_id", "sliceId", "chartId", "chart_id"},
        )
    # [/DEF:SupersetContextExtractor._extract_chart_id_from_state:Function]

    # [DEF:SupersetContextExtractor._search_nested_numeric_key:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Recursively search nested dict/list payloads for the first numeric value under a candidate key set.
    def _search_nested_numeric_key(self, payload: Any, candidate_keys: Set[str]) -> Optional[int]:
        if isinstance(payload, dict):
            for key, value in payload.items():
                if key in candidate_keys:
                    try:
                        if value is not None:
                            return int(value)
                    except (TypeError, ValueError):
                        pass
                found = self._search_nested_numeric_key(value, candidate_keys)
                if found is not None:
                    return found
        elif isinstance(payload, list):
            for item in payload:
                found = self._search_nested_numeric_key(item, candidate_keys)
                if found is not None:
                    return found
        return None
    # [/DEF:SupersetContextExtractor._search_nested_numeric_key:Function]
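
    # Illustrative search (invented permalink-style payload): the walk returns
    # the first value found under a candidate key, coerced via int().
    #
    #     >>> extractor = SupersetContextExtractor(env)  # `env` assumed
    #     >>> state = {"state": {"urlParams": [{"sliceId": "19"}]}}
    #     >>> extractor._search_nested_numeric_key(state, {"sliceId", "slice_id"})
    #     19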

    # [DEF:SupersetContextExtractor._recover_dataset_binding_from_dashboard:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Recover a dataset binding from resolved dashboard context while preserving explicit unresolved markers.
    def _recover_dataset_binding_from_dashboard(
        self,
        dashboard_id: int,
        dataset_ref: Optional[str],
        unresolved_references: List[str],
    ) -> tuple[Optional[int], List[str]]:
        dashboard_detail = self.client.get_dashboard_detail(dashboard_id)
        datasets = dashboard_detail.get("datasets") or []
        if datasets:
            first_dataset = datasets[0]
            resolved_dataset_id = first_dataset.get("id")
            if resolved_dataset_id is not None:
                resolved_dataset = int(resolved_dataset_id)
                logger.reason(
                    "Recovered dataset reference from dashboard permalink context",
                    extra={
                        "dashboard_id": dashboard_id,
                        "dataset_id": resolved_dataset,
                        "dataset_count": len(datasets),
                        "dataset_ref": dataset_ref,
                    },
                )
                if len(datasets) > 1 and "multiple_dashboard_datasets" not in unresolved_references:
                    unresolved_references.append("multiple_dashboard_datasets")
                return resolved_dataset, unresolved_references
            if "dashboard_dataset_id_missing" not in unresolved_references:
                unresolved_references.append("dashboard_dataset_id_missing")
            return None, unresolved_references

        if "dashboard_dataset_binding_missing" not in unresolved_references:
            unresolved_references.append("dashboard_dataset_binding_missing")
        return None, unresolved_references
    # [/DEF:SupersetContextExtractor._recover_dataset_binding_from_dashboard:Function]

    # [DEF:SupersetContextExtractor._decode_query_state:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Decode query-string structures used by Superset URL state transport.
    def _decode_query_state(self, query_params: Dict[str, List[str]]) -> Dict[str, Any]:
        query_state: Dict[str, Any] = {}
        for key, values in query_params.items():
            if not values:
                continue
            raw_value = values[-1]
            decoded_value = unquote(raw_value)
            if key in {"native_filters", "form_data", "q"}:
                try:
                    query_state[key] = json.loads(decoded_value)
                    continue
                except Exception:
                    logger.explore(
                        "Failed to decode structured Superset query state; preserving raw value",
                        extra={"key": key},
                    )
            query_state[key] = decoded_value
        return query_state
    # [/DEF:SupersetContextExtractor._decode_query_state:Function]
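
    # Illustrative decode (no Superset calls): structured keys are parsed as
    # JSON when possible, everything else stays a percent-decoded string.
    #
    #     >>> extractor = SupersetContextExtractor(env)  # `env` assumed
    #     >>> params = parse_qs(
    #     ...     "form_data=%7B%22extra_filters%22%3A%5B%5D%7D&standalone=1",
    #     ...     keep_blank_values=True)
    #     >>> extractor._decode_query_state(params)
    #     {'form_data': {'extra_filters': []}, 'standalone': '1'}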

    # [DEF:SupersetContextExtractor._extract_imported_filters:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Normalize imported filters from decoded query state without fabricating missing values.
    def _extract_imported_filters(self, query_state: Dict[str, Any]) -> List[Dict[str, Any]]:
        imported_filters: List[Dict[str, Any]] = []

        native_filters_payload = query_state.get("native_filters")
        if isinstance(native_filters_payload, list):
            for index, item in enumerate(native_filters_payload):
                if not isinstance(item, dict):
                    continue
                filter_name = (
                    item.get("filter_name")
                    or item.get("column")
                    or item.get("name")
                    or f"native_filter_{index}"
                )
                imported_filters.append(
                    {
                        "filter_name": str(filter_name),
                        "raw_value": item.get("value"),
                        "display_name": item.get("label") or item.get("name"),
                        "source": "superset_url",
                        "recovery_status": "recovered" if item.get("value") is not None else "partial",
                        "requires_confirmation": item.get("value") is None,
                        "notes": "Recovered from Superset native filter URL state",
                    }
                )

        dashboard_data_mask = query_state.get("dataMask")
        if isinstance(dashboard_data_mask, dict):
            for filter_key, item in dashboard_data_mask.items():
                if not isinstance(item, dict):
                    continue
                filter_state = item.get("filterState")
                extra_form_data = item.get("extraFormData")
                display_name = None
                raw_value = None
                if isinstance(filter_state, dict):
                    display_name = filter_state.get("label")
                    raw_value = filter_state.get("value")
                if raw_value is None and isinstance(extra_form_data, dict):
                    extra_filters = extra_form_data.get("filters")
                    if isinstance(extra_filters, list) and extra_filters:
                        first_filter = extra_filters[0]
                        if isinstance(first_filter, dict):
                            raw_value = first_filter.get("val")
                imported_filters.append(
                    {
                        "filter_name": str(item.get("id") or filter_key),
                        "raw_value": raw_value,
                        "display_name": display_name,
                        "source": "superset_permalink",
                        "recovery_status": "recovered" if raw_value is not None else "partial",
                        "requires_confirmation": raw_value is None,
                        "notes": "Recovered from Superset dashboard permalink state",
                    }
                )

        form_data_payload = query_state.get("form_data")
        if isinstance(form_data_payload, dict):
            extra_filters = form_data_payload.get("extra_filters") or []
            for index, item in enumerate(extra_filters):
                if not isinstance(item, dict):
                    continue
                filter_name = item.get("col") or item.get("column") or f"extra_filter_{index}"
                imported_filters.append(
                    {
                        "filter_name": str(filter_name),
                        "raw_value": item.get("val"),
                        "display_name": item.get("label"),
                        "source": "superset_url",
                        "recovery_status": "recovered" if item.get("val") is not None else "partial",
                        "requires_confirmation": item.get("val") is None,
                        "notes": "Recovered from Superset form_data extra_filters",
                    }
                )

        return imported_filters
    # [/DEF:SupersetContextExtractor._extract_imported_filters:Function]

    # [DEF:SupersetContextExtractor._normalize_imported_filter_payload:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Normalize one imported-filter payload with explicit provenance and confirmation state.
    def _normalize_imported_filter_payload(
        self,
        payload: Dict[str, Any],
        default_source: str,
        default_note: str,
    ) -> Dict[str, Any]:
        raw_value = payload.get("raw_value")
        if "raw_value" not in payload and "value" in payload:
            raw_value = payload.get("value")

        recovery_status = str(
            payload.get("recovery_status")
            or ("recovered" if raw_value is not None else "partial")
        ).strip().lower()
        requires_confirmation = bool(
            payload.get("requires_confirmation", raw_value is None or recovery_status != "recovered")
        )
        return {
            "filter_name": str(payload.get("filter_name") or "unresolved_filter").strip(),
            "display_name": payload.get("display_name"),
            "raw_value": raw_value,
            "normalized_value": payload.get("normalized_value"),
            "source": str(payload.get("source") or default_source),
            "confidence_state": "imported" if raw_value is not None else "unresolved",
            "requires_confirmation": requires_confirmation,
            "recovery_status": recovery_status,
            "notes": str(payload.get("notes") or default_note),
        }
    # [/DEF:SupersetContextExtractor._normalize_imported_filter_payload:Function]

    # [DEF:SupersetContextExtractor._collect_query_bearing_expressions:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Collect SQL and expression-bearing dataset fields for deterministic template-variable discovery.
    # @RELATION: [DEPENDS_ON] ->[SupersetContextExtractor.discover_template_variables]
    def _collect_query_bearing_expressions(self, dataset_payload: Dict[str, Any]) -> List[str]:
        expressions: List[str] = []

        def append_expression(candidate: Any) -> None:
            if not isinstance(candidate, str):
                return
            normalized = candidate.strip()
            if normalized:
                expressions.append(normalized)

        append_expression(dataset_payload.get("sql"))
        append_expression(dataset_payload.get("query"))
        append_expression(dataset_payload.get("template_sql"))

        metrics_payload = dataset_payload.get("metrics") or []
        if isinstance(metrics_payload, list):
            for metric in metrics_payload:
                if isinstance(metric, str):
                    append_expression(metric)
                    continue
                if not isinstance(metric, dict):
                    continue
                append_expression(metric.get("expression"))
                append_expression(metric.get("sqlExpression"))
                append_expression(metric.get("metric_name"))

        columns_payload = dataset_payload.get("columns") or []
        if isinstance(columns_payload, list):
            for column in columns_payload:
                if not isinstance(column, dict):
                    continue
                append_expression(column.get("sqlExpression"))
                append_expression(column.get("expression"))

        return expressions
    # [/DEF:SupersetContextExtractor._collect_query_bearing_expressions:Function]

    # [DEF:SupersetContextExtractor._append_template_variable:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Append one deduplicated template-variable descriptor.
    def _append_template_variable(
        self,
        discovered: List[Dict[str, Any]],
        seen_variable_names: Set[str],
        variable_name: str,
        expression_source: str,
        variable_kind: str,
        is_required: bool,
        default_value: Any,
    ) -> None:
        normalized_name = str(variable_name or "").strip()
        if not normalized_name:
            return
        seen_key = normalized_name.lower()
        if seen_key in seen_variable_names:
            return
        seen_variable_names.add(seen_key)
        discovered.append(
            {
                "variable_name": normalized_name,
                "expression_source": expression_source,
                "variable_kind": variable_kind,
                "is_required": is_required,
                "default_value": default_value,
                "mapping_status": "unmapped",
            }
        )
    # [/DEF:SupersetContextExtractor._append_template_variable:Function]

    # [DEF:SupersetContextExtractor._extract_primary_jinja_identifier:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Extract a deterministic primary identifier from a Jinja expression without executing it.
    def _extract_primary_jinja_identifier(self, expression: str) -> Optional[str]:
        matched = re.match(r"([A-Za-z_][A-Za-z0-9_]*)", expression.strip())
        if matched is None:
            return None
        candidate = matched.group(1)
        if candidate in {"if", "else", "for", "set", "True", "False", "none", "None"}:
            return None
        return candidate
    # [/DEF:SupersetContextExtractor._extract_primary_jinja_identifier:Function]

    # [DEF:SupersetContextExtractor._normalize_default_literal:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Normalize literal default fragments from template helper calls into JSON-safe values.
    def _normalize_default_literal(self, literal: Optional[str]) -> Any:
        normalized_literal = str(literal or "").strip()
        if not normalized_literal:
            return None
        if (
            (normalized_literal.startswith("'") and normalized_literal.endswith("'"))
            or (normalized_literal.startswith('"') and normalized_literal.endswith('"'))
        ):
            return normalized_literal[1:-1]
        lowered = normalized_literal.lower()
        if lowered in {"true", "false"}:
            return lowered == "true"
        if lowered in {"null", "none"}:
            return None
        try:
            return int(normalized_literal)
        except ValueError:
            try:
                return float(normalized_literal)
            except ValueError:
                return normalized_literal
    # [/DEF:SupersetContextExtractor._normalize_default_literal:Function]
# [/DEF:SupersetContextExtractor:Class]

# [/DEF:SupersetContextExtractor:Module]
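
A quick reference for the literal normalization above; these calls are pure string handling, and `env` stands in for a configured Environment (an assumption, not part of the diff).

# Illustration of _normalize_default_literal (no network; `env` assumed).
_normalizer = SupersetContextExtractor(env)
assert _normalizer._normalize_default_literal("'2024-01-01'") == "2024-01-01"
assert _normalizer._normalize_default_literal("true") is True
assert _normalizer._normalize_default_literal("null") is None
assert _normalizer._normalize_default_literal("30") == 30
assert _normalizer._normalize_default_literal("0.5") == 0.5
assert _normalizer._normalize_default_literal("region_code") == "region_code"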
@@ -13,6 +13,8 @@ from datetime import datetime
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import List, Optional, Dict, Any
|
||||
from pydantic import ConfigDict, Field, model_validator
|
||||
from pydantic.dataclasses import dataclass as pydantic_dataclass
|
||||
from sqlalchemy import Column, String, DateTime, JSON, ForeignKey, Integer, Boolean
|
||||
from sqlalchemy.orm import relationship
|
||||
from .mapping import Base
|
||||
@@ -22,12 +24,21 @@ from ..services.clean_release.enums import (
|
||||
)
|
||||
from ..services.clean_release.exceptions import IllegalTransitionError
|
||||
|
||||
# [DEF:ExecutionMode:Class]
|
||||
# @PURPOSE: Backward-compatible execution mode enum for legacy TUI/orchestrator tests.
|
||||
class ExecutionMode(str, Enum):
|
||||
TUI = "TUI"
|
||||
API = "API"
|
||||
SCHEDULER = "SCHEDULER"
|
||||
# [/DEF:ExecutionMode:Class]
|
||||
|
||||
# [DEF:CheckFinalStatus:Class]
|
||||
# @PURPOSE: Backward-compatible final status enum for legacy TUI/orchestrator tests.
|
||||
class CheckFinalStatus(str, Enum):
|
||||
COMPLIANT = "COMPLIANT"
|
||||
BLOCKED = "BLOCKED"
|
||||
FAILED = "FAILED"
|
||||
RUNNING = "RUNNING"
|
||||
# [/DEF:CheckFinalStatus:Class]
|
||||
|
||||
# [DEF:CheckStageName:Class]
|
||||
@@ -50,7 +61,7 @@ class CheckStageStatus(str, Enum):
|
||||
|
||||
# [DEF:CheckStageResult:Class]
|
||||
# @PURPOSE: Backward-compatible stage result container for legacy TUI/orchestrator tests.
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class CheckStageResult:
|
||||
stage: CheckStageName
|
||||
status: CheckStageStatus
|
||||
@@ -80,6 +91,7 @@ class ReleaseCandidateStatus(str, Enum):
|
||||
CHECK_RUNNING = CandidateStatus.CHECK_RUNNING.value
|
||||
CHECK_PASSED = CandidateStatus.CHECK_PASSED.value
|
||||
CHECK_BLOCKED = CandidateStatus.CHECK_BLOCKED.value
|
||||
BLOCKED = CandidateStatus.CHECK_BLOCKED.value
|
||||
CHECK_ERROR = CandidateStatus.CHECK_ERROR.value
|
||||
APPROVED = CandidateStatus.APPROVED.value
|
||||
PUBLISHED = CandidateStatus.PUBLISHED.value
|
||||
@@ -88,7 +100,7 @@ class ReleaseCandidateStatus(str, Enum):
|
||||
|
||||
# [DEF:ResourceSourceEntry:Class]
|
||||
# @PURPOSE: Backward-compatible source entry model for legacy TUI bootstrap logic.
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class ResourceSourceEntry:
|
||||
source_id: str
|
||||
host: str
|
||||
@@ -99,7 +111,7 @@ class ResourceSourceEntry:
|
||||
|
||||
# [DEF:ResourceSourceRegistry:Class]
|
||||
# @PURPOSE: Backward-compatible source registry model for legacy TUI bootstrap logic.
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class ResourceSourceRegistry:
|
||||
registry_id: str
|
||||
name: str
|
||||
@@ -107,6 +119,21 @@ class ResourceSourceRegistry:
|
||||
updated_at: datetime
|
||||
updated_by: str
|
||||
status: str = "ACTIVE"
|
||||
immutable: bool = True
|
||||
allowed_hosts: Optional[List[str]] = None
|
||||
allowed_schemes: Optional[List[str]] = None
|
||||
allowed_source_types: Optional[List[str]] = None
|
||||
|
||||
@model_validator(mode="after")
|
||||
def populate_legacy_allowlists(self):
|
||||
enabled_entries = [entry for entry in self.entries if getattr(entry, "enabled", True)]
|
||||
if self.allowed_hosts is None:
|
||||
self.allowed_hosts = [entry.host for entry in enabled_entries]
|
||||
if self.allowed_schemes is None:
|
||||
self.allowed_schemes = [entry.protocol for entry in enabled_entries]
|
||||
if self.allowed_source_types is None:
|
||||
self.allowed_source_types = [entry.purpose for entry in enabled_entries]
|
||||
return self
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
@@ -115,16 +142,35 @@ class ResourceSourceRegistry:
|
||||
|
||||
# [DEF:CleanProfilePolicy:Class]
|
||||
# @PURPOSE: Backward-compatible policy model for legacy TUI bootstrap logic.
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class CleanProfilePolicy:
|
||||
policy_id: str
|
||||
policy_version: str
|
||||
profile: str
|
||||
profile: ProfileType
|
||||
active: bool
|
||||
internal_source_registry_ref: str
|
||||
prohibited_artifact_categories: List[str]
|
||||
effective_from: datetime
|
||||
required_system_categories: Optional[List[str]] = None
|
||||
external_source_forbidden: bool = True
|
||||
immutable: bool = True
|
||||
content_json: Optional[Dict[str, Any]] = None
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_enterprise_policy(self):
|
||||
if self.profile == ProfileType.ENTERPRISE_CLEAN:
|
||||
if not self.prohibited_artifact_categories:
|
||||
raise ValueError("enterprise-clean policy requires prohibited_artifact_categories")
|
||||
if self.external_source_forbidden is not True:
|
||||
raise ValueError("enterprise-clean policy requires external_source_forbidden=true")
|
||||
if self.content_json is None:
|
||||
self.content_json = {
|
||||
"profile": self.profile.value,
|
||||
"prohibited_artifact_categories": list(self.prohibited_artifact_categories or []),
|
||||
"required_system_categories": list(self.required_system_categories or []),
|
||||
"external_source_forbidden": self.external_source_forbidden,
|
||||
}
|
||||
return self
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
@@ -137,15 +183,49 @@ class CleanProfilePolicy:
|
||||
|
||||
# [DEF:ComplianceCheckRun:Class]
|
||||
# @PURPOSE: Backward-compatible run model for legacy TUI typing/import compatibility.
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class ComplianceCheckRun:
|
||||
check_run_id: str
|
||||
candidate_id: str
|
||||
policy_id: str
|
||||
requested_by: str
|
||||
execution_mode: str
|
||||
checks: List[CheckStageResult]
|
||||
started_at: datetime
|
||||
triggered_by: str
|
||||
execution_mode: ExecutionMode
|
||||
final_status: CheckFinalStatus
|
||||
checks: List[CheckStageResult]
|
||||
finished_at: Optional[datetime] = None
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_final_status_alignment(self):
|
||||
mandatory_stages = {
|
||||
CheckStageName.DATA_PURITY,
|
||||
CheckStageName.INTERNAL_SOURCES_ONLY,
|
||||
CheckStageName.NO_EXTERNAL_ENDPOINTS,
|
||||
CheckStageName.MANIFEST_CONSISTENCY,
|
||||
}
|
||||
if self.final_status == CheckFinalStatus.COMPLIANT:
|
||||
observed_stages = {check.stage for check in self.checks}
|
||||
if observed_stages != mandatory_stages:
|
||||
raise ValueError("compliant run requires all mandatory stages")
|
||||
if any(check.status != CheckStageStatus.PASS for check in self.checks):
|
||||
raise ValueError("compliant run requires PASS on all mandatory stages")
|
||||
return self
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
return self.check_run_id
|
||||
|
||||
@property
|
||||
def run_id(self) -> str:
|
||||
return self.check_run_id
|
||||
|
||||
@property
|
||||
def status(self) -> RunStatus:
|
||||
if self.final_status == CheckFinalStatus.RUNNING:
|
||||
return RunStatus.RUNNING
|
||||
if self.final_status == CheckFinalStatus.BLOCKED:
|
||||
return RunStatus.FAILED
|
||||
return RunStatus.SUCCEEDED
|
||||
# [/DEF:ComplianceCheckRun:Class]
|
||||
|
||||
# [DEF:ReleaseCandidate:Class]
|
||||
@@ -164,6 +244,22 @@ class ReleaseCandidate(Base):
|
||||
created_by = Column(String, nullable=False)
|
||||
status = Column(String, default=CandidateStatus.DRAFT)
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
if "candidate_id" in kwargs:
|
||||
kwargs["id"] = kwargs.pop("candidate_id")
|
||||
if "profile" in kwargs:
|
||||
kwargs.pop("profile")
|
||||
status = kwargs.get("status")
|
||||
if status is None:
|
||||
kwargs["status"] = CandidateStatus.DRAFT.value
|
||||
elif isinstance(status, ReleaseCandidateStatus):
|
||||
kwargs["status"] = status.value
|
||||
elif isinstance(status, CandidateStatus):
|
||||
kwargs["status"] = status.value
|
||||
if not str(kwargs.get("id", "")).strip():
|
||||
raise ValueError("candidate_id must be non-empty")
|
||||
super().__init__(**kwargs)
|
||||
|
||||
@property
|
||||
def candidate_id(self) -> str:
|
||||
return self.id
|
||||
@@ -214,7 +310,7 @@ class CandidateArtifact(Base):
|
||||
# [/DEF:CandidateArtifact:Class]
|
||||
|
||||
# [DEF:ManifestItem:Class]
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class ManifestItem:
|
||||
path: str
|
||||
category: str
|
||||
@@ -224,7 +320,7 @@ class ManifestItem:
|
||||
# [/DEF:ManifestItem:Class]
|
||||
|
||||
# [DEF:ManifestSummary:Class]
|
||||
@dataclass
|
||||
@pydantic_dataclass(config=ConfigDict(validate_assignment=True))
|
||||
class ManifestSummary:
|
||||
included_count: int
|
||||
excluded_count: int
|
||||
@@ -250,6 +346,9 @@ class DistributionManifest(Base):
|
||||
|
||||
# Redesign compatibility fields (not persisted directly but used by builder/facade)
|
||||
def __init__(self, **kwargs):
|
||||
items = kwargs.pop("items", None)
|
||||
summary = kwargs.pop("summary", None)
|
||||
|
||||
# Handle fields from manifest_builder.py
|
||||
if "manifest_id" in kwargs:
|
||||
kwargs["id"] = kwargs.pop("manifest_id")
|
||||
@@ -259,6 +358,13 @@ class DistributionManifest(Base):
|
||||
kwargs["created_by"] = kwargs.pop("generated_by")
|
||||
if "deterministic_hash" in kwargs:
|
||||
kwargs["manifest_digest"] = kwargs.pop("deterministic_hash")
|
||||
if "policy_id" in kwargs:
|
||||
kwargs.pop("policy_id")
|
||||
|
||||
if items is not None and summary is not None:
|
||||
expected_count = int(summary.included_count) + int(summary.excluded_count)
|
||||
if expected_count != len(items):
|
||||
raise ValueError("manifest summary counts must match items size")
|
||||
|
||||
# Ensure required DB fields have defaults if missing
|
||||
if "manifest_version" not in kwargs:
|
||||
@@ -269,10 +375,9 @@ class DistributionManifest(Base):
|
||||
kwargs["source_snapshot_ref"] = "pending"
|
||||
|
||||
# Pack items and summary into content_json if provided
|
||||
if "items" in kwargs or "summary" in kwargs:
|
||||
content = kwargs.get("content_json", {})
|
||||
if "items" in kwargs:
|
||||
items = kwargs.pop("items")
|
||||
if items is not None or summary is not None:
|
||||
content = dict(kwargs.get("content_json") or {})
|
||||
if items is not None:
|
||||
content["items"] = [
|
||||
{
|
||||
"path": i.path,
|
||||
@@ -282,8 +387,7 @@ class DistributionManifest(Base):
|
||||
"checksum": i.checksum
|
||||
} for i in items
|
||||
]
|
||||
if "summary" in kwargs:
|
||||
summary = kwargs.pop("summary")
|
||||
if summary is not None:
|
||||
content["summary"] = {
|
||||
"included_count": summary.included_count,
|
||||
"excluded_count": summary.excluded_count,
|
||||
@@ -292,6 +396,23 @@ class DistributionManifest(Base):
|
||||
kwargs["content_json"] = content
|
||||
|
||||
super().__init__(**kwargs)
|
||||
|
||||
@property
|
||||
def manifest_id(self) -> str:
|
||||
return self.id
|
||||
|
||||
@property
|
||||
def deterministic_hash(self) -> str:
|
||||
return self.manifest_digest
|
||||
|
||||
@property
|
||||
def summary(self) -> ManifestSummary:
|
||||
payload = (self.content_json or {}).get("summary", {})
|
||||
return ManifestSummary(
|
||||
included_count=int(payload.get("included_count", 0)),
|
||||
excluded_count=int(payload.get("excluded_count", 0)),
|
||||
prohibited_detected_count=int(payload.get("prohibited_detected_count", 0)),
|
||||
)
|
||||
# [/DEF:DistributionManifest:Class]
|
||||
|
||||
# [DEF:SourceRegistrySnapshot:Class]
|
||||
@@ -363,6 +484,24 @@ class ComplianceStageRun(Base):
|
||||
details_json = Column(JSON, default=dict)
|
||||
# [/DEF:ComplianceStageRun:Class]
|
||||
|
||||
# [DEF:ViolationSeverity:Class]
|
||||
# @PURPOSE: Backward-compatible violation severity enum for legacy clean-release tests.
|
||||
class ViolationSeverity(str, Enum):
|
||||
CRITICAL = "CRITICAL"
|
||||
MAJOR = "MAJOR"
|
||||
MINOR = "MINOR"
|
||||
# [/DEF:ViolationSeverity:Class]
|
||||
|
||||
# [DEF:ViolationCategory:Class]
|
||||
# @PURPOSE: Backward-compatible violation category enum for legacy clean-release tests.
|
||||
class ViolationCategory(str, Enum):
|
||||
DATA_PURITY = "DATA_PURITY"
|
||||
EXTERNAL_SOURCE = "EXTERNAL_SOURCE"
|
||||
SOURCE_ISOLATION = "SOURCE_ISOLATION"
|
||||
MANIFEST_CONSISTENCY = "MANIFEST_CONSISTENCY"
|
||||
EXTERNAL_ENDPOINT = "EXTERNAL_ENDPOINT"
|
||||
# [/DEF:ViolationCategory:Class]
|
||||
|
||||
# [DEF:ComplianceViolation:Class]
|
||||
# @PURPOSE: Violation produced by a stage.
|
||||
class ComplianceViolation(Base):
|
||||
@@ -377,6 +516,66 @@ class ComplianceViolation(Base):
|
||||
artifact_sha256 = Column(String, nullable=True)
|
||||
message = Column(String, nullable=False)
|
||||
evidence_json = Column(JSON, default=dict)
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
if "violation_id" in kwargs:
|
||||
kwargs["id"] = kwargs.pop("violation_id")
|
||||
if "check_run_id" in kwargs:
|
||||
kwargs["run_id"] = kwargs.pop("check_run_id")
|
||||
if "category" in kwargs:
|
||||
category = kwargs.pop("category")
|
||||
kwargs["stage_name"] = category.value if isinstance(category, ViolationCategory) else str(category)
|
||||
if "location" in kwargs:
|
||||
kwargs["artifact_path"] = kwargs.pop("location")
|
||||
if "remediation" in kwargs:
|
||||
remediation = kwargs.pop("remediation")
|
||||
evidence = dict(kwargs.get("evidence_json") or {})
|
||||
evidence["remediation"] = remediation
|
||||
kwargs["evidence_json"] = evidence
|
||||
if "blocked_release" in kwargs:
|
||||
blocked_release = kwargs.pop("blocked_release")
|
||||
evidence = dict(kwargs.get("evidence_json") or {})
|
||||
evidence["blocked_release"] = blocked_release
|
||||
kwargs["evidence_json"] = evidence
|
||||
if "detected_at" in kwargs:
|
||||
kwargs.pop("detected_at")
|
||||
if "code" not in kwargs:
|
||||
kwargs["code"] = "LEGACY_VIOLATION"
|
||||
if "message" not in kwargs:
|
||||
kwargs["message"] = kwargs.get("stage_name", "LEGACY_VIOLATION")
|
||||
super().__init__(**kwargs)
|
||||
|
||||
@property
|
||||
def violation_id(self) -> str:
|
||||
return self.id
|
||||
|
||||
@violation_id.setter
|
||||
def violation_id(self, value: str) -> None:
|
||||
self.id = value
|
||||
|
||||
@property
|
||||
def check_run_id(self) -> str:
|
||||
return self.run_id
|
||||
|
||||
@property
|
||||
def category(self) -> ViolationCategory:
|
||||
return ViolationCategory(self.stage_name)
|
||||
|
||||
@category.setter
|
||||
def category(self, value: ViolationCategory) -> None:
|
||||
self.stage_name = value.value if isinstance(value, ViolationCategory) else str(value)
|
||||
|
||||
@property
|
||||
def location(self) -> Optional[str]:
|
||||
return self.artifact_path
|
||||
|
||||
@property
|
||||
def remediation(self) -> Optional[str]:
|
||||
return (self.evidence_json or {}).get("remediation")
|
||||
|
||||
@property
|
||||
def blocked_release(self) -> bool:
|
||||
return bool((self.evidence_json or {}).get("blocked_release", False))
|
||||
# [/DEF:ComplianceViolation:Class]
|
||||
|
||||
# [DEF:ComplianceReport:Class]
@@ -392,6 +591,65 @@ class ComplianceReport(Base):
    summary_json = Column(JSON, nullable=False)
    generated_at = Column(DateTime, default=datetime.utcnow)
    immutable = Column(Boolean, default=True)

    def __init__(self, **kwargs):
        if "report_id" in kwargs:
            kwargs["id"] = kwargs.pop("report_id")
        if "check_run_id" in kwargs:
            kwargs["run_id"] = kwargs.pop("check_run_id")
        operator_summary = kwargs.pop("operator_summary", None)
        structured_payload_ref = kwargs.pop("structured_payload_ref", None)
        violations_count = kwargs.pop("violations_count", None)
        blocking_violations_count = kwargs.pop("blocking_violations_count", None)

        final_status = kwargs.get("final_status")
        final_status_value = getattr(final_status, "value", final_status)

        if (
            final_status_value in {CheckFinalStatus.BLOCKED.value, ComplianceDecision.BLOCKED.value}
            and blocking_violations_count is not None
            and int(blocking_violations_count) <= 0
        ):
            raise ValueError("blocked report requires blocking violations")

        if (
            operator_summary is not None
            or structured_payload_ref is not None
            or violations_count is not None
            or blocking_violations_count is not None
        ):
            kwargs["summary_json"] = {
                "operator_summary": operator_summary or "",
                "structured_payload_ref": structured_payload_ref,
                "violations_count": int(violations_count or 0),
                "blocking_violations_count": int(blocking_violations_count or 0),
            }

        super().__init__(**kwargs)

    @property
    def report_id(self) -> str:
        return self.id

    @property
    def check_run_id(self) -> str:
        return self.run_id

    @property
    def operator_summary(self) -> str:
        return (self.summary_json or {}).get("operator_summary", "")

    @property
    def structured_payload_ref(self) -> Optional[str]:
        return (self.summary_json or {}).get("structured_payload_ref")

    @property
    def violations_count(self) -> int:
        return int((self.summary_json or {}).get("violations_count", 0))

    @property
    def blocking_violations_count(self) -> int:
        return int((self.summary_json or {}).get("blocking_violations_count", 0))
# [/DEF:ComplianceReport:Class]

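# Sketch of the guard above (hypothetical values, not from the diff): a report
# whose final_status is BLOCKED must carry at least one blocking violation,
# otherwise __init__ raises before the row is ever constructed.
#
#     ComplianceReport(
#         report_id="rep-1",
#         check_run_id="check-42",
#         final_status=CheckFinalStatus.BLOCKED,
#         violations_count=3,
#         blocking_violations_count=0,
#     )  # -> ValueError("blocked report requires blocking violations")
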
# [DEF:ApprovalDecision:Class]

681 backend/src/models/dataset_review.py Normal file
@@ -0,0 +1,681 @@
# [DEF:DatasetReviewModels:Module]
#
# @TIER: STANDARD
# @COMPLEXITY: 3
# @SEMANTICS: dataset_review, session, profile, findings, semantics, clarification, execution, sqlalchemy
# @PURPOSE: SQLAlchemy models for the dataset review orchestration flow.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> [AuthModels]
# @RELATION: DEPENDS_ON -> [MappingModels]
#
# @INVARIANT: Session and profile entities are strictly scoped to an authenticated user.

# [SECTION: IMPORTS]
import uuid
import enum
from datetime import datetime
from typing import List, Optional
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, Float, Enum as SQLEnum, Table
from sqlalchemy.orm import relationship
from .mapping import Base
# [/SECTION]

# [DEF:SessionStatus:Class]
class SessionStatus(str, enum.Enum):
    ACTIVE = "active"
    PAUSED = "paused"
    COMPLETED = "completed"
    ARCHIVED = "archived"
    CANCELLED = "cancelled"
# [/DEF:SessionStatus:Class]

# [DEF:SessionPhase:Class]
class SessionPhase(str, enum.Enum):
    INTAKE = "intake"
    RECOVERY = "recovery"
    REVIEW = "review"
    SEMANTIC_REVIEW = "semantic_review"
    CLARIFICATION = "clarification"
    MAPPING_REVIEW = "mapping_review"
    PREVIEW = "preview"
    LAUNCH = "launch"
    POST_RUN = "post_run"
# [/DEF:SessionPhase:Class]

# [DEF:ReadinessState:Class]
class ReadinessState(str, enum.Enum):
    EMPTY = "empty"
    IMPORTING = "importing"
    REVIEW_READY = "review_ready"
    SEMANTIC_SOURCE_REVIEW_NEEDED = "semantic_source_review_needed"
    CLARIFICATION_NEEDED = "clarification_needed"
    CLARIFICATION_ACTIVE = "clarification_active"
    MAPPING_REVIEW_NEEDED = "mapping_review_needed"
    COMPILED_PREVIEW_READY = "compiled_preview_ready"
    PARTIALLY_READY = "partially_ready"
    RUN_READY = "run_ready"
    RUN_IN_PROGRESS = "run_in_progress"
    COMPLETED = "completed"
    RECOVERY_REQUIRED = "recovery_required"
# [/DEF:ReadinessState:Class]

# [DEF:RecommendedAction:Class]
class RecommendedAction(str, enum.Enum):
    IMPORT_FROM_SUPERSET = "import_from_superset"
    REVIEW_DOCUMENTATION = "review_documentation"
    APPLY_SEMANTIC_SOURCE = "apply_semantic_source"
    START_CLARIFICATION = "start_clarification"
    ANSWER_NEXT_QUESTION = "answer_next_question"
    APPROVE_MAPPING = "approve_mapping"
    GENERATE_SQL_PREVIEW = "generate_sql_preview"
    COMPLETE_REQUIRED_VALUES = "complete_required_values"
    LAUNCH_DATASET = "launch_dataset"
    RESUME_SESSION = "resume_session"
    EXPORT_OUTPUTS = "export_outputs"
# [/DEF:RecommendedAction:Class]

# [DEF:SessionCollaboratorRole:Class]
class SessionCollaboratorRole(str, enum.Enum):
    VIEWER = "viewer"
    REVIEWER = "reviewer"
    APPROVER = "approver"
# [/DEF:SessionCollaboratorRole:Class]

# [DEF:SessionCollaborator:Class]
class SessionCollaborator(Base):
    __tablename__ = "session_collaborators"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    user_id = Column(String, ForeignKey("users.id"), nullable=False)
    role = Column(SQLEnum(SessionCollaboratorRole), nullable=False)
    added_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="collaborators")
    user = relationship("User")
# [/DEF:SessionCollaborator:Class]

# [DEF:DatasetReviewSession:Class]
class DatasetReviewSession(Base):
    __tablename__ = "dataset_review_sessions"

    session_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    user_id = Column(String, ForeignKey("users.id"), nullable=False)
    environment_id = Column(String, ForeignKey("environments.id"), nullable=False)
    source_kind = Column(String, nullable=False)  # superset_link, dataset_selection
    source_input = Column(String, nullable=False)
    dataset_ref = Column(String, nullable=False)
    dataset_id = Column(Integer, nullable=True)
    dashboard_id = Column(Integer, nullable=True)
    readiness_state = Column(SQLEnum(ReadinessState), nullable=False, default=ReadinessState.EMPTY)
    recommended_action = Column(SQLEnum(RecommendedAction), nullable=False, default=RecommendedAction.IMPORT_FROM_SUPERSET)
    status = Column(SQLEnum(SessionStatus), nullable=False, default=SessionStatus.ACTIVE)
    current_phase = Column(SQLEnum(SessionPhase), nullable=False, default=SessionPhase.INTAKE)
    active_task_id = Column(String, nullable=True)
    last_preview_id = Column(String, nullable=True)
    last_run_context_id = Column(String, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
    last_activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    closed_at = Column(DateTime, nullable=True)

    owner = relationship("User")
    collaborators = relationship("SessionCollaborator", back_populates="session", cascade="all, delete-orphan")
    profile = relationship("DatasetProfile", back_populates="session", uselist=False, cascade="all, delete-orphan")
    findings = relationship("ValidationFinding", back_populates="session", cascade="all, delete-orphan")
    semantic_sources = relationship("SemanticSource", back_populates="session", cascade="all, delete-orphan")
    semantic_fields = relationship("SemanticFieldEntry", back_populates="session", cascade="all, delete-orphan")
    imported_filters = relationship("ImportedFilter", back_populates="session", cascade="all, delete-orphan")
    template_variables = relationship("TemplateVariable", back_populates="session", cascade="all, delete-orphan")
    execution_mappings = relationship("ExecutionMapping", back_populates="session", cascade="all, delete-orphan")
    clarification_sessions = relationship("ClarificationSession", back_populates="session", cascade="all, delete-orphan")
    previews = relationship("CompiledPreview", back_populates="session", cascade="all, delete-orphan")
    run_contexts = relationship("DatasetRunContext", back_populates="session", cascade="all, delete-orphan")
    export_artifacts = relationship("ExportArtifact", back_populates="session", cascade="all, delete-orphan")
    events = relationship("SessionEvent", back_populates="session", cascade="all, delete-orphan")
# [/DEF:DatasetReviewSession:Class]

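# Construction sketch (illustrative names, not from the diff): a new session
# only needs the source fields; the column defaults (EMPTY readiness,
# IMPORT_FROM_SUPERSET action, ACTIVE status, INTAKE phase) are applied by
# SQLAlchemy at INSERT time, not at object construction.
#
#     session = DatasetReviewSession(
#         user_id=owner.id,                              # hypothetical owner
#         environment_id=env.id,                         # hypothetical environment
#         source_kind="superset_link",
#         source_input="https://superset.example/d/13",  # hypothetical URL
#         dataset_ref="examples.main.ta_0001",
#     )
#     db.add(session)
#     db.commit()   # defaults populate here
#     assert session.readiness_state == ReadinessState.EMPTY
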
# [DEF:BusinessSummarySource:Class]
class BusinessSummarySource(str, enum.Enum):
    CONFIRMED = "confirmed"
    IMPORTED = "imported"
    INFERRED = "inferred"
    AI_DRAFT = "ai_draft"
    MANUAL_OVERRIDE = "manual_override"
# [/DEF:BusinessSummarySource:Class]

# [DEF:ConfidenceState:Class]
class ConfidenceState(str, enum.Enum):
    CONFIRMED = "confirmed"
    MOSTLY_CONFIRMED = "mostly_confirmed"
    MIXED = "mixed"
    LOW_CONFIDENCE = "low_confidence"
    UNRESOLVED = "unresolved"
# [/DEF:ConfidenceState:Class]

# [DEF:DatasetProfile:Class]
class DatasetProfile(Base):
    __tablename__ = "dataset_profiles"

    profile_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False, unique=True)
    dataset_name = Column(String, nullable=False)
    schema_name = Column(String, nullable=True)
    database_name = Column(String, nullable=True)
    business_summary = Column(Text, nullable=False)
    business_summary_source = Column(SQLEnum(BusinessSummarySource), nullable=False)
    description = Column(Text, nullable=True)
    dataset_type = Column(String, nullable=True)  # table, virtual, sqllab_view, unknown
    is_sqllab_view = Column(Boolean, nullable=False, default=False)
    completeness_score = Column(Float, nullable=True)
    confidence_state = Column(SQLEnum(ConfidenceState), nullable=False)
    has_blocking_findings = Column(Boolean, nullable=False, default=False)
    has_warning_findings = Column(Boolean, nullable=False, default=False)
    manual_summary_locked = Column(Boolean, nullable=False, default=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="profile")
# [/DEF:DatasetProfile:Class]

# [DEF:FindingArea:Class]
class FindingArea(str, enum.Enum):
    SOURCE_INTAKE = "source_intake"
    DATASET_PROFILE = "dataset_profile"
    SEMANTIC_ENRICHMENT = "semantic_enrichment"
    CLARIFICATION = "clarification"
    FILTER_RECOVERY = "filter_recovery"
    TEMPLATE_MAPPING = "template_mapping"
    COMPILED_PREVIEW = "compiled_preview"
    LAUNCH = "launch"
    AUDIT = "audit"
# [/DEF:FindingArea:Class]

# [DEF:FindingSeverity:Class]
class FindingSeverity(str, enum.Enum):
    BLOCKING = "blocking"
    WARNING = "warning"
    INFORMATIONAL = "informational"
# [/DEF:FindingSeverity:Class]

# [DEF:ResolutionState:Class]
class ResolutionState(str, enum.Enum):
    OPEN = "open"
    RESOLVED = "resolved"
    APPROVED = "approved"
    SKIPPED = "skipped"
    DEFERRED = "deferred"
    EXPERT_REVIEW = "expert_review"
# [/DEF:ResolutionState:Class]

# [DEF:ValidationFinding:Class]
class ValidationFinding(Base):
    __tablename__ = "validation_findings"

    finding_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    area = Column(SQLEnum(FindingArea), nullable=False)
    severity = Column(SQLEnum(FindingSeverity), nullable=False)
    code = Column(String, nullable=False)
    title = Column(String, nullable=False)
    message = Column(Text, nullable=False)
    resolution_state = Column(SQLEnum(ResolutionState), nullable=False, default=ResolutionState.OPEN)
    resolution_note = Column(Text, nullable=True)
    caused_by_ref = Column(String, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    resolved_at = Column(DateTime, nullable=True)

    session = relationship("DatasetReviewSession", back_populates="findings")
# [/DEF:ValidationFinding:Class]

# [DEF:SemanticSourceType:Class]
class SemanticSourceType(str, enum.Enum):
    UPLOADED_FILE = "uploaded_file"
    CONNECTED_DICTIONARY = "connected_dictionary"
    REFERENCE_DATASET = "reference_dataset"
    NEIGHBOR_DATASET = "neighbor_dataset"
    AI_GENERATED = "ai_generated"
# [/DEF:SemanticSourceType:Class]

# [DEF:TrustLevel:Class]
class TrustLevel(str, enum.Enum):
    TRUSTED = "trusted"
    RECOMMENDED = "recommended"
    CANDIDATE = "candidate"
    GENERATED = "generated"
# [/DEF:TrustLevel:Class]

# [DEF:SemanticSourceStatus:Class]
class SemanticSourceStatus(str, enum.Enum):
    AVAILABLE = "available"
    SELECTED = "selected"
    APPLIED = "applied"
    REJECTED = "rejected"
    PARTIAL = "partial"
    FAILED = "failed"
# [/DEF:SemanticSourceStatus:Class]

# [DEF:SemanticSource:Class]
class SemanticSource(Base):
    __tablename__ = "semantic_sources"

    source_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    source_type = Column(SQLEnum(SemanticSourceType), nullable=False)
    source_ref = Column(String, nullable=False)
    source_version = Column(String, nullable=False)
    display_name = Column(String, nullable=False)
    trust_level = Column(SQLEnum(TrustLevel), nullable=False)
    schema_overlap_score = Column(Float, nullable=True)
    status = Column(SQLEnum(SemanticSourceStatus), nullable=False, default=SemanticSourceStatus.AVAILABLE)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="semantic_sources")
# [/DEF:SemanticSource:Class]

# [DEF:FieldKind:Class]
class FieldKind(str, enum.Enum):
    COLUMN = "column"
    METRIC = "metric"
    FILTER_DIMENSION = "filter_dimension"
    PARAMETER = "parameter"
# [/DEF:FieldKind:Class]

# [DEF:FieldProvenance:Class]
class FieldProvenance(str, enum.Enum):
    DICTIONARY_EXACT = "dictionary_exact"
    REFERENCE_IMPORTED = "reference_imported"
    FUZZY_INFERRED = "fuzzy_inferred"
    AI_GENERATED = "ai_generated"
    MANUAL_OVERRIDE = "manual_override"
    UNRESOLVED = "unresolved"
# [/DEF:FieldProvenance:Class]

# [DEF:SemanticFieldEntry:Class]
class SemanticFieldEntry(Base):
    __tablename__ = "semantic_field_entries"

    field_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    field_name = Column(String, nullable=False)
    field_kind = Column(SQLEnum(FieldKind), nullable=False)
    verbose_name = Column(String, nullable=True)
    description = Column(Text, nullable=True)
    display_format = Column(String, nullable=True)
    provenance = Column(SQLEnum(FieldProvenance), nullable=False, default=FieldProvenance.UNRESOLVED)
    source_id = Column(String, nullable=True)
    source_version = Column(String, nullable=True)
    confidence_rank = Column(Integer, nullable=True)
    is_locked = Column(Boolean, nullable=False, default=False)
    has_conflict = Column(Boolean, nullable=False, default=False)
    needs_review = Column(Boolean, nullable=False, default=True)
    last_changed_by = Column(String, nullable=False)  # system, user, agent
    user_feedback = Column(String, nullable=True)  # up, down, null
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="semantic_fields")
    candidates = relationship("SemanticCandidate", back_populates="field", cascade="all, delete-orphan")
# [/DEF:SemanticFieldEntry:Class]

# [DEF:CandidateMatchType:Class]
class CandidateMatchType(str, enum.Enum):
    EXACT = "exact"
    REFERENCE = "reference"
    FUZZY = "fuzzy"
    GENERATED = "generated"
# [/DEF:CandidateMatchType:Class]

# [DEF:CandidateStatus:Class]
class CandidateStatus(str, enum.Enum):
    PROPOSED = "proposed"
    ACCEPTED = "accepted"
    REJECTED = "rejected"
    SUPERSEDED = "superseded"
# [/DEF:CandidateStatus:Class]

# [DEF:SemanticCandidate:Class]
class SemanticCandidate(Base):
    __tablename__ = "semantic_candidates"

    candidate_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    field_id = Column(String, ForeignKey("semantic_field_entries.field_id"), nullable=False)
    source_id = Column(String, nullable=True)
    candidate_rank = Column(Integer, nullable=False)
    match_type = Column(SQLEnum(CandidateMatchType), nullable=False)
    confidence_score = Column(Float, nullable=False)
    proposed_verbose_name = Column(String, nullable=True)
    proposed_description = Column(Text, nullable=True)
    proposed_display_format = Column(String, nullable=True)
    status = Column(SQLEnum(CandidateStatus), nullable=False, default=CandidateStatus.PROPOSED)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    field = relationship("SemanticFieldEntry", back_populates="candidates")
# [/DEF:SemanticCandidate:Class]

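# Relationship sketch (hypothetical values): candidates are rank-ordered
# proposals hanging off a SemanticFieldEntry; accepting one is application
# logic, e.g. copying the proposed_* values onto the field and flipping
# statuses. The cascade above deletes candidates with their field.
#
#     field = SemanticFieldEntry(session_id=session.session_id,
#                                field_name="revenue",
#                                field_kind=FieldKind.METRIC,
#                                last_changed_by="system")
#     field.candidates.append(SemanticCandidate(candidate_rank=1,
#                                               match_type=CandidateMatchType.EXACT,
#                                               confidence_score=0.98,
#                                               proposed_verbose_name="Revenue (USD)"))
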
# [DEF:FilterSource:Class]
class FilterSource(str, enum.Enum):
    SUPERSET_NATIVE = "superset_native"
    SUPERSET_URL = "superset_url"
    MANUAL = "manual"
    INFERRED = "inferred"
# [/DEF:FilterSource:Class]

# [DEF:FilterConfidenceState:Class]
class FilterConfidenceState(str, enum.Enum):
    CONFIRMED = "confirmed"
    IMPORTED = "imported"
    INFERRED = "inferred"
    AI_DRAFT = "ai_draft"
    UNRESOLVED = "unresolved"
# [/DEF:FilterConfidenceState:Class]

# [DEF:FilterRecoveryStatus:Class]
class FilterRecoveryStatus(str, enum.Enum):
    RECOVERED = "recovered"
    PARTIAL = "partial"
    MISSING = "missing"
    CONFLICTED = "conflicted"
# [/DEF:FilterRecoveryStatus:Class]

# [DEF:ImportedFilter:Class]
class ImportedFilter(Base):
    __tablename__ = "imported_filters"

    filter_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    filter_name = Column(String, nullable=False)
    display_name = Column(String, nullable=True)
    raw_value = Column(JSON, nullable=False)
    normalized_value = Column(JSON, nullable=True)
    source = Column(SQLEnum(FilterSource), nullable=False)
    confidence_state = Column(SQLEnum(FilterConfidenceState), nullable=False)
    requires_confirmation = Column(Boolean, nullable=False, default=False)
    recovery_status = Column(SQLEnum(FilterRecoveryStatus), nullable=False)
    notes = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="imported_filters")
# [/DEF:ImportedFilter:Class]

# [DEF:VariableKind:Class]
class VariableKind(str, enum.Enum):
    NATIVE_FILTER = "native_filter"
    PARAMETER = "parameter"
    DERIVED = "derived"
    UNKNOWN = "unknown"
# [/DEF:VariableKind:Class]

# [DEF:MappingStatus:Class]
class MappingStatus(str, enum.Enum):
    UNMAPPED = "unmapped"
    PROPOSED = "proposed"
    APPROVED = "approved"
    OVERRIDDEN = "overridden"
    INVALID = "invalid"
# [/DEF:MappingStatus:Class]

# [DEF:TemplateVariable:Class]
class TemplateVariable(Base):
    __tablename__ = "template_variables"

    variable_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    variable_name = Column(String, nullable=False)
    expression_source = Column(Text, nullable=False)
    variable_kind = Column(SQLEnum(VariableKind), nullable=False)
    is_required = Column(Boolean, nullable=False, default=True)
    default_value = Column(JSON, nullable=True)
    mapping_status = Column(SQLEnum(MappingStatus), nullable=False, default=MappingStatus.UNMAPPED)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="template_variables")
# [/DEF:TemplateVariable:Class]

# [DEF:MappingMethod:Class]
class MappingMethod(str, enum.Enum):
    DIRECT_MATCH = "direct_match"
    HEURISTIC_MATCH = "heuristic_match"
    SEMANTIC_MATCH = "semantic_match"
    MANUAL_OVERRIDE = "manual_override"
# [/DEF:MappingMethod:Class]

# [DEF:MappingWarningLevel:Class]
class MappingWarningLevel(str, enum.Enum):
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
# [/DEF:MappingWarningLevel:Class]

# [DEF:ApprovalState:Class]
class ApprovalState(str, enum.Enum):
    PENDING = "pending"
    APPROVED = "approved"
    REJECTED = "rejected"
    NOT_REQUIRED = "not_required"
# [/DEF:ApprovalState:Class]

# [DEF:ExecutionMapping:Class]
class ExecutionMapping(Base):
    __tablename__ = "execution_mappings"

    mapping_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    filter_id = Column(String, nullable=False)
    variable_id = Column(String, nullable=False)
    mapping_method = Column(SQLEnum(MappingMethod), nullable=False)
    raw_input_value = Column(JSON, nullable=False)
    effective_value = Column(JSON, nullable=True)
    transformation_note = Column(Text, nullable=True)
    warning_level = Column(SQLEnum(MappingWarningLevel), nullable=True)
    requires_explicit_approval = Column(Boolean, nullable=False, default=False)
    approval_state = Column(SQLEnum(ApprovalState), nullable=False, default=ApprovalState.NOT_REQUIRED)
    approved_by_user_id = Column(String, nullable=True)
    approved_at = Column(DateTime, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="execution_mappings")
# [/DEF:ExecutionMapping:Class]

# [DEF:ClarificationStatus:Class]
class ClarificationStatus(str, enum.Enum):
    PENDING = "pending"
    ACTIVE = "active"
    PAUSED = "paused"
    COMPLETED = "completed"
    CANCELLED = "cancelled"
# [/DEF:ClarificationStatus:Class]

# [DEF:ClarificationSession:Class]
class ClarificationSession(Base):
    __tablename__ = "clarification_sessions"

    clarification_session_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    status = Column(SQLEnum(ClarificationStatus), nullable=False, default=ClarificationStatus.PENDING)
    current_question_id = Column(String, nullable=True)
    resolved_count = Column(Integer, nullable=False, default=0)
    remaining_count = Column(Integer, nullable=False, default=0)
    summary_delta = Column(Text, nullable=True)
    started_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
    completed_at = Column(DateTime, nullable=True)

    session = relationship("DatasetReviewSession", back_populates="clarification_sessions")
    questions = relationship("ClarificationQuestion", back_populates="clarification_session", cascade="all, delete-orphan")
# [/DEF:ClarificationSession:Class]

# [DEF:QuestionState:Class]
class QuestionState(str, enum.Enum):
    OPEN = "open"
    ANSWERED = "answered"
    SKIPPED = "skipped"
    EXPERT_REVIEW = "expert_review"
    SUPERSEDED = "superseded"
# [/DEF:QuestionState:Class]

# [DEF:ClarificationQuestion:Class]
class ClarificationQuestion(Base):
    __tablename__ = "clarification_questions"

    question_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    clarification_session_id = Column(String, ForeignKey("clarification_sessions.clarification_session_id"), nullable=False)
    topic_ref = Column(String, nullable=False)
    question_text = Column(Text, nullable=False)
    why_it_matters = Column(Text, nullable=False)
    current_guess = Column(Text, nullable=True)
    priority = Column(Integer, nullable=False, default=0)
    state = Column(SQLEnum(QuestionState), nullable=False, default=QuestionState.OPEN)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    clarification_session = relationship("ClarificationSession", back_populates="questions")
    options = relationship("ClarificationOption", back_populates="question", cascade="all, delete-orphan")
    answer = relationship("ClarificationAnswer", back_populates="question", uselist=False, cascade="all, delete-orphan")
# [/DEF:ClarificationQuestion:Class]

# [DEF:ClarificationOption:Class]
class ClarificationOption(Base):
    __tablename__ = "clarification_options"

    option_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    question_id = Column(String, ForeignKey("clarification_questions.question_id"), nullable=False)
    label = Column(String, nullable=False)
    value = Column(String, nullable=False)
    is_recommended = Column(Boolean, nullable=False, default=False)
    display_order = Column(Integer, nullable=False, default=0)

    question = relationship("ClarificationQuestion", back_populates="options")
# [/DEF:ClarificationOption:Class]

# [DEF:AnswerKind:Class]
class AnswerKind(str, enum.Enum):
    SELECTED = "selected"
    CUSTOM = "custom"
    SKIPPED = "skipped"
    EXPERT_REVIEW = "expert_review"
# [/DEF:AnswerKind:Class]

# [DEF:ClarificationAnswer:Class]
class ClarificationAnswer(Base):
    __tablename__ = "clarification_answers"

    answer_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    question_id = Column(String, ForeignKey("clarification_questions.question_id"), nullable=False, unique=True)
    answer_kind = Column(SQLEnum(AnswerKind), nullable=False)
    answer_value = Column(Text, nullable=True)
    answered_by_user_id = Column(String, nullable=False)
    impact_summary = Column(Text, nullable=True)
    user_feedback = Column(String, nullable=True)  # up, down, null
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    question = relationship("ClarificationQuestion", back_populates="answer")
# [/DEF:ClarificationAnswer:Class]

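# Flow sketch (hypothetical, not from the diff): each ClarificationQuestion
# carries options and at most one answer (the answers table is unique on
# question_id), so answering is modelled as attaching a ClarificationAnswer
# through the uselist=False relationship rather than mutating the question row.
#
#     question.answer = ClarificationAnswer(
#         answer_kind=AnswerKind.SELECTED,
#         answer_value=option.value,        # option chosen from question.options
#         answered_by_user_id=user.id,      # hypothetical user
#     )
#     question.state = QuestionState.ANSWERED
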
# [DEF:PreviewStatus:Class]
class PreviewStatus(str, enum.Enum):
    PENDING = "pending"
    READY = "ready"
    FAILED = "failed"
    STALE = "stale"
# [/DEF:PreviewStatus:Class]

# [DEF:CompiledPreview:Class]
class CompiledPreview(Base):
    __tablename__ = "compiled_previews"

    preview_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    preview_status = Column(SQLEnum(PreviewStatus), nullable=False, default=PreviewStatus.PENDING)
    compiled_sql = Column(Text, nullable=True)
    preview_fingerprint = Column(String, nullable=False)
    compiled_by = Column(String, nullable=False, default="superset")
    error_code = Column(String, nullable=True)
    error_details = Column(Text, nullable=True)
    compiled_at = Column(DateTime, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="previews")
# [/DEF:CompiledPreview:Class]

# [DEF:LaunchStatus:Class]
class LaunchStatus(str, enum.Enum):
    STARTED = "started"
    SUCCESS = "success"
    FAILED = "failed"
# [/DEF:LaunchStatus:Class]

# [DEF:DatasetRunContext:Class]
class DatasetRunContext(Base):
    __tablename__ = "dataset_run_contexts"

    run_context_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    dataset_ref = Column(String, nullable=False)
    environment_id = Column(String, nullable=False)
    preview_id = Column(String, nullable=False)
    sql_lab_session_ref = Column(String, nullable=False)
    effective_filters = Column(JSON, nullable=False)
    template_params = Column(JSON, nullable=False)
    approved_mapping_ids = Column(JSON, nullable=False)
    semantic_decision_refs = Column(JSON, nullable=False)
    open_warning_refs = Column(JSON, nullable=False)
    launch_status = Column(SQLEnum(LaunchStatus), nullable=False)
    launch_error = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="run_contexts")
# [/DEF:DatasetRunContext:Class]

# [DEF:SessionEvent:Class]
class SessionEvent(Base):
    __tablename__ = "session_events"

    session_event_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    actor_user_id = Column(String, ForeignKey("users.id"), nullable=False)
    event_type = Column(String, nullable=False)
    event_summary = Column(Text, nullable=False)
    current_phase = Column(String, nullable=True)
    readiness_state = Column(String, nullable=True)
    event_details = Column(JSON, nullable=False, default=dict)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="events")
    actor = relationship("User")
# [/DEF:SessionEvent:Class]

# [DEF:ArtifactType:Class]
class ArtifactType(str, enum.Enum):
    DOCUMENTATION = "documentation"
    VALIDATION_REPORT = "validation_report"
    RUN_SUMMARY = "run_summary"
# [/DEF:ArtifactType:Class]

# [DEF:ArtifactFormat:Class]
class ArtifactFormat(str, enum.Enum):
    JSON = "json"
    MARKDOWN = "markdown"
    CSV = "csv"
    PDF = "pdf"
# [/DEF:ArtifactFormat:Class]

# [DEF:ExportArtifact:Class]
class ExportArtifact(Base):
    __tablename__ = "export_artifacts"

    artifact_id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    session_id = Column(String, ForeignKey("dataset_review_sessions.session_id"), nullable=False)
    artifact_type = Column(SQLEnum(ArtifactType), nullable=False)
    format = Column(SQLEnum(ArtifactFormat), nullable=False)
    storage_ref = Column(String, nullable=False)
    created_by_user_id = Column(String, nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    session = relationship("DatasetReviewSession", back_populates="export_artifacts")
# [/DEF:ExportArtifact:Class]

# [/DEF:DatasetReviewModels:Module]
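# Since every model above shares the Base imported from .mapping, the whole
# review schema can be created in one call at bootstrap. A minimal sketch,
# assuming an engine configured elsewhere (the URL here is hypothetical):
#
#     from sqlalchemy import create_engine
#     engine = create_engine("sqlite:///review.db")
#     Base.metadata.create_all(engine)
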
364 backend/src/schemas/dataset_review.py Normal file
@@ -0,0 +1,364 @@
# [DEF:DatasetReviewSchemas:Module]
#
# @COMPLEXITY: 3
# @SEMANTICS: dataset_review, schemas, pydantic, session, profile, findings
# @PURPOSE: Defines API schemas for the dataset review orchestration flow.
# @LAYER: API
# @RELATION: DEPENDS_ON -> [DatasetReviewModels]

# [SECTION: IMPORTS]
from datetime import datetime
from typing import List, Optional, Any
from pydantic import BaseModel, Field
from src.models.dataset_review import (
    SessionStatus,
    SessionPhase,
    ReadinessState,
    RecommendedAction,
    SessionCollaboratorRole,
    BusinessSummarySource,
    ConfidenceState,
    FindingArea,
    FindingSeverity,
    ResolutionState,
    SemanticSourceType,
    TrustLevel,
    SemanticSourceStatus,
    FieldKind,
    FieldProvenance,
    CandidateMatchType,
    CandidateStatus,
    FilterSource,
    FilterConfidenceState,
    FilterRecoveryStatus,
    VariableKind,
    MappingStatus,
    MappingMethod,
    MappingWarningLevel,
    ApprovalState,
    ClarificationStatus,
    QuestionState,
    AnswerKind,
    PreviewStatus,
    LaunchStatus,
    ArtifactType,
    ArtifactFormat
)
# [/SECTION]

# [DEF:SessionCollaboratorDto:Class]
class SessionCollaboratorDto(BaseModel):
    user_id: str
    role: SessionCollaboratorRole
    added_at: datetime

    class Config:
        from_attributes = True
# [/DEF:SessionCollaboratorDto:Class]

# [DEF:DatasetProfileDto:Class]
class DatasetProfileDto(BaseModel):
    profile_id: str
    session_id: str
    dataset_name: str
    schema_name: Optional[str] = None
    database_name: Optional[str] = None
    business_summary: str
    business_summary_source: BusinessSummarySource
    description: Optional[str] = None
    dataset_type: Optional[str] = None
    is_sqllab_view: bool
    completeness_score: Optional[float] = None
    confidence_state: ConfidenceState
    has_blocking_findings: bool
    has_warning_findings: bool
    manual_summary_locked: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
# [/DEF:DatasetProfileDto:Class]

# [DEF:ValidationFindingDto:Class]
class ValidationFindingDto(BaseModel):
    finding_id: str
    session_id: str
    area: FindingArea
    severity: FindingSeverity
    code: str
    title: str
    message: str
    resolution_state: ResolutionState
    resolution_note: Optional[str] = None
    caused_by_ref: Optional[str] = None
    created_at: datetime
    resolved_at: Optional[datetime] = None

    class Config:
        from_attributes = True
# [/DEF:ValidationFindingDto:Class]

# [DEF:SemanticSourceDto:Class]
class SemanticSourceDto(BaseModel):
    source_id: str
    session_id: str
    source_type: SemanticSourceType
    source_ref: str
    source_version: str
    display_name: str
    trust_level: TrustLevel
    schema_overlap_score: Optional[float] = None
    status: SemanticSourceStatus
    created_at: datetime

    class Config:
        from_attributes = True
# [/DEF:SemanticSourceDto:Class]

# [DEF:SemanticCandidateDto:Class]
class SemanticCandidateDto(BaseModel):
    candidate_id: str
    field_id: str
    source_id: Optional[str] = None
    candidate_rank: int
    match_type: CandidateMatchType
    confidence_score: float
    proposed_verbose_name: Optional[str] = None
    proposed_description: Optional[str] = None
    proposed_display_format: Optional[str] = None
    status: CandidateStatus
    created_at: datetime

    class Config:
        from_attributes = True
# [/DEF:SemanticCandidateDto:Class]

# [DEF:SemanticFieldEntryDto:Class]
class SemanticFieldEntryDto(BaseModel):
    field_id: str
    session_id: str
    field_name: str
    field_kind: FieldKind
    verbose_name: Optional[str] = None
    description: Optional[str] = None
    display_format: Optional[str] = None
    provenance: FieldProvenance
    source_id: Optional[str] = None
    source_version: Optional[str] = None
    confidence_rank: Optional[int] = None
    is_locked: bool
    has_conflict: bool
    needs_review: bool
    last_changed_by: str
    user_feedback: Optional[str] = None
    created_at: datetime
    updated_at: datetime
    candidates: List[SemanticCandidateDto] = []

    class Config:
        from_attributes = True
# [/DEF:SemanticFieldEntryDto:Class]

# [DEF:ImportedFilterDto:Class]
class ImportedFilterDto(BaseModel):
    filter_id: str
    session_id: str
    filter_name: str
    display_name: Optional[str] = None
    raw_value: Any
    normalized_value: Optional[Any] = None
    source: FilterSource
    confidence_state: FilterConfidenceState
    requires_confirmation: bool
    recovery_status: FilterRecoveryStatus
    notes: Optional[str] = None
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
# [/DEF:ImportedFilterDto:Class]

# [DEF:TemplateVariableDto:Class]
class TemplateVariableDto(BaseModel):
    variable_id: str
    session_id: str
    variable_name: str
    expression_source: str
    variable_kind: VariableKind
    is_required: bool
    default_value: Optional[Any] = None
    mapping_status: MappingStatus
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
# [/DEF:TemplateVariableDto:Class]

# [DEF:ExecutionMappingDto:Class]
class ExecutionMappingDto(BaseModel):
    mapping_id: str
    session_id: str
    filter_id: str
    variable_id: str
    mapping_method: MappingMethod
    raw_input_value: Any
    effective_value: Optional[Any] = None
    transformation_note: Optional[str] = None
    warning_level: Optional[MappingWarningLevel] = None
    requires_explicit_approval: bool
    approval_state: ApprovalState
    approved_by_user_id: Optional[str] = None
    approved_at: Optional[datetime] = None
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
# [/DEF:ExecutionMappingDto:Class]

# [DEF:ClarificationOptionDto:Class]
class ClarificationOptionDto(BaseModel):
    option_id: str
    question_id: str
    label: str
    value: str
    is_recommended: bool
    display_order: int

    class Config:
        from_attributes = True
# [/DEF:ClarificationOptionDto:Class]

# [DEF:ClarificationAnswerDto:Class]
class ClarificationAnswerDto(BaseModel):
    answer_id: str
    question_id: str
    answer_kind: AnswerKind
    answer_value: Optional[str] = None
    answered_by_user_id: str
    impact_summary: Optional[str] = None
    user_feedback: Optional[str] = None
    created_at: datetime

    class Config:
        from_attributes = True
# [/DEF:ClarificationAnswerDto:Class]

# [DEF:ClarificationQuestionDto:Class]
class ClarificationQuestionDto(BaseModel):
    question_id: str
    clarification_session_id: str
    topic_ref: str
    question_text: str
    why_it_matters: str
    current_guess: Optional[str] = None
    priority: int
    state: QuestionState
    created_at: datetime
    updated_at: datetime
    options: List[ClarificationOptionDto] = []
    answer: Optional[ClarificationAnswerDto] = None

    class Config:
        from_attributes = True
# [/DEF:ClarificationQuestionDto:Class]

# [DEF:ClarificationSessionDto:Class]
class ClarificationSessionDto(BaseModel):
    clarification_session_id: str
    session_id: str
    status: ClarificationStatus
    current_question_id: Optional[str] = None
    resolved_count: int
    remaining_count: int
    summary_delta: Optional[str] = None
    started_at: datetime
    updated_at: datetime
    completed_at: Optional[datetime] = None
    questions: List[ClarificationQuestionDto] = []

    class Config:
        from_attributes = True
# [/DEF:ClarificationSessionDto:Class]

# [DEF:CompiledPreviewDto:Class]
class CompiledPreviewDto(BaseModel):
    preview_id: str
    session_id: str
    preview_status: PreviewStatus
    compiled_sql: Optional[str] = None
    preview_fingerprint: str
    compiled_by: str
    error_code: Optional[str] = None
    error_details: Optional[str] = None
    compiled_at: Optional[datetime] = None
    created_at: datetime

    class Config:
        from_attributes = True
# [/DEF:CompiledPreviewDto:Class]

# [DEF:DatasetRunContextDto:Class]
class DatasetRunContextDto(BaseModel):
    run_context_id: str
    session_id: str
    dataset_ref: str
    environment_id: str
    preview_id: str
    sql_lab_session_ref: str
    effective_filters: Any
    template_params: Any
    approved_mapping_ids: List[str]
    semantic_decision_refs: List[str]
    open_warning_refs: List[str]
    launch_status: LaunchStatus
    launch_error: Optional[str] = None
    created_at: datetime

    class Config:
        from_attributes = True
# [/DEF:DatasetRunContextDto:Class]

# [DEF:SessionSummary:Class]
class SessionSummary(BaseModel):
    session_id: str
    user_id: str
    environment_id: str
    source_kind: str
    source_input: str
    dataset_ref: str
    dataset_id: Optional[int] = None
    readiness_state: ReadinessState
    recommended_action: RecommendedAction
    status: SessionStatus
    current_phase: SessionPhase
    created_at: datetime
    updated_at: datetime
    last_activity_at: datetime

    class Config:
        from_attributes = True
# [/DEF:SessionSummary:Class]

# [DEF:SessionDetail:Class]
class SessionDetail(SessionSummary):
    collaborators: List[SessionCollaboratorDto] = []
    profile: Optional[DatasetProfileDto] = None
    findings: List[ValidationFindingDto] = []
    semantic_sources: List[SemanticSourceDto] = []
    semantic_fields: List[SemanticFieldEntryDto] = []
    imported_filters: List[ImportedFilterDto] = []
    template_variables: List[TemplateVariableDto] = []
    execution_mappings: List[ExecutionMappingDto] = []
    clarification_sessions: List[ClarificationSessionDto] = []
    previews: List[CompiledPreviewDto] = []
    run_contexts: List[DatasetRunContextDto] = []

    class Config:
        from_attributes = True
# [/DEF:SessionDetail:Class]

# [/DEF:DatasetReviewSchemas:Module]
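# Because every DTO sets from_attributes = True, ORM rows can be lifted
# straight into response models. A sketch, assuming Pydantic v2 is in use
# (from_attributes is the v2 name for what v1 called orm_mode):
#
#     detail = SessionDetail.model_validate(orm_session)   # orm_session: DatasetReviewSession row
#     payload = detail.model_dump(mode="json")
#
# Under Pydantic v1 the equivalents would be SessionDetail.from_orm(orm_session)
# and .dict().
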
@@ -11,7 +11,7 @@ from datetime import datetime
# [DEF:DashboardHealthItem:Class]
# @PURPOSE: Represents the latest health status of a single dashboard.
class DashboardHealthItem(BaseModel):
    record_id: str
    record_id: Optional[str] = None
    dashboard_id: str
    dashboard_slug: Optional[str] = None
    dashboard_title: Optional[str] = None

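# The loosened field above means a health item that has no persisted record yet
# can still validate; record_id may simply be omitted (sketch, hypothetical
# values, and assuming the fields not shown in this hunk are likewise optional):
#
#     DashboardHealthItem(dashboard_id="13")   # record_id defaults to None
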
@@ -10,7 +10,7 @@
    },
    "changed_by_name": "Superset Admin",
    "changed_on": "2026-02-24T19:24:01.850617",
    "changed_on_delta_humanized": "7 days ago",
    "changed_on_delta_humanized": "20 days ago",
    "charts": [
        "TA-0001-001 test_chart"
    ],
@@ -19,7 +19,7 @@
        "id": 1,
        "last_name": "Admin"
    },
    "created_on_delta_humanized": "13 days ago",
    "created_on_delta_humanized": "26 days ago",
    "css": null,
    "dashboard_title": "TA-0001 Test dashboard",
    "id": 13,
@@ -54,7 +54,7 @@
        "last_name": "Admin"
    },
    "changed_on": "2026-02-18T14:56:04.863722",
    "changed_on_humanized": "13 days ago",
    "changed_on_humanized": "26 days ago",
    "column_formats": {},
    "columns": [
        {
@@ -424,7 +424,7 @@
        "last_name": "Admin"
    },
    "created_on": "2026-02-18T14:56:04.317950",
    "created_on_humanized": "13 days ago",
    "created_on_humanized": "26 days ago",
    "database": {
        "allow_multi_catalog": false,
        "backend": "postgresql",

@@ -46,6 +46,14 @@ INITIAL_PERMISSIONS = [
    {"resource": "plugin:storage", "action": "WRITE"},
    {"resource": "plugin:debug", "action": "EXECUTE"},
    {"resource": "git_config", "action": "READ"},

    # Dataset Review Permissions
    {"resource": "dataset:session", "action": "READ"},
    {"resource": "dataset:session", "action": "MANAGE"},
    {"resource": "dataset:session", "action": "APPROVE"},
    {"resource": "dataset:execution", "action": "PREVIEW"},
    {"resource": "dataset:execution", "action": "LAUNCH"},
    {"resource": "dataset:execution", "action": "LAUNCH_PROD"},
]
# [/DEF:INITIAL_PERMISSIONS:Constant]

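# Note the deliberate asymmetry with the seeding hunk below: all six dataset
# permissions are registered, but ordinary users are seeded with only READ,
# MANAGE, PREVIEW and LAUNCH, leaving APPROVE and LAUNCH_PROD to elevated
# roles. A hypothetical guard at a route boundary (require_permission is an
# illustrative name, not shown in this diff) might look like:
#
#     require_permission(user, resource="dataset:execution", action="LAUNCH_PROD")
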
@@ -95,6 +103,10 @@ def seed_permissions():
        ("tasks", "READ"),
        ("tasks", "WRITE"),
        ("git_config", "READ"),
        ("dataset:session", "READ"),
        ("dataset:session", "MANAGE"),
        ("dataset:execution", "PREVIEW"),
        ("dataset:execution", "LAUNCH"),
    ]

    for res, act in user_permissions:

@@ -31,11 +31,12 @@ from ...models.clean_release import (
    ComplianceRun,
    ComplianceStageRun,
    ComplianceViolation,
    CheckFinalStatus,
)
from .policy_engine import CleanPolicyEngine
from .repository import CleanReleaseRepository
from .stages import derive_final_status
from ...core.logger import belief_scope
from ...core.logger import belief_scope, logger


# [DEF:CleanComplianceOrchestrator:Class]
@@ -54,28 +55,71 @@ class CleanComplianceOrchestrator:

    # [DEF:start_check_run:Function]
    # @PURPOSE: Initiate a new compliance run session.
    # @PRE: candidate_id/policy_id/manifest_id identify existing records in repository.
    # @PRE: candidate_id and policy_id are provided; legacy callers may omit persisted manifest/policy records.
    # @POST: Returns initialized ComplianceRun in RUNNING state persisted in repository.
    # @SIDE_EFFECT: Reads manifest/policy and writes new ComplianceRun via repository.save_check_run.
    # @DATA_CONTRACT: Input -> (candidate_id:str, policy_id:str, requested_by:str, manifest_id:str), Output -> ComplianceRun
    def start_check_run(self, candidate_id: str, policy_id: str, requested_by: str, manifest_id: str) -> ComplianceRun:
    # @SIDE_EFFECT: Reads manifest/policy when present and writes new ComplianceRun via repository.save_check_run.
    # @DATA_CONTRACT: Input -> (candidate_id:str, policy_id:str, requested_by:str, manifest_id:str|None), Output -> ComplianceRun
    def start_check_run(
        self,
        candidate_id: str,
        policy_id: str,
        requested_by: str | None = None,
        manifest_id: str | None = None,
        **legacy_kwargs,
    ) -> ComplianceRun:
        with belief_scope("start_check_run"):
            manifest = self.repository.get_manifest(manifest_id)
            actor = requested_by or legacy_kwargs.get("triggered_by") or "system"
            execution_mode = str(legacy_kwargs.get("execution_mode") or "").strip().lower()
            manifest_id_value = manifest_id

            if manifest_id_value and str(manifest_id_value).strip().lower() in {"tui", "api", "scheduler"}:
                logger.reason(
                    "Detected legacy positional execution_mode passed through manifest_id slot",
                    extra={"candidate_id": candidate_id, "execution_mode": manifest_id_value},
                )
                execution_mode = str(manifest_id_value).strip().lower()
                manifest_id_value = None

            manifest = self.repository.get_manifest(manifest_id_value) if manifest_id_value else None
            policy = self.repository.get_policy(policy_id)
            if not manifest or not policy:

            if manifest_id_value and manifest is None:
                logger.explore(
                    "Manifest lookup missed during run start; rejecting explicit manifest contract",
                    extra={"candidate_id": candidate_id, "manifest_id": manifest_id_value},
                )
                raise ValueError("Manifest or Policy not found")

            if policy is None:
                logger.explore(
                    "Policy lookup missed during run start; using compatibility placeholder snapshot",
                    extra={"candidate_id": candidate_id, "policy_id": policy_id, "execution_mode": execution_mode or "unspecified"},
                )

            manifest_id_value = manifest_id_value or f"manifest-{candidate_id}"
            manifest_digest = getattr(manifest, "manifest_digest", "pending")
            registry_snapshot_id = (
                getattr(policy, "registry_snapshot_id", None)
                or getattr(policy, "internal_source_registry_ref", None)
                or "pending"
            )

            check_run = ComplianceRun(
                id=f"check-{uuid4()}",
                candidate_id=candidate_id,
                manifest_id=manifest_id,
                manifest_digest=manifest.manifest_digest,
                manifest_id=manifest_id_value,
                manifest_digest=manifest_digest,
                policy_snapshot_id=policy_id,
                registry_snapshot_id=policy.registry_snapshot_id,
                requested_by=requested_by,
                registry_snapshot_id=registry_snapshot_id,
                requested_by=actor,
                requested_at=datetime.now(timezone.utc),
                started_at=datetime.now(timezone.utc),
                status=RunStatus.RUNNING,
            )
            logger.reflect(
                "Initialized compliance run with compatibility-safe dependency placeholders",
                extra={"run_id": check_run.id, "candidate_id": candidate_id, "policy_id": policy_id},
            )
            return self.repository.save_check_run(check_run)
    # [/DEF:start_check_run:Function]

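# Compatibility sketch (hypothetical call sites, not from the diff): the shim
# above lets all of these resolve to one method without breaking older callers.
#
#     orchestrator.start_check_run("cand-1", "pol-1", "alice", "man-1")    # current contract
#     orchestrator.start_check_run("cand-1", "pol-1", "alice", "tui")      # legacy: execution_mode in the manifest_id slot
#     orchestrator.start_check_run("cand-1", "pol-1", triggered_by="cron") # legacy keyword; actor falls back to it
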
@@ -88,33 +132,46 @@ class CleanComplianceOrchestrator:
    def execute_stages(self, check_run: ComplianceRun, forced_results: Optional[List[ComplianceStageRun]] = None) -> ComplianceRun:
        with belief_scope("execute_stages"):
            if forced_results is not None:
                # In a real scenario, we'd persist these stages.
                for index, result in enumerate(forced_results, start=1):
                    if isinstance(result, ComplianceStageRun):
                        stage_run = result
                    else:
                        status_value = getattr(result, "status", None)
                        if status_value == "PASS":
                            decision = ComplianceDecision.PASSED.value
                        elif status_value == "FAIL":
                            decision = ComplianceDecision.BLOCKED.value
                        else:
                            decision = ComplianceDecision.ERROR.value
                        stage_run = ComplianceStageRun(
                            id=f"{check_run.id}-stage-{index}",
                            run_id=check_run.id,
                            stage_name=result.stage.value,
                            status=result.status.value,
                            decision=decision,
                            details_json={"details": result.details},
                        )
                    self.repository.stage_runs[stage_run.id] = stage_run

                check_run.final_status = derive_final_status(forced_results).value
                check_run.status = RunStatus.SUCCEEDED
                return self.repository.save_check_run(check_run)

            # Real Logic Integration
            candidate = self.repository.get_candidate(check_run.candidate_id)
            policy = self.repository.get_policy(check_run.policy_snapshot_id)
            if not candidate or not policy:
                check_run.status = RunStatus.FAILED
                return self.repository.save_check_run(check_run)

            registry = self.repository.get_registry(check_run.registry_snapshot_id)
            manifest = self.repository.get_manifest(check_run.manifest_id)

            if not registry or not manifest:
            if not candidate or not policy or not registry or not manifest:
                check_run.status = RunStatus.FAILED
                check_run.finished_at = datetime.now(timezone.utc)
                return self.repository.save_check_run(check_run)

            # Simulate stage execution and violation detection
            # 1. DATA_PURITY
            summary = manifest.content_json.get("summary", {})
            purity_ok = summary.get("prohibited_detected_count", 0) == 0

            if not purity_ok:
                check_run.final_status = ComplianceDecision.BLOCKED
            else:
                check_run.final_status = ComplianceDecision.PASSED

            check_run.final_status = (
                ComplianceDecision.PASSED.value if purity_ok else ComplianceDecision.BLOCKED.value
            )
            check_run.status = RunStatus.SUCCEEDED
            check_run.finished_at = datetime.now(timezone.utc)

@@ -129,9 +186,18 @@ class CleanComplianceOrchestrator:
    # @DATA_CONTRACT: Input -> ComplianceRun, Output -> ComplianceRun
    def finalize_run(self, check_run: ComplianceRun) -> ComplianceRun:
        with belief_scope("finalize_run"):
            # If not already set by execute_stages
            if check_run.status == RunStatus.FAILED:
                check_run.finished_at = datetime.now(timezone.utc)
                return self.repository.save_check_run(check_run)

            if not check_run.final_status:
                check_run.final_status = ComplianceDecision.PASSED
                stage_results = [
                    stage_run
                    for stage_run in self.repository.stage_runs.values()
                    if stage_run.run_id == check_run.id
                ]
                derived = derive_final_status(stage_results)
                check_run.final_status = derived.value

            check_run.status = RunStatus.SUCCEEDED
            check_run.finished_at = datetime.now(timezone.utc)

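# Test-style sketch (hypothetical stage stubs, not from the diff): passing
# forced_results short-circuits real execution; non-ComplianceStageRun results
# are coerced by their "PASS"/"FAIL" status into stage rows, and the final
# status is derived from the forced list as a whole.
#
#     run = orchestrator.start_check_run("cand-1", "pol-1", "alice")
#     run = orchestrator.execute_stages(run, forced_results=[stage_stub])
#     run = orchestrator.finalize_run(run)
#     assert run.status == RunStatus.SUCCEEDED
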
@@ -13,7 +13,12 @@ from dataclasses import dataclass
from typing import Dict, Iterable, List, Tuple

from ...core.logger import belief_scope, logger
-from ...models.clean_release import CleanPolicySnapshot, SourceRegistrySnapshot
+from ...models.clean_release import (
+    CleanPolicySnapshot,
+    SourceRegistrySnapshot,
+    CleanProfilePolicy,
+    ResourceSourceRegistry,
+)


@dataclass
@@ -39,7 +44,11 @@ class SourceValidationResult:
# @TEST_EDGE: external_endpoint -> endpoint not present in enabled internal registry entries
# @TEST_INVARIANT: deterministic_classification -> VERIFIED_BY: [policy_valid]
class CleanPolicyEngine:
-    def __init__(self, policy: CleanPolicySnapshot, registry: SourceRegistrySnapshot):
+    def __init__(
+        self,
+        policy: CleanPolicySnapshot | CleanProfilePolicy,
+        registry: SourceRegistrySnapshot | ResourceSourceRegistry,
+    ):
        self.policy = policy
        self.registry = registry

@@ -48,11 +57,27 @@ class CleanPolicyEngine:
        logger.reason("Validating enterprise-clean policy and internal registry consistency")
        reasons: List[str] = []

        # Snapshots are immutable and assumed active if resolved by facade
-        if not self.policy.registry_snapshot_id.strip():
-            reasons.append("Policy missing registry_snapshot_id")
+        registry_ref = (
+            getattr(self.policy, "registry_snapshot_id", None)
+            or getattr(self.policy, "internal_source_registry_ref", "")
+            or ""
+        )
+        if not str(registry_ref).strip():
+            reasons.append("Policy missing internal_source_registry_ref")

+        content = dict(getattr(self.policy, "content_json", None) or {})
+        if not content:
+            content = {
+                "profile": getattr(getattr(self.policy, "profile", None), "value", getattr(self.policy, "profile", "standard")),
+                "prohibited_artifact_categories": list(
+                    getattr(self.policy, "prohibited_artifact_categories", []) or []
+                ),
+                "required_system_categories": list(
+                    getattr(self.policy, "required_system_categories", []) or []
+                ),
+                "external_source_forbidden": getattr(self.policy, "external_source_forbidden", False),
+            }

-        content = self.policy.content_json or {}
        profile = content.get("profile", "standard")

        if profile == "enterprise-clean":
@@ -61,10 +86,16 @@ class CleanPolicyEngine:
            if not content.get("external_source_forbidden"):
                reasons.append("Enterprise policy requires external_source_forbidden=true")

-        if self.registry.id != self.policy.registry_snapshot_id:
+        registry_id = getattr(self.registry, "id", None) or getattr(self.registry, "registry_id", None)
+        if registry_id != registry_ref:
            reasons.append("Policy registry ref does not match provided registry")

-        if not self.registry.allowed_hosts:
+        allowed_hosts = getattr(self.registry, "allowed_hosts", None)
+        if allowed_hosts is None:
+            entries = getattr(self.registry, "entries", []) or []
+            allowed_hosts = [entry.host for entry in entries if getattr(entry, "enabled", True)]

+        if not allowed_hosts:
            reasons.append("Registry must contain allowed hosts")

        logger.reflect(f"Policy validation completed. blocking_reasons={len(reasons)}")
@@ -72,7 +103,16 @@ class CleanPolicyEngine:

    def classify_artifact(self, artifact: Dict) -> str:
        category = (artifact.get("category") or "").strip()
-        content = self.policy.content_json or {}
+        content = dict(getattr(self.policy, "content_json", None) or {})
+        if not content:
+            content = {
+                "required_system_categories": list(
+                    getattr(self.policy, "required_system_categories", []) or []
+                ),
+                "prohibited_artifact_categories": list(
+                    getattr(self.policy, "prohibited_artifact_categories", []) or []
+                ),
+            }

        required = content.get("required_system_categories", [])
        prohibited = content.get("prohibited_artifact_categories", [])
@@ -100,7 +140,11 @@ class CleanPolicyEngine:
            },
        )

-        allowed_hosts = set(self.registry.allowed_hosts or [])
+        allowed_hosts = getattr(self.registry, "allowed_hosts", None)
+        if allowed_hosts is None:
+            entries = getattr(self.registry, "entries", []) or []
+            allowed_hosts = [entry.host for entry in entries if getattr(entry, "enabled", True)]
+        allowed_hosts = set(allowed_hosts or [])
        normalized = endpoint.strip().lower()

        if normalized in allowed_hosts:

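The hunks above repeatedly swap direct attribute access for getattr fallbacks so the engine accepts both the legacy snapshot shape and the new profile/registry shape. A self-contained sketch of that duck-typing pattern, using hypothetical stand-in classes rather than the project's actual models:

# Minimal sketch of the getattr-based dual-model fallback used above.
# LegacySnapshot and NewRegistry are hypothetical stand-ins.
from dataclasses import dataclass, field
from typing import List

@dataclass
class Entry:
    host: str
    enabled: bool = True

@dataclass
class LegacySnapshot:
    allowed_hosts: List[str] = field(default_factory=lambda: ["warehouse.internal"])

@dataclass
class NewRegistry:
    entries: List[Entry] = field(
        default_factory=lambda: [Entry("warehouse.internal"), Entry("cdn.external", enabled=False)]
    )

def resolve_allowed_hosts(registry) -> set:
    # Prefer the legacy flat list; otherwise derive hosts from enabled entries.
    allowed = getattr(registry, "allowed_hosts", None)
    if allowed is None:
        entries = getattr(registry, "entries", []) or []
        allowed = [e.host for e in entries if getattr(e, "enabled", True)]
    return set(allowed or [])

assert resolve_allowed_hosts(LegacySnapshot()) == {"warehouse.internal"}
assert resolve_allowed_hosts(NewRegistry()) == {"warehouse.internal"}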
@@ -17,6 +17,7 @@ from .manifest_builder import build_distribution_manifest
from .policy_engine import CleanPolicyEngine
from .repository import CleanReleaseRepository
from .enums import CandidateStatus
+from ...models.clean_release import ReleaseCandidateStatus


def prepare_candidate(
@@ -34,7 +35,11 @@ def prepare_candidate(
    if policy is None:
        raise ValueError("Active clean policy not found")

-    registry = repository.get_registry(policy.registry_snapshot_id)
+    registry_ref = (
+        getattr(policy, "registry_snapshot_id", None)
+        or getattr(policy, "internal_source_registry_ref", None)
+    )
+    registry = repository.get_registry(registry_ref) if registry_ref else None
    if registry is None:
        raise ValueError("Registry not found for active policy")

@@ -48,22 +53,29 @@ def prepare_candidate(
    manifest = build_distribution_manifest(
        manifest_id=f"manifest-{candidate_id}",
        candidate_id=candidate_id,
-        policy_id=policy.policy_id,
+        policy_id=getattr(policy, "policy_id", None) or getattr(policy, "id", ""),
        generated_by=operator_id,
        artifacts=classified,
    )
    repository.save_manifest(manifest)

-    # Note: In the new model, BLOCKED is a ComplianceDecision, not a CandidateStatus.
-    # CandidateStatus.PREPARED is the correct next state after preparation.
-    candidate.transition_to(CandidateStatus.PREPARED)
-    repository.save_candidate(candidate)
+    current_status = getattr(candidate, "status", None)
+    if violations:
+        candidate.status = ReleaseCandidateStatus.BLOCKED.value
+        repository.save_candidate(candidate)
+        response_status = ReleaseCandidateStatus.BLOCKED.value
+    else:
+        if current_status in {CandidateStatus.DRAFT, CandidateStatus.DRAFT.value, "DRAFT"}:
+            candidate.transition_to(CandidateStatus.PREPARED)
+        else:
+            candidate.status = ReleaseCandidateStatus.PREPARED.value
+        repository.save_candidate(candidate)
+        response_status = ReleaseCandidateStatus.PREPARED.value

-    status_value = candidate.status.value if hasattr(candidate.status, "value") else str(candidate.status)
    manifest_id_value = getattr(manifest, "manifest_id", None) or getattr(manifest, "id", "")
    return {
        "candidate_id": candidate_id,
-        "status": status_value,
+        "status": response_status,
        "manifest_id": manifest_id_value,
        "violations": violations,
        "prepared_at": datetime.now(timezone.utc).isoformat(),

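The new branch makes the response status a pure function of the violation list: any violation blocks, otherwise the candidate is prepared, and transition_to is only used on the DRAFT path. A trivial sketch of that split in isolation, with a hypothetical Status enum standing in for ReleaseCandidateStatus:

# Sketch of the violation-driven status split above (hypothetical enum).
from enum import Enum

class Status(str, Enum):  # stand-in for ReleaseCandidateStatus
    DRAFT = "DRAFT"
    PREPARED = "PREPARED"
    BLOCKED = "BLOCKED"

def resolve_response_status(violations: list) -> str:
    # Violations always block; otherwise the candidate counts as prepared,
    # whether it reached PREPARED via transition_to or direct assignment.
    return (Status.BLOCKED if violations else Status.PREPARED).value

assert resolve_response_status(["external_source"]) == "BLOCKED"
assert resolve_response_status([]) == "PREPARED"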
@@ -11,7 +11,12 @@ from __future__ import annotations
from typing import Dict, Iterable, List

from ..enums import ComplianceDecision, ComplianceStageName
-from ....models.clean_release import ComplianceStageRun
+from ....models.clean_release import (
+    ComplianceStageRun,
+    CheckFinalStatus,
+    CheckStageResult,
+    CheckStageStatus,
+)
from .base import ComplianceStage
from .data_purity import DataPurityStage
from .internal_sources_only import InternalSourcesOnlyStage
@@ -44,8 +49,34 @@ def build_default_stages() -> List[ComplianceStage]:
# @PURPOSE: Convert stage result list to dictionary by stage name.
# @PRE: stage_results may be empty or contain unique stage names.
# @POST: Returns stage->status dictionary for downstream evaluation.
-def stage_result_map(stage_results: Iterable[ComplianceStageRun]) -> Dict[ComplianceStageName, ComplianceDecision]:
-    return {ComplianceStageName(result.stage_name): ComplianceDecision(result.decision) for result in stage_results if result.decision}
+def stage_result_map(
+    stage_results: Iterable[ComplianceStageRun | CheckStageResult],
+) -> Dict[ComplianceStageName, CheckStageStatus]:
+    normalized: Dict[ComplianceStageName, CheckStageStatus] = {}
+    for result in stage_results:
+        if isinstance(result, CheckStageResult):
+            normalized[ComplianceStageName(result.stage.value)] = CheckStageStatus(result.status.value)
+            continue
+
+        stage_name = getattr(result, "stage_name", None)
+        decision = getattr(result, "decision", None)
+        status = getattr(result, "status", None)
+
+        if not stage_name:
+            continue
+
+        normalized_stage = ComplianceStageName(stage_name)
+        if decision == ComplianceDecision.BLOCKED:
+            normalized[normalized_stage] = CheckStageStatus.FAIL
+        elif decision == ComplianceDecision.ERROR:
+            normalized[normalized_stage] = CheckStageStatus.SKIPPED
+        elif decision == ComplianceDecision.PASSED:
+            normalized[normalized_stage] = CheckStageStatus.PASS
+        elif decision:
+            normalized[normalized_stage] = CheckStageStatus(str(decision))
+        elif status:
+            normalized[normalized_stage] = CheckStageStatus(str(status))
+    return normalized
# [/DEF:stage_result_map:Function]


@@ -53,7 +84,7 @@ def stage_result_map(stage_results: Iterable[ComplianceStageRun]) -> Dict[Compli
# @PURPOSE: Identify mandatory stages that are absent from run results.
# @PRE: stage_status_map contains zero or more known stage statuses.
# @POST: Returns ordered list of missing mandatory stages.
-def missing_mandatory_stages(stage_status_map: Dict[ComplianceStageName, ComplianceDecision]) -> List[ComplianceStageName]:
+def missing_mandatory_stages(stage_status_map: Dict[ComplianceStageName, CheckStageStatus]) -> List[ComplianceStageName]:
    return [stage for stage in MANDATORY_STAGE_ORDER if stage not in stage_status_map]
# [/DEF:missing_mandatory_stages:Function]

@@ -62,19 +93,19 @@ def missing_mandatory_stages(stage_status_map: Dict[ComplianceStageName, Complia
# @PURPOSE: Derive final run status from stage results with deterministic blocking behavior.
# @PRE: Stage statuses correspond to compliance checks.
# @POST: Returns one of PASSED/BLOCKED/ERROR according to mandatory stage outcomes.
-def derive_final_status(stage_results: Iterable[ComplianceStageRun]) -> ComplianceDecision:
+def derive_final_status(stage_results: Iterable[ComplianceStageRun | CheckStageResult]) -> CheckFinalStatus:
    status_map = stage_result_map(stage_results)
    missing = missing_mandatory_stages(status_map)
    if missing:
-        return ComplianceDecision.ERROR
+        return CheckFinalStatus.FAILED

    for stage in MANDATORY_STAGE_ORDER:
        decision = status_map.get(stage)
-        if decision == ComplianceDecision.ERROR:
-            return ComplianceDecision.ERROR
-        if decision == ComplianceDecision.BLOCKED:
-            return ComplianceDecision.BLOCKED
+        if decision == CheckStageStatus.SKIPPED:
+            return CheckFinalStatus.FAILED
+        if decision == CheckStageStatus.FAIL:
+            return CheckFinalStatus.BLOCKED

-    return ComplianceDecision.PASSED
+    return CheckFinalStatus.COMPLIANT
# [/DEF:derive_final_status:Function]
# [/DEF:backend.src.services.clean_release.stages:Module]
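The rewritten derive_final_status is a short-circuiting fold over a fixed mandatory stage order: any missing or skipped mandatory stage fails the run, any failing stage blocks it, and only a clean sweep is compliant. A self-contained sketch of that fold follows; the Stage, StageStatus, and FinalStatus enums are hypothetical stand-ins for the project's ComplianceStageName, CheckStageStatus, and CheckFinalStatus:

# Self-contained sketch of the derive_final_status fold (stand-in enums).
from enum import Enum

class Stage(str, Enum):
    DATA_PURITY = "DATA_PURITY"
    INTERNAL_SOURCES_ONLY = "INTERNAL_SOURCES_ONLY"

class StageStatus(str, Enum):
    PASS = "PASS"
    FAIL = "FAIL"
    SKIPPED = "SKIPPED"

class FinalStatus(str, Enum):
    COMPLIANT = "COMPLIANT"
    BLOCKED = "BLOCKED"
    FAILED = "FAILED"

MANDATORY = [Stage.DATA_PURITY, Stage.INTERNAL_SOURCES_ONLY]

def derive(status_map: dict) -> FinalStatus:
    # Missing or skipped mandatory stages fail the run; any FAIL blocks it.
    if any(stage not in status_map for stage in MANDATORY):
        return FinalStatus.FAILED
    for stage in MANDATORY:
        if status_map[stage] == StageStatus.SKIPPED:
            return FinalStatus.FAILED
        if status_map[stage] == StageStatus.FAIL:
            return FinalStatus.BLOCKED
    return FinalStatus.COMPLIANT

assert derive({Stage.DATA_PURITY: StageStatus.PASS,
               Stage.INTERNAL_SOURCES_ONLY: StageStatus.PASS}) == FinalStatus.COMPLIANT
assert derive({Stage.DATA_PURITY: StageStatus.FAIL,
               Stage.INTERNAL_SOURCES_ONLY: StageStatus.PASS}) == FinalStatus.BLOCKED
assert derive({Stage.DATA_PURITY: StageStatus.PASS}) == FinalStatus.FAILED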
7 backend/src/services/dataset_review/__init__.py (Normal file)
@@ -0,0 +1,7 @@
# [DEF:backend.src.services.dataset_review:Module]
#
# @SEMANTICS: dataset, review, orchestration
# @PURPOSE: Provides services for dataset-centered orchestration flow.
# @LAYER: Services
#
# [/DEF:backend.src.services.dataset_review:Module]
552 backend/src/services/dataset_review/clarification_engine.py (Normal file)
@@ -0,0 +1,552 @@
# [DEF:ClarificationEngine:Module]
# @COMPLEXITY: 4
# @SEMANTICS: dataset_review, clarification, question_payload, answer_persistence, readiness, findings
# @PURPOSE: Manage one-question-at-a-time clarification state, deterministic answer persistence, and readiness/finding updates.
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository]
# @RELATION: [DEPENDS_ON] ->[ClarificationSession]
# @RELATION: [DEPENDS_ON] ->[ClarificationQuestion]
# @RELATION: [DEPENDS_ON] ->[ClarificationAnswer]
# @RELATION: [DEPENDS_ON] ->[ValidationFinding]
# @PRE: Target session contains a persisted clarification aggregate in the current ownership scope.
# @POST: Active clarification payload exposes one highest-priority unresolved question, and each recorded answer is persisted before pointer/readiness mutation.
# @SIDE_EFFECT: Persists clarification answers, question/session states, and related readiness/finding changes.
# @DATA_CONTRACT: Input[DatasetReviewSession|ClarificationAnswerCommand] -> Output[ClarificationStateResult]
# @INVARIANT: Only one active clarification question may exist at a time; skipped and expert-review items remain unresolved and visible.
from __future__ import annotations

# [DEF:ClarificationEngine.imports:Block]
import uuid
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional

from src.core.logger import belief_scope, logger
from src.models.auth import User
from src.models.dataset_review import (
    AnswerKind,
    ClarificationAnswer,
    ClarificationQuestion,
    ClarificationSession,
    ClarificationStatus,
    DatasetReviewSession,
    FindingArea,
    FindingSeverity,
    QuestionState,
    ReadinessState,
    RecommendedAction,
    ResolutionState,
    SessionPhase,
    ValidationFinding,
)
from src.services.dataset_review.repositories.session_repository import (
    DatasetReviewSessionRepository,
)
# [/DEF:ClarificationEngine.imports:Block]


# [DEF:ClarificationQuestionPayload:Class]
# @COMPLEXITY: 2
# @PURPOSE: Typed active-question payload returned to the API layer.
@dataclass
class ClarificationQuestionPayload:
    question_id: str
    clarification_session_id: str
    topic_ref: str
    question_text: str
    why_it_matters: str
    current_guess: Optional[str]
    priority: int
    state: QuestionState
    options: list[dict[str, object]] = field(default_factory=list)
# [/DEF:ClarificationQuestionPayload:Class]


# [DEF:ClarificationStateResult:Class]
# @COMPLEXITY: 2
# @PURPOSE: Clarification state result carrying the current session, active payload, and changed findings.
@dataclass
class ClarificationStateResult:
    clarification_session: ClarificationSession
    current_question: Optional[ClarificationQuestionPayload]
    session: DatasetReviewSession
    changed_findings: List[ValidationFinding] = field(default_factory=list)
# [/DEF:ClarificationStateResult:Class]


# [DEF:ClarificationAnswerCommand:Class]
# @COMPLEXITY: 2
# @PURPOSE: Typed answer command for clarification state mutation.
@dataclass
class ClarificationAnswerCommand:
    session: DatasetReviewSession
    question_id: str
    answer_kind: AnswerKind
    answer_value: Optional[str]
    user: User
# [/DEF:ClarificationAnswerCommand:Class]


# [DEF:ClarificationEngine:Class]
# @COMPLEXITY: 4
# @PURPOSE: Provide deterministic one-question-at-a-time clarification selection and answer persistence.
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository]
# @RELATION: [DEPENDS_ON] ->[ClarificationSession]
# @RELATION: [DEPENDS_ON] ->[ValidationFinding]
# @PRE: Repository is bound to the current request transaction scope.
# @POST: Returned clarification state is persistence-backed and aligned with session readiness/recommended action.
# @SIDE_EFFECT: Mutates clarification answers, session flags, and related clarification findings.
class ClarificationEngine:
    # [DEF:ClarificationEngine.__init__:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Bind repository dependency for clarification persistence operations.
    def __init__(self, repository: DatasetReviewSessionRepository) -> None:
        self.repository = repository
    # [/DEF:ClarificationEngine.__init__:Function]

    # [DEF:ClarificationEngine.build_question_payload:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Return the one active highest-priority clarification question payload with why-it-matters, current guess, and options.
    # @RELATION: [DEPENDS_ON] ->[ClarificationQuestion]
    # @RELATION: [DEPENDS_ON] ->[ClarificationOption]
    # @PRE: Session contains unresolved clarification state or a resumable clarification session.
    # @POST: Returns exactly one active/open question payload or None when no unresolved question remains.
    # @SIDE_EFFECT: Normalizes the active-question pointer and clarification status in persistence.
    # @DATA_CONTRACT: Input[DatasetReviewSession] -> Output[ClarificationQuestionPayload|None]
    def build_question_payload(
        self,
        session: DatasetReviewSession,
    ) -> Optional[ClarificationQuestionPayload]:
        with belief_scope("ClarificationEngine.build_question_payload"):
            clarification_session = self._get_latest_clarification_session(session)
            if clarification_session is None:
                logger.reason(
                    "Clarification payload requested without clarification session",
                    extra={"session_id": session.session_id},
                )
                return None

            active_questions = [
                question for question in clarification_session.questions
                if question.state == QuestionState.OPEN
            ]
            active_questions.sort(key=lambda item: (-int(item.priority), item.created_at, item.question_id))

            if not active_questions:
                clarification_session.current_question_id = None
                clarification_session.status = ClarificationStatus.COMPLETED
                session.readiness_state = self._derive_readiness_state(session)
                session.recommended_action = self._derive_recommended_action(session)
                if session.current_phase == SessionPhase.CLARIFICATION:
                    session.current_phase = SessionPhase.REVIEW
                self.repository.db.commit()
                logger.reflect(
                    "No unresolved clarification question remains",
                    extra={"session_id": session.session_id},
                )
                return None

            selected_question = active_questions[0]
            clarification_session.current_question_id = selected_question.question_id
            clarification_session.status = ClarificationStatus.ACTIVE
            session.readiness_state = ReadinessState.CLARIFICATION_ACTIVE
            session.recommended_action = RecommendedAction.ANSWER_NEXT_QUESTION
            session.current_phase = SessionPhase.CLARIFICATION

            logger.reason(
                "Selected active clarification question",
                extra={
                    "session_id": session.session_id,
                    "clarification_session_id": clarification_session.clarification_session_id,
                    "question_id": selected_question.question_id,
                    "priority": selected_question.priority,
                },
            )
            self.repository.db.commit()

            payload = ClarificationQuestionPayload(
                question_id=selected_question.question_id,
                clarification_session_id=selected_question.clarification_session_id,
                topic_ref=selected_question.topic_ref,
                question_text=selected_question.question_text,
                why_it_matters=selected_question.why_it_matters,
                current_guess=selected_question.current_guess,
                priority=selected_question.priority,
                state=selected_question.state,
                options=[
                    {
                        "option_id": option.option_id,
                        "question_id": option.question_id,
                        "label": option.label,
                        "value": option.value,
                        "is_recommended": option.is_recommended,
                        "display_order": option.display_order,
                    }
                    for option in sorted(
                        selected_question.options,
                        key=lambda item: (item.display_order, item.label, item.option_id),
                    )
                ],
            )
            logger.reflect(
                "Clarification payload built",
                extra={
                    "session_id": session.session_id,
                    "question_id": payload.question_id,
                    "option_count": len(payload.options),
                },
            )
            return payload
    # [/DEF:ClarificationEngine.build_question_payload:Function]

    # [DEF:ClarificationEngine.record_answer:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist one clarification answer before any pointer/readiness mutation and compute deterministic state impact.
    # @RELATION: [DEPENDS_ON] ->[ClarificationAnswer]
    # @RELATION: [DEPENDS_ON] ->[ValidationFinding]
    # @PRE: Target question belongs to the session's active clarification session and is still open.
    # @POST: Answer row is persisted before current-question pointer advances; skipped/expert-review items remain unresolved and visible.
    # @SIDE_EFFECT: Inserts answer row, mutates question/session states, updates clarification findings, and commits.
    # @DATA_CONTRACT: Input[ClarificationAnswerCommand] -> Output[ClarificationStateResult]
    def record_answer(self, command: ClarificationAnswerCommand) -> ClarificationStateResult:
        with belief_scope("ClarificationEngine.record_answer"):
            session = command.session
            clarification_session = self._get_latest_clarification_session(session)
            if clarification_session is None:
                logger.explore(
                    "Cannot record clarification answer because no clarification session exists",
                    extra={"session_id": session.session_id},
                )
                raise ValueError("Clarification session not found")

            question = self._find_question(clarification_session, command.question_id)
            if question is None:
                logger.explore(
                    "Cannot record clarification answer for foreign or missing question",
                    extra={"session_id": session.session_id, "question_id": command.question_id},
                )
                raise ValueError("Clarification question not found")

            if question.answer is not None:
                logger.explore(
                    "Rejected duplicate clarification answer submission",
                    extra={"session_id": session.session_id, "question_id": command.question_id},
                )
                raise ValueError("Clarification question already answered")

            if clarification_session.current_question_id and clarification_session.current_question_id != question.question_id:
                logger.explore(
                    "Rejected answer for non-active clarification question",
                    extra={
                        "session_id": session.session_id,
                        "question_id": question.question_id,
                        "current_question_id": clarification_session.current_question_id,
                    },
                )
                raise ValueError("Only the active clarification question can be answered")

            normalized_answer_value = self._normalize_answer_value(command.answer_kind, command.answer_value, question)

            logger.reason(
                "Persisting clarification answer before state advancement",
                extra={
                    "session_id": session.session_id,
                    "question_id": question.question_id,
                    "answer_kind": command.answer_kind.value,
                },
            )
            persisted_answer = ClarificationAnswer(
                question_id=question.question_id,
                answer_kind=command.answer_kind,
                answer_value=normalized_answer_value,
                answered_by_user_id=command.user.id,
                impact_summary=self._build_impact_summary(question, command.answer_kind, normalized_answer_value),
            )
            self.repository.db.add(persisted_answer)
            self.repository.db.flush()

            changed_finding = self._upsert_clarification_finding(
                session=session,
                question=question,
                answer_kind=command.answer_kind,
                answer_value=normalized_answer_value,
            )

            if command.answer_kind == AnswerKind.SELECTED:
                question.state = QuestionState.ANSWERED
            elif command.answer_kind == AnswerKind.CUSTOM:
                question.state = QuestionState.ANSWERED
            elif command.answer_kind == AnswerKind.SKIPPED:
                question.state = QuestionState.SKIPPED
            elif command.answer_kind == AnswerKind.EXPERT_REVIEW:
                question.state = QuestionState.EXPERT_REVIEW

            question.updated_at = datetime.utcnow()
            self.repository.db.flush()

            clarification_session.resolved_count = self._count_resolved_questions(clarification_session)
            clarification_session.remaining_count = self._count_remaining_questions(clarification_session)
            clarification_session.summary_delta = self.summarize_progress(clarification_session)
            clarification_session.updated_at = datetime.utcnow()

            next_question = self._select_next_open_question(clarification_session)
            clarification_session.current_question_id = next_question.question_id if next_question else None
            clarification_session.status = (
                ClarificationStatus.ACTIVE if next_question else ClarificationStatus.COMPLETED
            )
            if clarification_session.status == ClarificationStatus.COMPLETED:
                clarification_session.completed_at = datetime.utcnow()

            session.readiness_state = self._derive_readiness_state(session)
            session.recommended_action = self._derive_recommended_action(session)
            session.current_phase = (
                SessionPhase.CLARIFICATION
                if clarification_session.current_question_id
                else SessionPhase.REVIEW
            )
            session.last_activity_at = datetime.utcnow()

            self.repository.db.commit()
            self.repository.db.refresh(session)

            logger.reflect(
                "Clarification answer recorded and session advanced",
                extra={
                    "session_id": session.session_id,
                    "question_id": question.question_id,
                    "next_question_id": clarification_session.current_question_id,
                    "readiness_state": session.readiness_state.value,
                    "remaining_count": clarification_session.remaining_count,
                },
            )

            return ClarificationStateResult(
                clarification_session=clarification_session,
                current_question=self.build_question_payload(session),
                session=session,
                changed_findings=[changed_finding] if changed_finding else [],
            )
    # [/DEF:ClarificationEngine.record_answer:Function]

    # [DEF:ClarificationEngine.summarize_progress:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Produce a compact progress summary for pause/resume and completion UX.
    # @RELATION: [DEPENDS_ON] ->[ClarificationSession]
    def summarize_progress(self, clarification_session: ClarificationSession) -> str:
        resolved = self._count_resolved_questions(clarification_session)
        remaining = self._count_remaining_questions(clarification_session)
        return f"{resolved} resolved, {remaining} unresolved"
    # [/DEF:ClarificationEngine.summarize_progress:Function]

    # [DEF:ClarificationEngine._get_latest_clarification_session:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Select the latest clarification session for the current dataset review aggregate.
    def _get_latest_clarification_session(
        self,
        session: DatasetReviewSession,
    ) -> Optional[ClarificationSession]:
        if not session.clarification_sessions:
            return None
        ordered_sessions = sorted(
            session.clarification_sessions,
            key=lambda item: (item.started_at, item.clarification_session_id),
            reverse=True,
        )
        return ordered_sessions[0]
    # [/DEF:ClarificationEngine._get_latest_clarification_session:Function]

    # [DEF:ClarificationEngine._find_question:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Resolve a clarification question from the active clarification aggregate.
    def _find_question(
        self,
        clarification_session: ClarificationSession,
        question_id: str,
    ) -> Optional[ClarificationQuestion]:
        for question in clarification_session.questions:
            if question.question_id == question_id:
                return question
        return None
    # [/DEF:ClarificationEngine._find_question:Function]

    # [DEF:ClarificationEngine._select_next_open_question:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Select the next unresolved question in deterministic priority order.
    def _select_next_open_question(
        self,
        clarification_session: ClarificationSession,
    ) -> Optional[ClarificationQuestion]:
        open_questions = [
            question for question in clarification_session.questions
            if question.state == QuestionState.OPEN
        ]
        if not open_questions:
            return None
        open_questions.sort(key=lambda item: (-int(item.priority), item.created_at, item.question_id))
        return open_questions[0]
    # [/DEF:ClarificationEngine._select_next_open_question:Function]

    # [DEF:ClarificationEngine._count_resolved_questions:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Count questions whose answers fully resolved the ambiguity.
    def _count_resolved_questions(self, clarification_session: ClarificationSession) -> int:
        return sum(
            1
            for question in clarification_session.questions
            if question.state == QuestionState.ANSWERED
        )
    # [/DEF:ClarificationEngine._count_resolved_questions:Function]

    # [DEF:ClarificationEngine._count_remaining_questions:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Count questions still unresolved or deferred after clarification interaction.
    def _count_remaining_questions(self, clarification_session: ClarificationSession) -> int:
        return sum(
            1
            for question in clarification_session.questions
            if question.state in {QuestionState.OPEN, QuestionState.SKIPPED, QuestionState.EXPERT_REVIEW}
        )
    # [/DEF:ClarificationEngine._count_remaining_questions:Function]

    # [DEF:ClarificationEngine._normalize_answer_value:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Validate and normalize answer payload based on answer kind and active question options.
    def _normalize_answer_value(
        self,
        answer_kind: AnswerKind,
        answer_value: Optional[str],
        question: ClarificationQuestion,
    ) -> Optional[str]:
        normalized_answer_value = str(answer_value).strip() if answer_value is not None else None
        if answer_kind in {AnswerKind.SELECTED, AnswerKind.CUSTOM} and not normalized_answer_value:
            raise ValueError("answer_value is required for selected or custom clarification answers")
        if answer_kind == AnswerKind.SELECTED:
            allowed_values = {option.value for option in question.options}
            if normalized_answer_value not in allowed_values:
                raise ValueError("answer_value must match one of the current clarification options")
        if answer_kind == AnswerKind.SKIPPED:
            return normalized_answer_value or "skipped"
        if answer_kind == AnswerKind.EXPERT_REVIEW:
            return normalized_answer_value or "expert_review"
        return normalized_answer_value
    # [/DEF:ClarificationEngine._normalize_answer_value:Function]

    # [DEF:ClarificationEngine._build_impact_summary:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Build a compact audit note describing how the clarification answer affects session state.
    def _build_impact_summary(
        self,
        question: ClarificationQuestion,
        answer_kind: AnswerKind,
        answer_value: Optional[str],
    ) -> str:
        if answer_kind == AnswerKind.SKIPPED:
            return f"Clarification for {question.topic_ref} was skipped and remains unresolved."
        if answer_kind == AnswerKind.EXPERT_REVIEW:
            return f"Clarification for {question.topic_ref} was deferred for expert review."
        return f"Clarification for {question.topic_ref} recorded as '{answer_value}'."
    # [/DEF:ClarificationEngine._build_impact_summary:Function]

    # [DEF:ClarificationEngine._upsert_clarification_finding:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Keep one finding per clarification topic aligned with answer outcome and unresolved visibility rules.
    # @RELATION: [DEPENDS_ON] ->[ValidationFinding]
    def _upsert_clarification_finding(
        self,
        session: DatasetReviewSession,
        question: ClarificationQuestion,
        answer_kind: AnswerKind,
        answer_value: Optional[str],
    ) -> ValidationFinding:
        caused_by_ref = f"clarification:{question.question_id}"
        existing = next(
            (
                finding for finding in session.findings
                if finding.area == FindingArea.CLARIFICATION and finding.caused_by_ref == caused_by_ref
            ),
            None,
        )

        if answer_kind in {AnswerKind.SELECTED, AnswerKind.CUSTOM}:
            resolution_state = ResolutionState.RESOLVED
            resolved_at = datetime.utcnow()
            message = f"Clarified '{question.topic_ref}' with answer '{answer_value}'."
        elif answer_kind == AnswerKind.SKIPPED:
            resolution_state = ResolutionState.SKIPPED
            resolved_at = None
            message = f"Clarification for '{question.topic_ref}' was skipped and still needs review."
        else:
            resolution_state = ResolutionState.EXPERT_REVIEW
            resolved_at = None
            message = f"Clarification for '{question.topic_ref}' requires expert review."

        if existing is None:
            existing = ValidationFinding(
                finding_id=str(uuid.uuid4()),
                session_id=session.session_id,
                area=FindingArea.CLARIFICATION,
                severity=FindingSeverity.WARNING,
                code="CLARIFICATION_PENDING",
                title="Clarification pending",
                message=message,
                resolution_state=resolution_state,
                resolution_note=None,
                caused_by_ref=caused_by_ref,
                created_at=datetime.utcnow(),
                resolved_at=resolved_at,
            )
            self.repository.db.add(existing)
            session.findings.append(existing)
        else:
            existing.message = message
            existing.resolution_state = resolution_state
            existing.resolved_at = resolved_at

        if answer_kind in {AnswerKind.SELECTED, AnswerKind.CUSTOM}:
            existing.code = "CLARIFICATION_RESOLVED"
            existing.title = "Clarification resolved"
        elif answer_kind == AnswerKind.SKIPPED:
            existing.code = "CLARIFICATION_SKIPPED"
            existing.title = "Clarification skipped"
        else:
            existing.code = "CLARIFICATION_EXPERT_REVIEW"
            existing.title = "Clarification requires expert review"

        return existing
    # [/DEF:ClarificationEngine._upsert_clarification_finding:Function]

    # [DEF:ClarificationEngine._derive_readiness_state:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Recompute readiness after clarification mutation while preserving unresolved visibility semantics.
    # @RELATION: [DEPENDS_ON] ->[ClarificationSession]
    # @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
    def _derive_readiness_state(self, session: DatasetReviewSession) -> ReadinessState:
        clarification_session = self._get_latest_clarification_session(session)
        if clarification_session is None:
            return session.readiness_state

        if clarification_session.current_question_id:
            return ReadinessState.CLARIFICATION_ACTIVE

        if clarification_session.remaining_count > 0:
            return ReadinessState.CLARIFICATION_NEEDED

        return ReadinessState.REVIEW_READY
    # [/DEF:ClarificationEngine._derive_readiness_state:Function]

    # [DEF:ClarificationEngine._derive_recommended_action:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Recompute next-action guidance after clarification mutations.
    def _derive_recommended_action(self, session: DatasetReviewSession) -> RecommendedAction:
        clarification_session = self._get_latest_clarification_session(session)
        if clarification_session is None:
            return session.recommended_action
        if clarification_session.current_question_id:
            return RecommendedAction.ANSWER_NEXT_QUESTION
        if clarification_session.remaining_count > 0:
            return RecommendedAction.START_CLARIFICATION
        return RecommendedAction.REVIEW_DOCUMENTATION
    # [/DEF:ClarificationEngine._derive_recommended_action:Function]
# [/DEF:ClarificationEngine:Class]

# [/DEF:ClarificationEngine:Module]
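Both build_question_payload and _select_next_open_question use the same deterministic ordering: highest priority first, ties broken by creation time, then by question id, with skipped and expert-review items excluded from selection but kept unresolved. A short sketch of just that ordering, using plain dicts as hypothetical stand-ins for ClarificationQuestion rows:

# Sketch of the engine's deterministic question ordering (plain-dict stand-ins).
from datetime import datetime

questions = [
    {"question_id": "q2", "priority": 5, "created_at": datetime(2026, 3, 1), "state": "OPEN"},
    {"question_id": "q1", "priority": 5, "created_at": datetime(2026, 2, 1), "state": "OPEN"},
    {"question_id": "q3", "priority": 9, "created_at": datetime(2026, 3, 2), "state": "SKIPPED"},
]

# Skipped items stay visible elsewhere but are never re-selected here.
open_questions = [q for q in questions if q["state"] == "OPEN"]
open_questions.sort(key=lambda q: (-int(q["priority"]), q["created_at"], q["question_id"]))

# Equal priority falls back to the earlier created_at, then the smaller id.
assert [q["question_id"] for q in open_questions] == ["q1", "q2"]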
158 backend/src/services/dataset_review/event_logger.py (Normal file)
@@ -0,0 +1,158 @@
# [DEF:SessionEventLoggerModule:Module]
# @COMPLEXITY: 4
# @SEMANTICS: dataset_review, audit, session_events, persistence, observability
# @PURPOSE: Persist explicit session mutation events for dataset-review audit trails without weakening ownership or approval invariants.
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] ->[SessionEvent]
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
# @PRE: Caller provides an owned session scope and an authenticated actor identifier for each persisted mutation event.
# @POST: Every logged event is committed as an explicit, queryable audit record with deterministic event metadata.
# @SIDE_EFFECT: Inserts persisted session event rows and emits runtime belief-state logs for audit-sensitive mutations.
# @DATA_CONTRACT: Input[SessionEventPayload] -> Output[SessionEvent]

from __future__ import annotations

# [DEF:SessionEventLoggerImports:Block]
from dataclasses import dataclass, field
from typing import Any, Dict, Optional

from sqlalchemy.orm import Session

from src.core.logger import belief_scope, logger
from src.models.dataset_review import DatasetReviewSession, SessionEvent
# [/DEF:SessionEventLoggerImports:Block]


# [DEF:SessionEventPayload:Class]
# @COMPLEXITY: 2
# @PURPOSE: Typed input contract for one persisted dataset-review session audit event.
@dataclass(frozen=True)
class SessionEventPayload:
    session_id: str
    actor_user_id: str
    event_type: str
    event_summary: str
    current_phase: Optional[str] = None
    readiness_state: Optional[str] = None
    event_details: Dict[str, Any] = field(default_factory=dict)
# [/DEF:SessionEventPayload:Class]


# [DEF:SessionEventLogger:Class]
# @COMPLEXITY: 4
# @PURPOSE: Persist explicit dataset-review session audit events with meaningful runtime reasoning logs.
# @RELATION: [DEPENDS_ON] ->[SessionEvent]
# @RELATION: [DEPENDS_ON] ->[SessionEventPayload]
# @PRE: The database session is live and payload identifiers are non-empty.
# @POST: Returns the committed session event row with a stable identifier and stored detail payload.
# @SIDE_EFFECT: Writes one audit row to persistence and emits logger.reason/logger.reflect traces.
# @DATA_CONTRACT: Input[SessionEventPayload] -> Output[SessionEvent]
class SessionEventLogger:
    # [DEF:SessionEventLogger.__init__:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Bind a live SQLAlchemy session to the session-event logger.
    def __init__(self, db: Session) -> None:
        self.db = db
    # [/DEF:SessionEventLogger.__init__:Function]

    # [DEF:SessionEventLogger.log_event:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist one explicit session event row for an owned dataset-review mutation.
    # @RELATION: [DEPENDS_ON] ->[SessionEvent]
    # @PRE: session_id, actor_user_id, event_type, and event_summary are non-empty.
    # @POST: Returns the committed SessionEvent record with normalized detail payload.
    # @SIDE_EFFECT: Inserts and commits one session_events row.
    # @DATA_CONTRACT: Input[SessionEventPayload] -> Output[SessionEvent]
    def log_event(self, payload: SessionEventPayload) -> SessionEvent:
        with belief_scope("SessionEventLogger.log_event"):
            session_id = str(payload.session_id or "").strip()
            actor_user_id = str(payload.actor_user_id or "").strip()
            event_type = str(payload.event_type or "").strip()
            event_summary = str(payload.event_summary or "").strip()

            if not session_id:
                logger.explore("Session event logging rejected because session_id is empty")
                raise ValueError("session_id must be non-empty")
            if not actor_user_id:
                logger.explore(
                    "Session event logging rejected because actor_user_id is empty",
                    extra={"session_id": session_id},
                )
                raise ValueError("actor_user_id must be non-empty")
            if not event_type:
                logger.explore(
                    "Session event logging rejected because event_type is empty",
                    extra={"session_id": session_id, "actor_user_id": actor_user_id},
                )
                raise ValueError("event_type must be non-empty")
            if not event_summary:
                logger.explore(
                    "Session event logging rejected because event_summary is empty",
                    extra={"session_id": session_id, "event_type": event_type},
                )
                raise ValueError("event_summary must be non-empty")

            normalized_details = dict(payload.event_details or {})
            logger.reason(
                "Persisting explicit dataset-review session audit event",
                extra={
                    "session_id": session_id,
                    "actor_user_id": actor_user_id,
                    "event_type": event_type,
                    "current_phase": payload.current_phase,
                    "readiness_state": payload.readiness_state,
                },
            )

            event = SessionEvent(
                session_id=session_id,
                actor_user_id=actor_user_id,
                event_type=event_type,
                event_summary=event_summary,
                current_phase=payload.current_phase,
                readiness_state=payload.readiness_state,
                event_details=normalized_details,
            )
            self.db.add(event)
            self.db.commit()
            self.db.refresh(event)

            logger.reflect(
                "Dataset-review session audit event persisted",
                extra={
                    "session_id": session_id,
                    "session_event_id": event.session_event_id,
                    "event_type": event.event_type,
                },
            )
            return event
    # [/DEF:SessionEventLogger.log_event:Function]

    # [DEF:SessionEventLogger.log_for_session:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Convenience wrapper for logging an event directly from a session aggregate root.
    # @RELATION: [CALLS] ->[SessionEventLogger.log_event]
    def log_for_session(
        self,
        session: DatasetReviewSession,
        *,
        actor_user_id: str,
        event_type: str,
        event_summary: str,
        event_details: Optional[Dict[str, Any]] = None,
    ) -> SessionEvent:
        return self.log_event(
            SessionEventPayload(
                session_id=session.session_id,
                actor_user_id=actor_user_id,
                event_type=event_type,
                event_summary=event_summary,
                current_phase=session.current_phase.value if session.current_phase else None,
                readiness_state=session.readiness_state.value if session.readiness_state else None,
                event_details=dict(event_details or {}),
            )
        )
    # [/DEF:SessionEventLogger.log_for_session:Function]
# [/DEF:SessionEventLogger:Class]

# [/DEF:SessionEventLoggerModule:Module]
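log_event enforces its @PRE contract up front, so every persisted audit row is guaranteed a session, actor, type, and summary. A standalone sketch of that validation contract with no database involved (the helper name below is hypothetical, purely for illustration):

# Standalone sketch of the log_event field-validation contract.
def validate_event_fields(session_id: str, actor_user_id: str,
                          event_type: str, event_summary: str) -> None:
    # Mirrors the guards above: every identifier must be non-empty after strip().
    for name, value in [("session_id", session_id), ("actor_user_id", actor_user_id),
                        ("event_type", event_type), ("event_summary", event_summary)]:
        if not str(value or "").strip():
            raise ValueError(f"{name} must be non-empty")

validate_event_fields("s-1", "u-1", "session_started", "shell created")  # passes
try:
    validate_event_fields("s-1", "", "session_started", "shell created")
except ValueError as exc:
    assert "actor_user_id" in str(exc)
else:
    raise AssertionError("expected ValueError for empty actor_user_id")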
991 backend/src/services/dataset_review/orchestrator.py (Normal file)
@@ -0,0 +1,991 @@
|
||||
# [DEF:DatasetReviewOrchestrator:Module]
|
||||
# @COMPLEXITY: 5
|
||||
# @SEMANTICS: dataset_review, orchestration, session_lifecycle, intake, recovery
|
||||
# @PURPOSE: Coordinate dataset review session startup and lifecycle-safe intake recovery for one authenticated user.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository]
|
||||
# @RELATION: [DEPENDS_ON] ->[SemanticSourceResolver]
|
||||
# @RELATION: [DEPENDS_ON] ->[ClarificationEngine]
|
||||
# @RELATION: [DEPENDS_ON] ->[SupersetContextExtractor]
|
||||
# @RELATION: [DEPENDS_ON] ->[SupersetCompilationAdapter]
|
||||
# @RELATION: [DEPENDS_ON] ->[TaskManager]
|
||||
# @PRE: session mutations must execute inside a persisted session boundary scoped to one authenticated user.
|
||||
# @POST: state transitions are persisted atomically and emit observable progress for long-running steps.
|
||||
# @SIDE_EFFECT: creates task records, updates session aggregates, triggers upstream Superset calls, persists audit artifacts.
|
||||
# @DATA_CONTRACT: Input[SessionCommand] -> Output[DatasetReviewSession | CompiledPreview | DatasetRunContext]
|
||||
# @INVARIANT: Launch is blocked unless a current session has no open blocking findings, all launch-sensitive mappings are approved, and a non-stale Superset-generated compiled preview matches the current input fingerprint.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# [DEF:DatasetReviewOrchestrator.imports:Block]
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from src.core.config_manager import ConfigManager
|
||||
from src.core.logger import belief_scope, logger
|
||||
from src.core.task_manager import TaskManager
|
||||
from src.core.utils.superset_compilation_adapter import (
|
||||
PreviewCompilationPayload,
|
||||
SqlLabLaunchPayload,
|
||||
SupersetCompilationAdapter,
|
||||
)
|
||||
from src.core.utils.superset_context_extractor import (
|
||||
SupersetContextExtractor,
|
||||
SupersetParsedContext,
|
||||
)
|
||||
from src.models.auth import User
|
||||
from src.models.dataset_review import (
|
||||
ApprovalState,
|
||||
BusinessSummarySource,
|
||||
CompiledPreview,
|
||||
ConfidenceState,
|
||||
DatasetProfile,
|
||||
DatasetReviewSession,
|
||||
DatasetRunContext,
|
||||
ExecutionMapping,
|
||||
FilterConfidenceState,
|
||||
FilterRecoveryStatus,
|
||||
FilterSource,
|
||||
FindingArea,
|
||||
FindingSeverity,
|
||||
ImportedFilter,
|
||||
LaunchStatus,
|
||||
MappingMethod,
|
||||
MappingStatus,
|
||||
PreviewStatus,
|
||||
RecommendedAction,
|
||||
ReadinessState,
|
||||
ResolutionState,
|
||||
SessionPhase,
|
||||
SessionStatus,
|
||||
TemplateVariable,
|
||||
ValidationFinding,
|
||||
VariableKind,
|
||||
)
|
||||
from src.services.dataset_review.repositories.session_repository import (
|
||||
DatasetReviewSessionRepository,
|
||||
)
|
||||
from src.services.dataset_review.semantic_resolver import SemanticSourceResolver
|
||||
from src.services.dataset_review.event_logger import SessionEventPayload
|
||||
# [/DEF:DatasetReviewOrchestrator.imports:Block]
|
||||
|
||||
|
||||
# [DEF:StartSessionCommand:Class]
|
||||
# @COMPLEXITY: 2
|
||||
# @PURPOSE: Typed input contract for starting a dataset review session.
|
||||
@dataclass
|
||||
class StartSessionCommand:
|
||||
user: User
|
||||
environment_id: str
|
||||
source_kind: str
|
||||
source_input: str
|
||||
# [/DEF:StartSessionCommand:Class]
|
||||
|
||||
|
||||
# [DEF:StartSessionResult:Class]
|
||||
# @COMPLEXITY: 2
|
||||
# @PURPOSE: Session-start result carrying the persisted session and intake recovery metadata.
|
||||
@dataclass
|
||||
class StartSessionResult:
|
||||
session: DatasetReviewSession
|
||||
parsed_context: Optional[SupersetParsedContext] = None
|
||||
findings: List[ValidationFinding] = field(default_factory=list)
|
||||
# [/DEF:StartSessionResult:Class]
|
||||
|
||||
|
||||
# [DEF:PreparePreviewCommand:Class]
|
||||
# @COMPLEXITY: 2
|
||||
# @PURPOSE: Typed input contract for compiling one Superset-backed session preview.
|
||||
@dataclass
|
||||
class PreparePreviewCommand:
|
||||
user: User
|
||||
session_id: str
|
||||
# [/DEF:PreparePreviewCommand:Class]
|
||||
|
||||
|
||||
# [DEF:PreparePreviewResult:Class]
|
||||
# @COMPLEXITY: 2
|
||||
# @PURPOSE: Result contract for one persisted compiled preview attempt.
|
||||
@dataclass
|
||||
class PreparePreviewResult:
|
||||
session: DatasetReviewSession
|
||||
preview: CompiledPreview
|
||||
blocked_reasons: List[str] = field(default_factory=list)
|
||||
# [/DEF:PreparePreviewResult:Class]
|
||||
|
||||
|
||||
# [DEF:LaunchDatasetCommand:Class]
|
||||
# @COMPLEXITY: 2
|
||||
# @PURPOSE: Typed input contract for launching one dataset-review session into SQL Lab.
|
||||
@dataclass
|
||||
class LaunchDatasetCommand:
|
||||
user: User
|
||||
session_id: str
|
||||
# [/DEF:LaunchDatasetCommand:Class]
|
||||
|
||||
|
||||
# [DEF:LaunchDatasetResult:Class]
|
||||
# @COMPLEXITY: 2
|
||||
# @PURPOSE: Launch result carrying immutable run context and any gate blockers surfaced before launch.
|
||||
@dataclass
|
||||
class LaunchDatasetResult:
|
||||
session: DatasetReviewSession
|
||||
run_context: DatasetRunContext
|
||||
blocked_reasons: List[str] = field(default_factory=list)
|
||||
# [/DEF:LaunchDatasetResult:Class]
|
||||
|
||||
|
||||
# [DEF:DatasetReviewOrchestrator:Class]
|
||||
# @COMPLEXITY: 5
|
||||
# @PURPOSE: Coordinate safe session startup while preserving cross-user isolation and explicit partial recovery.
|
||||
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository]
|
||||
# @RELATION: [DEPENDS_ON] ->[SupersetContextExtractor]
|
||||
# @RELATION: [DEPENDS_ON] ->[TaskManager]
|
||||
# @RELATION: [DEPENDS_ON] ->[SessionRepo]
|
||||
# @RELATION: [DEPENDS_ON] ->[ConfigManager]
|
||||
# @PRE: constructor dependencies are valid and tied to the current request/task scope.
|
||||
# @POST: orchestrator instance can execute session-scoped mutations for one authenticated user.
|
||||
# @SIDE_EFFECT: downstream operations may persist session/profile/finding state and enqueue background tasks.
|
||||
# @DATA_CONTRACT: Input[StartSessionCommand] -> Output[StartSessionResult]
|
||||
# @INVARIANT: session ownership is preserved on every mutation and recovery remains explicit when partial.
|
||||
class DatasetReviewOrchestrator:
|
||||
# [DEF:DatasetReviewOrchestrator.__init__:Function]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: Bind repository, config, and task dependencies required by the orchestration boundary.
|
||||
# @RELATION: [DEPENDS_ON] ->[SessionRepo]
|
||||
# @RELATION: [DEPENDS_ON] ->[ConfigManager]
|
||||
def __init__(
|
||||
self,
|
||||
repository: DatasetReviewSessionRepository,
|
||||
config_manager: ConfigManager,
|
||||
task_manager: Optional[TaskManager] = None,
|
||||
semantic_resolver: Optional[SemanticSourceResolver] = None,
|
||||
) -> None:
|
||||
self.repository = repository
|
||||
self.config_manager = config_manager
|
||||
self.task_manager = task_manager
|
||||
self.semantic_resolver = semantic_resolver or SemanticSourceResolver()
|
||||
# [/DEF:DatasetReviewOrchestrator.__init__:Function]
|
||||
|
||||
# [DEF:DatasetReviewOrchestrator.start_session:Function]
|
||||
# @COMPLEXITY: 5
|
||||
# @PURPOSE: Initialize a new session from a Superset link or dataset selection and trigger context recovery.
|
||||
# @RELATION: [DEPENDS_ON] ->[SessionRepo]
|
||||
# @RELATION: [CALLS] ->[SupersetContextExtractor.parse_superset_link]
|
||||
# @RELATION: [CALLS] ->[create_task]
|
||||
# @PRE: source input is non-empty and environment is accessible.
|
||||
# @POST: session exists in persisted storage with intake/recovery state and task linkage when async work is required.
|
||||
# @SIDE_EFFECT: persists session and may enqueue recovery task.
|
||||
# @DATA_CONTRACT: Input[StartSessionCommand] -> Output[StartSessionResult]
|
||||
# @INVARIANT: no cross-user session leakage occurs; session and follow-up task remain owned by the authenticated user.
|
||||
def start_session(self, command: StartSessionCommand) -> StartSessionResult:
|
||||
with belief_scope("DatasetReviewOrchestrator.start_session"):
|
||||
normalized_source_kind = str(command.source_kind or "").strip()
|
||||
normalized_source_input = str(command.source_input or "").strip()
|
||||
normalized_environment_id = str(command.environment_id or "").strip()
|
||||
|
||||
if not normalized_source_input:
|
||||
logger.explore("Blocked dataset review session start due to empty source input")
|
||||
raise ValueError("source_input must be non-empty")
|
||||
|
||||
if normalized_source_kind not in {"superset_link", "dataset_selection"}:
|
||||
logger.explore(
|
||||
"Blocked dataset review session start due to unsupported source kind",
|
||||
extra={"source_kind": normalized_source_kind},
|
||||
)
|
||||
raise ValueError("source_kind must be 'superset_link' or 'dataset_selection'")
|
||||
|
||||
environment = self.config_manager.get_environment(normalized_environment_id)
|
||||
if environment is None:
|
||||
logger.explore(
|
||||
"Blocked dataset review session start because environment was not found",
|
||||
extra={"environment_id": normalized_environment_id},
|
||||
)
|
||||
raise ValueError("Environment not found")
|
||||
            logger.reason(
                "Starting dataset review session",
                extra={
                    "user_id": command.user.id,
                    "environment_id": normalized_environment_id,
                    "source_kind": normalized_source_kind,
                },
            )

            parsed_context: Optional[SupersetParsedContext] = None
            findings: List[ValidationFinding] = []
            dataset_ref = normalized_source_input
            dataset_id: Optional[int] = None
            dashboard_id: Optional[int] = None
            readiness_state = ReadinessState.IMPORTING
            recommended_action = RecommendedAction.REVIEW_DOCUMENTATION
            current_phase = SessionPhase.RECOVERY

            if normalized_source_kind == "superset_link":
                extractor = SupersetContextExtractor(environment)
                parsed_context = extractor.parse_superset_link(normalized_source_input)
                dataset_ref = parsed_context.dataset_ref
                dataset_id = parsed_context.dataset_id
                dashboard_id = parsed_context.dashboard_id

                if parsed_context.partial_recovery:
                    readiness_state = ReadinessState.RECOVERY_REQUIRED
                    recommended_action = RecommendedAction.REVIEW_DOCUMENTATION
                    findings.extend(self._build_partial_recovery_findings(parsed_context))
                else:
                    readiness_state = ReadinessState.REVIEW_READY
            else:
                dataset_ref, dataset_id = self._parse_dataset_selection(normalized_source_input)
                readiness_state = ReadinessState.REVIEW_READY
                current_phase = SessionPhase.REVIEW

            session = DatasetReviewSession(
                user_id=command.user.id,
                environment_id=normalized_environment_id,
                source_kind=normalized_source_kind,
                source_input=normalized_source_input,
                dataset_ref=dataset_ref,
                dataset_id=dataset_id,
                dashboard_id=dashboard_id,
                readiness_state=readiness_state,
                recommended_action=recommended_action,
                status=SessionStatus.ACTIVE,
                current_phase=current_phase,
            )
            persisted_session = self.repository.create_session(session)

            recovered_filters: List[ImportedFilter] = []
            template_variables: List[TemplateVariable] = []
            execution_mappings: List[ExecutionMapping] = []
            if normalized_source_kind == "superset_link" and parsed_context is not None:
                recovered_filters, template_variables, execution_mappings, findings = self._build_recovery_bootstrap(
                    environment=environment,
                    session=persisted_session,
                    parsed_context=parsed_context,
                    findings=findings,
                )

            profile = self._build_initial_profile(
                session_id=persisted_session.session_id,
                parsed_context=parsed_context,
                dataset_ref=dataset_ref,
            )
            self.repository.event_logger.log_event(
                SessionEventPayload(
                    session_id=persisted_session.session_id,
                    actor_user_id=command.user.id,
                    event_type="session_started",
                    event_summary="Dataset review session shell created",
                    current_phase=persisted_session.current_phase.value,
                    readiness_state=persisted_session.readiness_state.value,
                    event_details={
                        "source_kind": persisted_session.source_kind,
                        "dataset_ref": persisted_session.dataset_ref,
                        "dataset_id": persisted_session.dataset_id,
                        "dashboard_id": persisted_session.dashboard_id,
                        "partial_recovery": bool(parsed_context and parsed_context.partial_recovery),
                    },
                )
            )
            persisted_session = self.repository.save_profile_and_findings(
                persisted_session.session_id,
                command.user.id,
                profile,
                findings,
            )
            if recovered_filters or template_variables or execution_mappings:
                persisted_session = self.repository.save_recovery_state(
                    persisted_session.session_id,
                    command.user.id,
                    recovered_filters,
                    template_variables,
                    execution_mappings,
                )

            active_task_id = self._enqueue_recovery_task(
                command=command,
                session=persisted_session,
                parsed_context=parsed_context,
            )
            if active_task_id:
                persisted_session.active_task_id = active_task_id
                self.repository.db.commit()
                self.repository.db.refresh(persisted_session)
                self.repository.event_logger.log_event(
                    SessionEventPayload(
                        session_id=persisted_session.session_id,
                        actor_user_id=command.user.id,
                        event_type="recovery_task_linked",
                        event_summary="Recovery task linked to dataset review session",
                        current_phase=persisted_session.current_phase.value,
                        readiness_state=persisted_session.readiness_state.value,
                        event_details={"task_id": active_task_id},
                    )
                )
                logger.reason(
                    "Linked recovery task to started dataset review session",
                    extra={"session_id": persisted_session.session_id, "task_id": active_task_id},
                )

            logger.reflect(
                "Dataset review session start completed",
                extra={
                    "session_id": persisted_session.session_id,
                    "dataset_ref": persisted_session.dataset_ref,
                    "dataset_id": persisted_session.dataset_id,
                    "dashboard_id": persisted_session.dashboard_id,
                    "readiness_state": persisted_session.readiness_state.value,
                    "active_task_id": persisted_session.active_task_id,
                    "finding_count": len(findings),
                },
            )
            return StartSessionResult(
                session=persisted_session,
                parsed_context=parsed_context,
                findings=findings,
            )
    # [/DEF:DatasetReviewOrchestrator.start_session:Function]

    # [DEF:DatasetReviewOrchestrator.prepare_launch_preview:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Assemble effective execution inputs and trigger Superset-side preview compilation.
    # @RELATION: [CALLS] ->[SupersetCompilationAdapter.compile_preview]
    # @PRE: all required variables have candidate values or explicitly accepted defaults.
    # @POST: returns preview artifact in pending, ready, failed, or stale state.
    # @SIDE_EFFECT: persists preview attempt and upstream compilation diagnostics.
    # @DATA_CONTRACT: Input[PreparePreviewCommand] -> Output[PreparePreviewResult]
    def prepare_launch_preview(self, command: PreparePreviewCommand) -> PreparePreviewResult:
        with belief_scope("DatasetReviewOrchestrator.prepare_launch_preview"):
            session = self.repository.load_session_detail(command.session_id, command.user.id)
            if session is None or session.user_id != command.user.id:
                logger.explore(
                    "Preview preparation rejected because owned session was not found",
                    extra={"session_id": command.session_id, "user_id": command.user.id},
                )
                raise ValueError("Session not found")

            if session.dataset_id is None:
                raise ValueError("Preview requires a resolved dataset_id")

            environment = self.config_manager.get_environment(session.environment_id)
            if environment is None:
                raise ValueError("Environment not found")

            execution_snapshot = self._build_execution_snapshot(session)
            preview_blockers = execution_snapshot["preview_blockers"]
            if preview_blockers:
                logger.explore(
                    "Preview preparation blocked by incomplete execution context",
                    extra={
                        "session_id": session.session_id,
                        "blocked_reasons": preview_blockers,
                    },
                )
                raise ValueError("Preview blocked: " + "; ".join(preview_blockers))

            adapter = SupersetCompilationAdapter(environment)
            preview = adapter.compile_preview(
                PreviewCompilationPayload(
                    session_id=session.session_id,
                    dataset_id=session.dataset_id,
                    preview_fingerprint=execution_snapshot["preview_fingerprint"],
                    template_params=execution_snapshot["template_params"],
                    effective_filters=execution_snapshot["effective_filters"],
                )
            )
            persisted_preview = self.repository.save_preview(
                session.session_id,
                command.user.id,
                preview,
            )

            session.current_phase = SessionPhase.PREVIEW
            session.last_activity_at = datetime.utcnow()
            if persisted_preview.preview_status == PreviewStatus.READY:
                launch_blockers = self._build_launch_blockers(
                    session=session,
                    execution_snapshot=execution_snapshot,
                    preview=persisted_preview,
                )
                if launch_blockers:
                    session.readiness_state = ReadinessState.COMPILED_PREVIEW_READY
                    session.recommended_action = RecommendedAction.APPROVE_MAPPING
                else:
                    session.readiness_state = ReadinessState.RUN_READY
                    session.recommended_action = RecommendedAction.LAUNCH_DATASET
            else:
                session.readiness_state = ReadinessState.PARTIALLY_READY
                session.recommended_action = RecommendedAction.GENERATE_SQL_PREVIEW
            self.repository.db.commit()
            self.repository.db.refresh(session)
            self.repository.event_logger.log_event(
                SessionEventPayload(
                    session_id=session.session_id,
                    actor_user_id=command.user.id,
                    event_type="preview_generated",
                    event_summary="Superset preview generation persisted",
                    current_phase=session.current_phase.value,
                    readiness_state=session.readiness_state.value,
                    event_details={
                        "preview_id": persisted_preview.preview_id,
                        "preview_status": persisted_preview.preview_status.value,
                        "preview_fingerprint": persisted_preview.preview_fingerprint,
                    },
                )
            )

            logger.reflect(
                "Superset preview preparation completed",
                extra={
                    "session_id": session.session_id,
                    "preview_id": persisted_preview.preview_id,
                    "preview_status": persisted_preview.preview_status.value,
                    "preview_fingerprint": persisted_preview.preview_fingerprint,
                },
            )
            return PreparePreviewResult(
                session=session,
                preview=persisted_preview,
                blocked_reasons=[],
            )
    # [/DEF:DatasetReviewOrchestrator.prepare_launch_preview:Function]
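
    # Readiness transitions applied above, summarized per preview outcome:
    #   preview READY, launch blockers remain -> COMPILED_PREVIEW_READY / APPROVE_MAPPING
    #   preview READY, no launch blockers     -> RUN_READY / LAUNCH_DATASET
    #   preview not READY                     -> PARTIALLY_READY / GENERATE_SQL_PREVIEW
    # The returned blocked_reasons list is always empty here because a blocked
    # preview raises ValueError before this point rather than returning.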

    # [DEF:DatasetReviewOrchestrator.launch_dataset:Function]
    # @COMPLEXITY: 5
    # @PURPOSE: Start the approved dataset execution through SQL Lab and persist run context for audit/replay.
    # @RELATION: [CALLS] ->[SupersetCompilationAdapter.create_sql_lab_session]
    # @PRE: session is run-ready and compiled preview is current.
    # @POST: returns persisted run context with SQL Lab session reference and launch outcome.
    # @SIDE_EFFECT: creates SQL Lab execution session and audit snapshot.
    # @DATA_CONTRACT: Input[LaunchDatasetCommand] -> Output[LaunchDatasetResult]
    # @INVARIANT: launch remains blocked unless blocking findings are closed, approvals are satisfied, and the latest Superset preview fingerprint matches current execution inputs.
    def launch_dataset(self, command: LaunchDatasetCommand) -> LaunchDatasetResult:
        with belief_scope("DatasetReviewOrchestrator.launch_dataset"):
            session = self.repository.load_session_detail(command.session_id, command.user.id)
            if session is None or session.user_id != command.user.id:
                logger.explore(
                    "Launch rejected because owned session was not found",
                    extra={"session_id": command.session_id, "user_id": command.user.id},
                )
                raise ValueError("Session not found")

            if session.dataset_id is None:
                raise ValueError("Launch requires a resolved dataset_id")

            environment = self.config_manager.get_environment(session.environment_id)
            if environment is None:
                raise ValueError("Environment not found")

            execution_snapshot = self._build_execution_snapshot(session)
            current_preview = self._get_latest_preview(session)
            launch_blockers = self._build_launch_blockers(
                session=session,
                execution_snapshot=execution_snapshot,
                preview=current_preview,
            )
            if launch_blockers:
                logger.explore(
                    "Launch gate blocked dataset execution",
                    extra={
                        "session_id": session.session_id,
                        "blocked_reasons": launch_blockers,
                    },
                )
                raise ValueError("Launch blocked: " + "; ".join(launch_blockers))

            adapter = SupersetCompilationAdapter(environment)
            try:
                sql_lab_session_ref = adapter.create_sql_lab_session(
                    SqlLabLaunchPayload(
                        session_id=session.session_id,
                        dataset_id=session.dataset_id,
                        preview_id=current_preview.preview_id,
                        compiled_sql=str(current_preview.compiled_sql or ""),
                        template_params=execution_snapshot["template_params"],
                    )
                )
                launch_status = LaunchStatus.STARTED
                launch_error = None
            except Exception as exc:
                logger.explore(
                    "SQL Lab launch failed after passing gates",
                    extra={"session_id": session.session_id, "error": str(exc)},
                )
                sql_lab_session_ref = "unavailable"
                launch_status = LaunchStatus.FAILED
                launch_error = str(exc)

            run_context = DatasetRunContext(
                session_id=session.session_id,
                dataset_ref=session.dataset_ref,
                environment_id=session.environment_id,
                preview_id=current_preview.preview_id,
                sql_lab_session_ref=sql_lab_session_ref,
                effective_filters=execution_snapshot["effective_filters"],
                template_params=execution_snapshot["template_params"],
                approved_mapping_ids=execution_snapshot["approved_mapping_ids"],
                semantic_decision_refs=execution_snapshot["semantic_decision_refs"],
                open_warning_refs=execution_snapshot["open_warning_refs"],
                launch_status=launch_status,
                launch_error=launch_error,
            )
            persisted_run_context = self.repository.save_run_context(
                session.session_id,
                command.user.id,
                run_context,
            )

            session.current_phase = SessionPhase.LAUNCH
            session.last_activity_at = datetime.utcnow()
            if launch_status == LaunchStatus.FAILED:
                session.readiness_state = ReadinessState.COMPILED_PREVIEW_READY
                session.recommended_action = RecommendedAction.LAUNCH_DATASET
            else:
                session.readiness_state = ReadinessState.RUN_IN_PROGRESS
                session.recommended_action = RecommendedAction.EXPORT_OUTPUTS
            self.repository.db.commit()
            self.repository.db.refresh(session)
            self.repository.event_logger.log_event(
                SessionEventPayload(
                    session_id=session.session_id,
                    actor_user_id=command.user.id,
                    event_type="dataset_launch_requested",
                    event_summary="Dataset launch handoff persisted",
                    current_phase=session.current_phase.value,
                    readiness_state=session.readiness_state.value,
                    event_details={
                        "run_context_id": persisted_run_context.run_context_id,
                        "launch_status": persisted_run_context.launch_status.value,
                        "preview_id": persisted_run_context.preview_id,
                        "sql_lab_session_ref": persisted_run_context.sql_lab_session_ref,
                    },
                )
            )

            logger.reflect(
                "Dataset launch orchestration completed with audited run context",
                extra={
                    "session_id": session.session_id,
                    "run_context_id": persisted_run_context.run_context_id,
                    "launch_status": persisted_run_context.launch_status.value,
                    "preview_id": persisted_run_context.preview_id,
                },
            )
            return LaunchDatasetResult(
                session=session,
                run_context=persisted_run_context,
                blocked_reasons=[],
            )
    # [/DEF:DatasetReviewOrchestrator.launch_dataset:Function]

    # [DEF:DatasetReviewOrchestrator._parse_dataset_selection:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Normalize dataset-selection payload into canonical session references.
    # @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
    def _parse_dataset_selection(self, source_input: str) -> tuple[str, Optional[int]]:
        normalized = str(source_input or "").strip()
        if not normalized:
            raise ValueError("dataset selection input must be non-empty")

        if normalized.isdigit():
            dataset_id = int(normalized)
            return f"dataset:{dataset_id}", dataset_id

        if normalized.startswith("dataset:"):
            suffix = normalized.split(":", 1)[1].strip()
            if suffix.isdigit():
                return normalized, int(suffix)
            return normalized, None

        return normalized, None
    # [/DEF:DatasetReviewOrchestrator._parse_dataset_selection:Function]
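
    # Worked examples for the normalization above (inputs are hypothetical; the
    # outcomes follow directly from the branches of _parse_dataset_selection):
    #   "42"            -> ("dataset:42", 42)      bare numeric id is canonicalized
    #   "dataset:7"     -> ("dataset:7", 7)        canonical ref, numeric suffix
    #   "dataset:main"  -> ("dataset:main", None)  canonical ref, non-numeric suffix
    #   "sales.orders"  -> ("sales.orders", None)  free-form ref passes through
    #   "   "           -> raises ValueError       empty after strip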

    # [DEF:DatasetReviewOrchestrator._build_initial_profile:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Create the first profile snapshot so exports and detail views remain usable immediately after intake.
    # @RELATION: [DEPENDS_ON] ->[DatasetProfile]
    def _build_initial_profile(
        self,
        session_id: str,
        parsed_context: Optional[SupersetParsedContext],
        dataset_ref: str,
    ) -> DatasetProfile:
        dataset_name = dataset_ref.split(".")[-1] if dataset_ref else "Unresolved dataset"
        business_summary = (
            f"Review session initialized for {dataset_ref}."
            if dataset_ref
            else "Review session initialized with unresolved dataset context."
        )
        confidence_state = (
            ConfidenceState.MIXED
            if parsed_context and parsed_context.partial_recovery
            else ConfidenceState.MOSTLY_CONFIRMED
        )
        return DatasetProfile(
            session_id=session_id,
            dataset_name=dataset_name or "Unresolved dataset",
            schema_name=dataset_ref.split(".")[0] if "." in dataset_ref else None,
            business_summary=business_summary,
            business_summary_source=BusinessSummarySource.IMPORTED,
            description="Initial review profile created from source intake.",
            dataset_type="unknown",
            is_sqllab_view=False,
            completeness_score=0.25,
            confidence_state=confidence_state,
            has_blocking_findings=False,
            has_warning_findings=bool(parsed_context and parsed_context.partial_recovery),
            manual_summary_locked=False,
        )
    # [/DEF:DatasetReviewOrchestrator._build_initial_profile:Function]

    # [DEF:DatasetReviewOrchestrator._build_partial_recovery_findings:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Project partial Superset intake recovery into explicit findings without blocking session usability.
    # @RELATION: [DEPENDS_ON] ->[ValidationFinding]
    # @PRE: parsed_context.partial_recovery is true.
    # @POST: returns warning-level findings that preserve usable but incomplete state.
    # @SIDE_EFFECT: none beyond structured finding creation.
    # @DATA_CONTRACT: Input[SupersetParsedContext] -> Output[List[ValidationFinding]]
    def _build_partial_recovery_findings(
        self,
        parsed_context: SupersetParsedContext,
    ) -> List[ValidationFinding]:
        findings: List[ValidationFinding] = []
        for unresolved_ref in parsed_context.unresolved_references:
            findings.append(
                ValidationFinding(
                    area=FindingArea.SOURCE_INTAKE,
                    severity=FindingSeverity.WARNING,
                    code="PARTIAL_SUPERSET_RECOVERY",
                    title="Superset context recovered partially",
                    message=(
                        "Session remains usable, but some Superset context requires review: "
                        f"{unresolved_ref.replace('_', ' ')}."
                    ),
                    resolution_state=ResolutionState.OPEN,
                    caused_by_ref=unresolved_ref,
                )
            )
        return findings
    # [/DEF:DatasetReviewOrchestrator._build_partial_recovery_findings:Function]

    # [DEF:DatasetReviewOrchestrator._build_recovery_bootstrap:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Recover and materialize initial imported filters, template variables, and draft execution mappings after session creation.
    def _build_recovery_bootstrap(
        self,
        environment,
        session: DatasetReviewSession,
        parsed_context: SupersetParsedContext,
        findings: List[ValidationFinding],
    ) -> tuple[List[ImportedFilter], List[TemplateVariable], List[ExecutionMapping], List[ValidationFinding]]:
        extractor = SupersetContextExtractor(environment)
        imported_filters_payload = extractor.recover_imported_filters(parsed_context)
        if imported_filters_payload is None:
            imported_filters_payload = []
        imported_filters = [
            ImportedFilter(
                session_id=session.session_id,
                filter_name=str(item.get("filter_name") or f"imported_filter_{index}"),
                display_name=item.get("display_name"),
                raw_value=item.get("raw_value"),
                normalized_value=item.get("normalized_value"),
                source=FilterSource(str(item.get("source") or FilterSource.SUPERSET_URL.value)),
                confidence_state=FilterConfidenceState(
                    str(item.get("confidence_state") or FilterConfidenceState.UNRESOLVED.value)
                ),
                requires_confirmation=bool(item.get("requires_confirmation", False)),
                recovery_status=FilterRecoveryStatus(
                    str(item.get("recovery_status") or FilterRecoveryStatus.PARTIAL.value)
                ),
                notes=item.get("notes"),
            )
            for index, item in enumerate(imported_filters_payload)
        ]

        template_variables: List[TemplateVariable] = []
        execution_mappings: List[ExecutionMapping] = []

        if session.dataset_id is not None:
            try:
                dataset_payload = extractor.client.get_dataset_detail(session.dataset_id)
                discovered_variables = extractor.discover_template_variables(dataset_payload)
                template_variables = [
                    TemplateVariable(
                        session_id=session.session_id,
                        variable_name=str(item.get("variable_name") or f"variable_{index}"),
                        expression_source=str(item.get("expression_source") or ""),
                        variable_kind=VariableKind(str(item.get("variable_kind") or VariableKind.UNKNOWN.value)),
                        is_required=bool(item.get("is_required", True)),
                        default_value=item.get("default_value"),
                        mapping_status=MappingStatus(str(item.get("mapping_status") or MappingStatus.UNMAPPED.value)),
                    )
                    for index, item in enumerate(discovered_variables)
                ]
            except Exception as exc:
                if "dataset_template_variable_discovery_failed" not in parsed_context.unresolved_references:
                    parsed_context.unresolved_references.append("dataset_template_variable_discovery_failed")
                if not any(
                    finding.caused_by_ref == "dataset_template_variable_discovery_failed"
                    for finding in findings
                ):
                    findings.append(
                        ValidationFinding(
                            area=FindingArea.TEMPLATE_MAPPING,
                            severity=FindingSeverity.WARNING,
                            code="TEMPLATE_VARIABLE_DISCOVERY_FAILED",
                            title="Template variables could not be discovered",
                            message="Session remains usable, but dataset template variables still need review.",
                            resolution_state=ResolutionState.OPEN,
                            caused_by_ref="dataset_template_variable_discovery_failed",
                        )
                    )
                logger.explore(
                    "Template variable discovery failed during session bootstrap",
                    extra={"session_id": session.session_id, "dataset_id": session.dataset_id, "error": str(exc)},
                )

        filter_lookup = {
            str(imported_filter.filter_name or "").strip().lower(): imported_filter
            for imported_filter in imported_filters
            if str(imported_filter.filter_name or "").strip()
        }
        for template_variable in template_variables:
            matched_filter = filter_lookup.get(str(template_variable.variable_name or "").strip().lower())
            if matched_filter is None:
                continue
            requires_explicit_approval = bool(
                matched_filter.requires_confirmation
                or matched_filter.recovery_status != FilterRecoveryStatus.RECOVERED
            )
            execution_mappings.append(
                ExecutionMapping(
                    session_id=session.session_id,
                    filter_id=matched_filter.filter_id,
                    variable_id=template_variable.variable_id,
                    mapping_method=MappingMethod.DIRECT_MATCH,
                    raw_input_value=matched_filter.raw_value,
                    effective_value=matched_filter.normalized_value if matched_filter.normalized_value is not None else matched_filter.raw_value,
                    transformation_note="Bootstrapped from Superset recovery context",
                    warning_level=None,
                    requires_explicit_approval=requires_explicit_approval,
                    approval_state=ApprovalState.PENDING if requires_explicit_approval else ApprovalState.NOT_REQUIRED,
                    approved_by_user_id=None,
                    approved_at=None,
                )
            )

        return imported_filters, template_variables, execution_mappings, findings
    # [/DEF:DatasetReviewOrchestrator._build_recovery_bootstrap:Function]
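
    # Matching rule used above, stated explicitly: a draft ExecutionMapping is
    # only created when an imported filter's name equals a discovered template
    # variable's name case-insensitively after trimming; any pairing whose
    # filter requires confirmation or was not fully recovered starts in
    # ApprovalState.PENDING, so it must be approved before launch. (The original
    # warning_level expression evaluated to None on both branches and has been
    # simplified accordingly.)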

    # [DEF:DatasetReviewOrchestrator._build_execution_snapshot:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Build effective filters, template params, approvals, and fingerprint for preview and launch gating.
    # @RELATION: [DEPENDS_ON] ->[DatasetReviewSession]
    # @PRE: Session aggregate includes imported filters, template variables, and current execution mappings.
    # @POST: returns deterministic execution snapshot for current session state without mutating persistence.
    # @SIDE_EFFECT: none.
    # @DATA_CONTRACT: Input[DatasetReviewSession] -> Output[Dict[str,Any]]
    def _build_execution_snapshot(self, session: DatasetReviewSession) -> Dict[str, Any]:
        filter_lookup = {item.filter_id: item for item in session.imported_filters}
        variable_lookup = {item.variable_id: item for item in session.template_variables}

        effective_filters: List[Dict[str, Any]] = []
        template_params: Dict[str, Any] = {}
        approved_mapping_ids: List[str] = []
        open_warning_refs: List[str] = []
        preview_blockers: List[str] = []

        for mapping in session.execution_mappings:
            imported_filter = filter_lookup.get(mapping.filter_id)
            template_variable = variable_lookup.get(mapping.variable_id)
            if imported_filter is None:
                preview_blockers.append(f"mapping:{mapping.mapping_id}:missing_filter")
                continue
            if template_variable is None:
                preview_blockers.append(f"mapping:{mapping.mapping_id}:missing_variable")
                continue

            effective_value = mapping.effective_value
            if effective_value is None:
                effective_value = imported_filter.normalized_value
            if effective_value is None:
                effective_value = imported_filter.raw_value
            if effective_value is None:
                effective_value = template_variable.default_value

            if effective_value is None and template_variable.is_required:
                preview_blockers.append(f"variable:{template_variable.variable_name}:missing_required_value")
                continue

            effective_filters.append(
                {
                    "mapping_id": mapping.mapping_id,
                    "filter_id": imported_filter.filter_id,
                    "filter_name": imported_filter.filter_name,
                    "variable_id": template_variable.variable_id,
                    "variable_name": template_variable.variable_name,
                    "effective_value": effective_value,
                    "raw_input_value": mapping.raw_input_value,
                }
            )
            template_params[template_variable.variable_name] = effective_value
            if mapping.approval_state == ApprovalState.APPROVED:
                approved_mapping_ids.append(mapping.mapping_id)
            if mapping.requires_explicit_approval and mapping.approval_state != ApprovalState.APPROVED:
                open_warning_refs.append(mapping.mapping_id)

        mapped_variable_ids = {mapping.variable_id for mapping in session.execution_mappings}
        for variable in session.template_variables:
            if variable.variable_id in mapped_variable_ids:
                continue
            if variable.default_value is not None:
                template_params[variable.variable_name] = variable.default_value
                continue
            if variable.is_required:
                preview_blockers.append(f"variable:{variable.variable_name}:unmapped")

        semantic_decision_refs = [
            field.field_id
            for field in session.semantic_fields
            if field.is_locked or not field.needs_review or field.provenance.value != "unresolved"
        ]
        preview_fingerprint = self._compute_preview_fingerprint(
            {
                "dataset_id": session.dataset_id,
                "template_params": template_params,
                "effective_filters": effective_filters,
            }
        )
        return {
            "effective_filters": effective_filters,
            "template_params": template_params,
            "approved_mapping_ids": sorted(approved_mapping_ids),
            "semantic_decision_refs": sorted(semantic_decision_refs),
            "open_warning_refs": sorted(open_warning_refs),
            "preview_blockers": sorted(set(preview_blockers)),
            "preview_fingerprint": preview_fingerprint,
        }
    # [/DEF:DatasetReviewOrchestrator._build_execution_snapshot:Function]
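
    # Effective-value resolution order used above for each mapped variable:
    #   1. mapping.effective_value
    #   2. imported_filter.normalized_value
    #   3. imported_filter.raw_value
    #   4. template_variable.default_value
    # If all four are None and the variable is required, the mapping contributes
    # a "variable:<name>:missing_required_value" preview blocker instead of a value.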

    # [DEF:DatasetReviewOrchestrator._build_launch_blockers:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Enforce launch gates from findings, approvals, and current preview truth.
    # @RELATION: [DEPENDS_ON] ->[CompiledPreview]
    # @PRE: execution_snapshot was computed from current session state and preview is the latest persisted preview or None.
    # @POST: returns explicit blocker codes for every unmet launch invariant.
    # @SIDE_EFFECT: none.
    # @DATA_CONTRACT: Input[DatasetReviewSession,Dict[str,Any],CompiledPreview|None] -> Output[List[str]]
    def _build_launch_blockers(
        self,
        session: DatasetReviewSession,
        execution_snapshot: Dict[str, Any],
        preview: Optional[CompiledPreview],
    ) -> List[str]:
        blockers = list(execution_snapshot["preview_blockers"])

        for finding in session.findings:
            if (
                finding.severity == FindingSeverity.BLOCKING
                and finding.resolution_state not in {ResolutionState.RESOLVED, ResolutionState.APPROVED}
            ):
                blockers.append(f"finding:{finding.code}:blocking")
        for mapping in session.execution_mappings:
            if mapping.requires_explicit_approval and mapping.approval_state != ApprovalState.APPROVED:
                blockers.append(f"mapping:{mapping.mapping_id}:approval_required")

        if preview is None:
            blockers.append("preview:missing")
        else:
            if preview.preview_status != PreviewStatus.READY:
                blockers.append(f"preview:{preview.preview_status.value}")
            if preview.preview_fingerprint != execution_snapshot["preview_fingerprint"]:
                blockers.append("preview:fingerprint_mismatch")

        return sorted(set(blockers))
    # [/DEF:DatasetReviewOrchestrator._build_launch_blockers:Function]
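
    # Blocker code families emitted above (the sorted-set return deduplicates):
    #   mapping:<id>:missing_filter | missing_variable | approval_required
    #   variable:<name>:missing_required_value | unmapped
    #   finding:<code>:blocking
    #   preview:missing | preview:<status> | preview:fingerprint_mismatch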

    # [DEF:DatasetReviewOrchestrator._get_latest_preview:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Resolve the current latest preview snapshot for one session aggregate.
    def _get_latest_preview(self, session: DatasetReviewSession) -> Optional[CompiledPreview]:
        if not session.previews:
            return None
        if session.last_preview_id:
            for preview in session.previews:
                if preview.preview_id == session.last_preview_id:
                    return preview
        return sorted(
            session.previews,
            key=lambda item: (item.created_at or datetime.min, item.preview_id),
            reverse=True,
        )[0]
    # [/DEF:DatasetReviewOrchestrator._get_latest_preview:Function]
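
    # Fallback order above: prefer the session's last_preview_id pointer; if it
    # is unset or dangling, fall back to the newest preview by (created_at,
    # preview_id), substituting datetime.min for missing timestamps so rows
    # without a created_at sort oldest instead of raising on a None comparison.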

    # [DEF:DatasetReviewOrchestrator._compute_preview_fingerprint:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Produce deterministic execution fingerprint for preview truth and staleness checks.
    def _compute_preview_fingerprint(self, payload: Dict[str, Any]) -> str:
        serialized = json.dumps(payload, sort_keys=True, default=str)
        return hashlib.sha256(serialized.encode("utf-8")).hexdigest()
    # [/DEF:DatasetReviewOrchestrator._compute_preview_fingerprint:Function]
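
    # Determinism notes for the scheme above: json.dumps(..., sort_keys=True)
    # canonicalizes dict key order, so {"a": 1, "b": 2} and {"b": 2, "a": 1}
    # fingerprint identically, while list order is preserved, meaning the
    # ordering of effective_filters must be stable upstream for fingerprints to
    # be comparable; default=str coerces non-JSON values (datetimes, Decimals)
    # instead of raising TypeError.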

    # [DEF:DatasetReviewOrchestrator._enqueue_recovery_task:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Link session start to observable async recovery when task infrastructure is available.
    # @RELATION: [CALLS] ->[create_task]
    # @PRE: session is already persisted.
    # @POST: returns task identifier when a task could be enqueued, otherwise None.
    # @SIDE_EFFECT: may create one background task for progressive recovery.
    # @DATA_CONTRACT: Input[StartSessionCommand,DatasetReviewSession,SupersetParsedContext|None] -> Output[task_id:str|None]
    def _enqueue_recovery_task(
        self,
        command: StartSessionCommand,
        session: DatasetReviewSession,
        parsed_context: Optional[SupersetParsedContext],
    ) -> Optional[str]:
        if self.task_manager is None:
            logger.reason(
                "Dataset review session started without task manager; continuing synchronously",
                extra={"session_id": session.session_id},
            )
            return None

        task_params: Dict[str, Any] = {
            "session_id": session.session_id,
            "user_id": command.user.id,
            "environment_id": session.environment_id,
            "source_kind": session.source_kind,
            "source_input": session.source_input,
            "dataset_ref": session.dataset_ref,
            "dataset_id": session.dataset_id,
            "dashboard_id": session.dashboard_id,
            "partial_recovery": bool(parsed_context and parsed_context.partial_recovery),
        }

        create_task = getattr(self.task_manager, "create_task", None)
        if create_task is None:
            logger.explore("Task manager has no create_task method; skipping recovery enqueue")
            return None

        try:
            task_object = create_task(
                plugin_id="dataset-review-recovery",
                params=task_params,
            )
        except TypeError:
            logger.explore(
                "Recovery task enqueue skipped because task manager create_task contract is incompatible",
                extra={"session_id": session.session_id},
            )
            return None

        task_id = getattr(task_object, "id", None)
        return str(task_id) if task_id else None
    # [/DEF:DatasetReviewOrchestrator._enqueue_recovery_task:Function]
# [/DEF:DatasetReviewOrchestrator:Class]

# [/DEF:DatasetReviewOrchestrator:Module]
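
# Hedged end-to-end sketch of the flow this module defines
# (start_session -> prepare_launch_preview -> launch_dataset). Only the method
# names, result fields, and raised errors come from the code above; the command
# constructors' keyword arguments and the orchestrator wiring are assumptions
# made for illustration.
def review_and_launch(orchestrator, user, environment_id, superset_link):
    started = orchestrator.start_session(
        StartSessionCommand(  # kwargs assumed from the fields read off the command
            user=user,
            environment_id=environment_id,
            source_kind="superset_link",
            source_input=superset_link,
        )
    )
    session_id = started.session.session_id

    # Raises ValueError("Preview blocked: ...") while required variables are unmapped.
    orchestrator.prepare_launch_preview(
        PreparePreviewCommand(user=user, session_id=session_id)  # kwargs assumed
    )

    # Raises ValueError("Launch blocked: ...") until every launch gate is clear.
    launched = orchestrator.launch_dataset(
        LaunchDatasetCommand(user=user, session_id=session_id)  # kwargs assumed
    )
    return launched.run_context
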
@@ -0,0 +1,216 @@
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from src.models.mapping import Base, Environment
from src.models.auth import User
from src.models.dataset_review import (
    DatasetReviewSession,
    DatasetProfile,
    ValidationFinding,
    CompiledPreview,
    DatasetRunContext,
    BusinessSummarySource,
    ConfidenceState,
    FindingArea,
    FindingSeverity,
    ReadinessState,
    RecommendedAction,
    SessionCollaborator,
    SessionCollaboratorRole,
)
from src.services.dataset_review.repositories.session_repository import DatasetReviewSessionRepository

# [DEF:SessionRepositoryTests:Module]
# @COMPLEXITY: 3
# @PURPOSE: Unit tests for DatasetReviewSessionRepository.
# @RELATION: TESTS -> [DatasetReviewSessionRepository]

@pytest.fixture
def db_session():
    # [DEF:db_session:Function]
    # @COMPLEXITY: 1
    # @RELATION: BINDS_TO -> [SessionRepositoryTests]
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    # Create test data
    user = User(id="user1", username="testuser", email="test@example.com", password_hash="pw")
    env = Environment(id="env1", name="Prod", url="http://superset", credentials_id="cred1")
    session.add_all([user, env])
    session.commit()

    yield session
    session.close()
    # [/DEF:db_session:Function]

def test_create_session(db_session):
    # @PURPOSE: Verify session creation and persistence.
    repo = DatasetReviewSessionRepository(db_session)
    session = DatasetReviewSession(
        user_id="user1",
        environment_id="env1",
        source_kind="superset_link",
        source_input="http://link",
        dataset_ref="dataset1"
    )
    repo.create_session(session)

    assert session.session_id is not None
    loaded = db_session.query(DatasetReviewSession).filter_by(session_id=session.session_id).first()
    assert loaded.user_id == "user1"

def test_load_session_detail_ownership(db_session):
    # @PURPOSE: Verify ownership enforcement in detail loading.
    repo = DatasetReviewSessionRepository(db_session)
    session = DatasetReviewSession(
        user_id="user1", environment_id="env1", source_kind="superset_link",
        source_input="http://link", dataset_ref="dataset1"
    )
    repo.create_session(session)

    # Correct user
    loaded = repo.load_session_detail(session.session_id, "user1")
    assert loaded is not None

    # Wrong user
    loaded_wrong = repo.load_session_detail(session.session_id, "wrong_user")
    assert loaded_wrong is None

def test_load_session_detail_collaborator(db_session):
    # @PURPOSE: Verify collaborator access in detail loading.
    repo = DatasetReviewSessionRepository(db_session)
    session = DatasetReviewSession(
        user_id="user1", environment_id="env1", source_kind="superset_link",
        source_input="http://link", dataset_ref="dataset1"
    )
    repo.create_session(session)

    # Add collaborator
    collab_user = User(id="collab1", username="collab", email="c@e.com", password_hash="p")
    db_session.add(collab_user)

    collaborator = SessionCollaborator(
        session_id=session.session_id,
        user_id="collab1",
        role=SessionCollaboratorRole.REVIEWER
    )
    db_session.add(collaborator)
    db_session.commit()

    # Collaborator access
    loaded = repo.load_session_detail(session.session_id, "collab1")
    assert loaded is not None
    assert loaded.session_id == session.session_id

def test_save_preview_marks_stale(db_session):
    # @PURPOSE: Verify that saving a new preview marks old ones as stale.
    repo = DatasetReviewSessionRepository(db_session)
    session = DatasetReviewSession(
        user_id="user1", environment_id="env1", source_kind="superset_link",
        source_input="http://link", dataset_ref="dataset1"
    )
    repo.create_session(session)

    p1 = CompiledPreview(session_id=session.session_id, preview_status="ready", preview_fingerprint="f1")
    repo.save_preview(session.session_id, "user1", p1)

    p2 = CompiledPreview(session_id=session.session_id, preview_status="ready", preview_fingerprint="f2")
    repo.save_preview(session.session_id, "user1", p2)

    db_session.refresh(p1)
    assert p1.preview_status == "stale"
    assert p2.preview_status == "ready"
    assert session.last_preview_id == p2.preview_id

def test_save_profile_and_findings(db_session):
    # @PURPOSE: Verify persistence of profile and findings.
    repo = DatasetReviewSessionRepository(db_session)
    session = DatasetReviewSession(
        user_id="user1", environment_id="env1", source_kind="superset_link",
        source_input="http://link", dataset_ref="dataset1"
    )
    repo.create_session(session)

    profile = DatasetProfile(
        session_id=session.session_id,
        dataset_name="Test DS",
        business_summary="Summary",
        business_summary_source=BusinessSummarySource.INFERRED,
        confidence_state=ConfidenceState.UNRESOLVED
    )

    finding = ValidationFinding(
        session_id=session.session_id,
        area=FindingArea.SOURCE_INTAKE,
        severity=FindingSeverity.BLOCKING,
        code="ERR1",
        title="Error",
        message="Failure"
    )

    repo.save_profile_and_findings(session.session_id, "user1", profile, [finding])

    updated_session = repo.load_session_detail(session.session_id, "user1")
    assert updated_session.profile.dataset_name == "Test DS"
    assert len(updated_session.findings) == 1
    assert updated_session.findings[0].code == "ERR1"

    # Verify removal of old findings
    new_finding = ValidationFinding(
        session_id=session.session_id,
        area=FindingArea.DATASET_PROFILE,
        severity=FindingSeverity.WARNING,
        code="WARN1",
        title="Warning",
        message="Something"
    )

    repo.save_profile_and_findings(session.session_id, "user1", profile, [new_finding])

    db_session.expire_all()
    final_session = repo.load_session_detail(session.session_id, "user1")
    assert len(final_session.findings) == 1
    assert final_session.findings[0].code == "WARN1"

def test_save_run_context(db_session):
    # @PURPOSE: Verify saving of run context.
    repo = DatasetReviewSessionRepository(db_session)
    session = DatasetReviewSession(
        user_id="user1", environment_id="env1", source_kind="superset_link",
        source_input="http://link", dataset_ref="dataset1"
    )
    repo.create_session(session)

    rc = DatasetRunContext(
        session_id=session.session_id,
        dataset_ref="ds1",
        environment_id="env1",
        preview_id="p1",
        sql_lab_session_ref="s1",
        effective_filters={},
        template_params={},
        approved_mapping_ids=[],
        semantic_decision_refs=[],
        open_warning_refs=[],
        launch_status="success"
    )
    repo.save_run_context(session.session_id, "user1", rc)

    assert session.last_run_context_id == rc.run_context_id

def test_list_sessions_for_user(db_session):
    # @PURPOSE: Verify listing of sessions by user.
    repo = DatasetReviewSessionRepository(db_session)
    s1 = DatasetReviewSession(user_id="user1", environment_id="env1", source_kind="k", source_input="i", dataset_ref="r1")
    s2 = DatasetReviewSession(user_id="user1", environment_id="env1", source_kind="k", source_input="i", dataset_ref="r2")
    s3 = DatasetReviewSession(user_id="other", environment_id="env1", source_kind="k", source_input="i", dataset_ref="r3")

    db_session.add_all([s1, s2, s3])
    db_session.commit()

    sessions = repo.list_sessions_for_user("user1")
    assert len(sessions) == 2
    assert all(s.user_id == "user1" for s in sessions)

# [/DEF:SessionRepositoryTests:Module]
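
# These tests run entirely against an in-memory SQLite engine, so no external
# services are required. A typical invocation looks like the following (the
# file path is an assumption; this diff does not show where the module lives):
#   pytest -q backend/tests/test_session_repository.py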

@@ -0,0 +1,366 @@
# [DEF:DatasetReviewSessionRepository:Module]
# @COMPLEXITY: 5
# @PURPOSE: Persist and retrieve dataset review session aggregates, including readiness, findings, semantic decisions, clarification state, previews, and run contexts.
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
# @RELATION: [DEPENDS_ON] -> [DatasetProfile]
# @RELATION: [DEPENDS_ON] -> [ValidationFinding]
# @RELATION: [DEPENDS_ON] -> [CompiledPreview]
# @PRE: repository operations execute within authenticated request or task scope.
# @POST: session aggregate reads are structurally consistent and writes preserve ownership and version semantics.
# @SIDE_EFFECT: reads and writes SQLAlchemy-backed session aggregates.
# @DATA_CONTRACT: Input[SessionMutation] -> Output[PersistedSessionAggregate]
# @INVARIANT: answers, mapping approvals, preview artifacts, and launch snapshots are never attributed to the wrong user or session.

from typing import Optional, List
from sqlalchemy import or_
from sqlalchemy.orm import Session, joinedload
from src.models.dataset_review import (
    ClarificationQuestion,
    ClarificationSession,
    DatasetReviewSession,
    DatasetProfile,
    ValidationFinding,
    CompiledPreview,
    DatasetRunContext,
    ExecutionMapping,
    ImportedFilter,
    SemanticFieldEntry,
    SessionCollaborator,
    SessionEvent,
    TemplateVariable,
)
from src.core.logger import belief_scope, logger
from src.services.dataset_review.event_logger import SessionEventLogger

# [DEF:SessionRepo:Class]
# @COMPLEXITY: 4
# @PURPOSE: Enforce ownership-scoped persistence and retrieval for dataset review session aggregates.
# @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
# @RELATION: [DEPENDS_ON] -> [DatasetProfile]
# @RELATION: [DEPENDS_ON] -> [ValidationFinding]
# @RELATION: [DEPENDS_ON] -> [CompiledPreview]
# @PRE: constructor receives a live SQLAlchemy session and callers provide authenticated user scope for guarded reads and writes.
# @POST: repository methods return ownership-scoped aggregates or persisted child records without changing domain meaning.
# @SIDE_EFFECT: mutates and queries the persistence layer through the injected database session.
# @DATA_CONTRACT: Input[OwnedSessionQuery|SessionMutation] -> Output[PersistedSessionAggregate|PersistedChildRecord]
class DatasetReviewSessionRepository:

    # [DEF:init_repo:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Bind one live SQLAlchemy session to the repository instance.
    def __init__(self, db: Session):
        self.db = db
        self.event_logger = SessionEventLogger(db)
    # [/DEF:init_repo:Function]

    # [DEF:get_owned_session:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Resolve one owner-scoped dataset review session for mutation paths without leaking foreign-session state.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    # @PRE: session_id and user_id are non-empty identifiers from the authenticated ownership scope.
    # @POST: returns the owned session or raises a deterministic access error.
    # @SIDE_EFFECT: reads one session row from the current database transaction.
    # @DATA_CONTRACT: Input[OwnedSessionQuery] -> Output[DatasetReviewSession|ValueError]
    def _get_owned_session(self, session_id: str, user_id: str) -> DatasetReviewSession:
        with belief_scope("DatasetReviewSessionRepository.get_owned_session"):
            logger.reason(
                "Resolving owner-scoped dataset review session for mutation path",
                extra={"session_id": session_id, "user_id": user_id},
            )
            session = self.db.query(DatasetReviewSession).filter(
                DatasetReviewSession.session_id == session_id,
                DatasetReviewSession.user_id == user_id,
            ).first()
            if not session:
                logger.explore(
                    "Owner-scoped dataset review session lookup failed",
                    extra={"session_id": session_id, "user_id": user_id},
                )
                raise ValueError("Session not found or access denied")
            logger.reflect(
                "Owner-scoped dataset review session resolved",
                extra={"session_id": session.session_id, "user_id": session.user_id},
            )
            return session
    # [/DEF:get_owned_session:Function]

    # [DEF:create_sess:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist an initial dataset review session shell.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    # @PRE: session is a new aggregate root bound to the current ownership scope.
    # @POST: session is committed, refreshed, and returned with persisted identifiers.
    # @SIDE_EFFECT: inserts a session row and commits the active transaction.
    # @DATA_CONTRACT: Input[DatasetReviewSession] -> Output[DatasetReviewSession]
    def create_session(self, session: DatasetReviewSession) -> DatasetReviewSession:
        with belief_scope("DatasetReviewSessionRepository.create_session"):
            logger.reason(
                "Persisting dataset review session shell",
                extra={"user_id": session.user_id, "environment_id": session.environment_id},
            )
            self.db.add(session)
            self.db.commit()
            self.db.refresh(session)
            logger.reflect(
                "Dataset review session shell persisted with stable identifier",
                extra={"session_id": session.session_id, "user_id": session.user_id},
            )
            return session
    # [/DEF:create_sess:Function]

    # [DEF:load_detail:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Return the full session aggregate for API and frontend resume flows.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    # @RELATION: [DEPENDS_ON] -> [SessionCollaborator]
    def load_session_detail(self, session_id: str, user_id: str) -> Optional[DatasetReviewSession]:
        with belief_scope("DatasetReviewSessionRepository.load_session_detail"):
            logger.reason(
                "Loading dataset review session detail for owner-or-collaborator scope",
                extra={"session_id": session_id, "user_id": user_id},
            )
            session = (
                self.db.query(DatasetReviewSession)
                .outerjoin(SessionCollaborator, DatasetReviewSession.session_id == SessionCollaborator.session_id)
                .options(
                    joinedload(DatasetReviewSession.profile),
                    joinedload(DatasetReviewSession.findings),
                    joinedload(DatasetReviewSession.collaborators),
                    joinedload(DatasetReviewSession.semantic_sources),
                    joinedload(DatasetReviewSession.semantic_fields).joinedload(SemanticFieldEntry.candidates),
                    joinedload(DatasetReviewSession.imported_filters),
                    joinedload(DatasetReviewSession.template_variables),
                    joinedload(DatasetReviewSession.execution_mappings),
                    joinedload(DatasetReviewSession.clarification_sessions)
                    .joinedload(ClarificationSession.questions)
                    .joinedload(ClarificationQuestion.options),
                    joinedload(DatasetReviewSession.clarification_sessions)
                    .joinedload(ClarificationSession.questions)
                    .joinedload(ClarificationQuestion.answer),
                    joinedload(DatasetReviewSession.previews),
                    joinedload(DatasetReviewSession.run_contexts),
                    joinedload(DatasetReviewSession.events),
                )
                .filter(DatasetReviewSession.session_id == session_id)
                .filter(
                    or_(
                        DatasetReviewSession.user_id == user_id,
                        SessionCollaborator.user_id == user_id,
                    )
                )
                .first()
            )
            logger.reflect(
                "Dataset review session detail lookup completed",
                extra={
                    "session_id": session_id,
                    "user_id": user_id,
                    "found": bool(session),
                },
            )
            return session
    # [/DEF:load_detail:Function]

    # [DEF:save_prof_find:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist profile state and replace validation findings for an owned session in one transaction.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    # @RELATION: [DEPENDS_ON] -> [DatasetProfile]
    # @RELATION: [DEPENDS_ON] -> [ValidationFinding]
    # @PRE: session_id belongs to user_id and the supplied profile/findings belong to the same aggregate scope.
    # @POST: stored profile matches the current session and findings are replaced by the supplied collection.
    # @SIDE_EFFECT: updates profile rows, deletes stale findings, inserts current findings, and commits the transaction.
    # @DATA_CONTRACT: Input[ProfileAndFindingsMutation] -> Output[DatasetReviewSession]
    def save_profile_and_findings(self, session_id: str, user_id: str, profile: DatasetProfile, findings: List[ValidationFinding]) -> DatasetReviewSession:
        with belief_scope("DatasetReviewSessionRepository.save_profile_and_findings"):
            session = self._get_owned_session(session_id, user_id)
            logger.reason(
                "Persisting dataset profile and replacing validation findings",
                extra={
                    "session_id": session_id,
                    "user_id": user_id,
                    "has_profile": bool(profile),
                    "findings_count": len(findings),
                },
            )

            if profile:
                existing_profile = self.db.query(DatasetProfile).filter_by(session_id=session_id).first()
                if existing_profile:
                    profile.profile_id = existing_profile.profile_id
                self.db.merge(profile)

            self.db.query(ValidationFinding).filter(
                ValidationFinding.session_id == session_id
            ).delete()

            for finding in findings:
                finding.session_id = session_id
                self.db.add(finding)

            self.db.commit()
            logger.reflect(
                "Dataset profile and validation findings committed",
                extra={
                    "session_id": session.session_id,
                    "user_id": user_id,
                    "findings_count": len(findings),
                },
            )
            return self.load_session_detail(session_id, user_id)
    # [/DEF:save_prof_find:Function]

    # [DEF:save_recovery_state:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist imported filters, template variables, and initial execution mappings for one owned session.
    # @RELATION: [DEPENDS_ON] -> [ImportedFilter]
    # @RELATION: [DEPENDS_ON] -> [TemplateVariable]
    # @RELATION: [DEPENDS_ON] -> [ExecutionMapping]
    def save_recovery_state(
        self,
        session_id: str,
        user_id: str,
        imported_filters: List[ImportedFilter],
        template_variables: List[TemplateVariable],
        execution_mappings: List[ExecutionMapping],
    ) -> DatasetReviewSession:
        with belief_scope("DatasetReviewSessionRepository.save_recovery_state"):
            session = self._get_owned_session(session_id, user_id)
            logger.reason(
                "Persisting dataset review recovery bootstrap state",
                extra={
                    "session_id": session_id,
                    "user_id": user_id,
                    "imported_filters_count": len(imported_filters),
                    "template_variables_count": len(template_variables),
                    "execution_mappings_count": len(execution_mappings),
                },
            )

            self.db.query(ExecutionMapping).filter(
                ExecutionMapping.session_id == session_id
            ).delete()
            self.db.query(TemplateVariable).filter(
                TemplateVariable.session_id == session_id
            ).delete()
            self.db.query(ImportedFilter).filter(
                ImportedFilter.session_id == session_id
            ).delete()

            for imported_filter in imported_filters:
                imported_filter.session_id = session_id
                self.db.add(imported_filter)

            for template_variable in template_variables:
                template_variable.session_id = session_id
                self.db.add(template_variable)

            self.db.flush()

            for execution_mapping in execution_mappings:
                execution_mapping.session_id = session_id
                self.db.add(execution_mapping)

            self.db.commit()
            logger.reflect(
                "Dataset review recovery bootstrap state committed",
                extra={
                    "session_id": session.session_id,
                    "user_id": user_id,
                    "imported_filters_count": len(imported_filters),
                    "template_variables_count": len(template_variables),
                    "execution_mappings_count": len(execution_mappings),
                },
            )
            return self.load_session_detail(session_id, user_id)
    # [/DEF:save_recovery_state:Function]

    # [DEF:save_prev:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist a preview snapshot and mark prior session previews stale.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    # @RELATION: [DEPENDS_ON] -> [CompiledPreview]
    # @PRE: session_id belongs to user_id and preview is prepared for the same session aggregate.
    # @POST: preview is persisted and the session points to the latest preview identifier.
    # @SIDE_EFFECT: updates prior preview statuses, inserts a preview row, mutates the parent session, and commits.
    # @DATA_CONTRACT: Input[PreviewMutation] -> Output[CompiledPreview]
    def save_preview(self, session_id: str, user_id: str, preview: CompiledPreview) -> CompiledPreview:
        with belief_scope("DatasetReviewSessionRepository.save_preview"):
            session = self._get_owned_session(session_id, user_id)
            logger.reason(
                "Persisting compiled preview and staling previous preview snapshots",
                extra={"session_id": session_id, "user_id": user_id},
            )

            self.db.query(CompiledPreview).filter(
                CompiledPreview.session_id == session_id
            ).update({"preview_status": "stale"})

            self.db.add(preview)
            self.db.flush()
            session.last_preview_id = preview.preview_id

            self.db.commit()
            self.db.refresh(preview)
            logger.reflect(
                "Compiled preview committed as latest session preview",
                extra={
                    "session_id": session.session_id,
                    "preview_id": preview.preview_id,
                    "user_id": user_id,
                },
            )
            return preview
    # [/DEF:save_prev:Function]

    # [DEF:save_run_ctx:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Persist an immutable launch audit snapshot for an owned session.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    # @RELATION: [DEPENDS_ON] -> [DatasetRunContext]
    # @PRE: session_id belongs to user_id and run_context targets the same aggregate.
    # @POST: run context is persisted and linked as the latest launch snapshot for the session.
    # @SIDE_EFFECT: inserts a run-context row, mutates the parent session pointer, and commits.
    # @DATA_CONTRACT: Input[RunContextMutation] -> Output[DatasetRunContext]
    def save_run_context(self, session_id: str, user_id: str, run_context: DatasetRunContext) -> DatasetRunContext:
        with belief_scope("DatasetReviewSessionRepository.save_run_context"):
            session = self._get_owned_session(session_id, user_id)
            logger.reason(
                "Persisting dataset run context audit snapshot",
                extra={"session_id": session_id, "user_id": user_id},
            )

            self.db.add(run_context)
            self.db.flush()
            session.last_run_context_id = run_context.run_context_id

            self.db.commit()
            self.db.refresh(run_context)
            logger.reflect(
                "Dataset run context committed as latest launch snapshot",
                extra={
                    "session_id": session.session_id,
                    "run_context_id": run_context.run_context_id,
                    "user_id": user_id,
                },
            )
            return run_context
    # [/DEF:save_run_ctx:Function]

    # [DEF:list_user_sess:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: List review sessions owned by a specific user ordered by most recent update.
    # @RELATION: [DEPENDS_ON] -> [DatasetReviewSession]
    def list_sessions_for_user(self, user_id: str) -> List[DatasetReviewSession]:
        with belief_scope("DatasetReviewSessionRepository.list_sessions_for_user"):
            logger.reason(
                "Listing dataset review sessions for owner scope",
                extra={"user_id": user_id},
            )
            sessions = self.db.query(DatasetReviewSession).filter(
                DatasetReviewSession.user_id == user_id
            ).order_by(DatasetReviewSession.updated_at.desc()).all()
            logger.reflect(
                "Dataset review session list assembled",
                extra={"user_id": user_id, "session_count": len(sessions)},
            )
            return sessions
    # [/DEF:list_user_sess:Function]
# [/DEF:SessionRepo:Class]

# [/DEF:DatasetReviewSessionRepository:Module]
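
# Minimal usage sketch for the repository above. The import path is the one the
# tests use; SessionLocal stands in for an application-level sessionmaker() that
# this diff does not define, so treat it as an assumption.
from contextlib import closing

from src.services.dataset_review.repositories.session_repository import (
    DatasetReviewSessionRepository,
)

with closing(SessionLocal()) as db:  # SessionLocal: assumed session factory
    repo = DatasetReviewSessionRepository(db)
    for owned in repo.list_sessions_for_user("user1"):
        detail = repo.load_session_detail(owned.session_id, "user1")
        print(owned.session_id, detail.readiness_state if detail else None)
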
400 backend/src/services/dataset_review/semantic_resolver.py Normal file
@@ -0,0 +1,400 @@
|
||||
# [DEF:SemanticSourceResolver:Module]
# @COMPLEXITY: 4
# @SEMANTICS: dataset_review, semantic_resolution, dictionary, trusted_sources, ranking
# @PURPOSE: Resolve and rank semantic candidates from trusted dictionary-like sources before any inferred fallback.
# @LAYER: Domain
# @RELATION: [DEPENDS_ON] -> [LLMProviderService]
# @RELATION: [DEPENDS_ON] -> [SemanticSource]
# @RELATION: [DEPENDS_ON] -> [SemanticFieldEntry]
# @RELATION: [DEPENDS_ON] -> [SemanticCandidate]
# @PRE: selected source and target field set must be known.
# @POST: candidate ranking follows the configured confidence hierarchy and unresolved fuzzy matches remain reviewable.
# @SIDE_EFFECT: may create conflict findings and semantic candidate records.
# @INVARIANT: Manual overrides are never silently replaced by imported, inferred, or AI-generated values.

from __future__ import annotations

# [DEF:SemanticSourceResolver.imports:Block]
from dataclasses import dataclass, field
from difflib import SequenceMatcher
from typing import Any, Dict, Iterable, List, Mapping, Optional

from src.core.logger import belief_scope, logger
from src.models.dataset_review import (
    CandidateMatchType,
    CandidateStatus,
    FieldProvenance,
    SemanticSource,
)
# [/DEF:SemanticSourceResolver.imports:Block]


# [DEF:DictionaryResolutionResult:Class]
# @COMPLEXITY: 2
# @PURPOSE: Carries field-level dictionary resolution output with explicit review and partial-recovery state.
@dataclass
class DictionaryResolutionResult:
    source_ref: str
    resolved_fields: List[Dict[str, Any]] = field(default_factory=list)
    unresolved_fields: List[str] = field(default_factory=list)
    partial_recovery: bool = False
# [/DEF:DictionaryResolutionResult:Class]


# [DEF:SemanticSourceResolver:Class]
# @COMPLEXITY: 4
# @PURPOSE: Resolve semantic candidates from trusted sources while preserving manual locks and confidence ordering.
# @RELATION: [DEPENDS_ON] -> [SemanticFieldEntry]
# @RELATION: [DEPENDS_ON] -> [SemanticCandidate]
# @PRE: source payload and target field collection are provided by the caller.
# @POST: result contains confidence-ranked candidates and does not overwrite manual locks implicitly.
# @SIDE_EFFECT: emits semantic trace logs for ranking and fallback decisions.
class SemanticSourceResolver:
    # [DEF:SemanticSourceResolver.resolve_from_file:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Normalize uploaded semantic file records into field-level candidates.
    def resolve_from_file(self, source_payload: Mapping[str, Any], fields: Iterable[Mapping[str, Any]]) -> DictionaryResolutionResult:
        return DictionaryResolutionResult(source_ref=str(source_payload.get("source_ref") or "uploaded_file"))
    # [/DEF:SemanticSourceResolver.resolve_from_file:Function]
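One detail of the dataclass above is worth calling out: `field(default_factory=list)` gives every result its own list objects, so accumulating per-call state is safe. A standalone check:

```python
# Each DictionaryResolutionResult owns independent mutable containers.
a = DictionaryResolutionResult(source_ref="a")
b = DictionaryResolutionResult(source_ref="b")
a.unresolved_fields.append("salary")
print(b.unresolved_fields)  # [] -- the lists are not shared between instances
```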

    # [DEF:SemanticSourceResolver.resolve_from_dictionary:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Resolve candidates from connected tabular dictionary sources.
    # @RELATION: [DEPENDS_ON] -> [SemanticFieldEntry]
    # @RELATION: [DEPENDS_ON] -> [SemanticCandidate]
    # @PRE: dictionary source exists and fields contain stable field_name values.
    # @POST: returns confidence-ranked candidates where exact dictionary matches outrank fuzzy matches and unresolved fields stay explicit.
    # @SIDE_EFFECT: emits belief-state logs describing trusted-match and partial-recovery outcomes.
    # @DATA_CONTRACT: Input[source_payload:Mapping,fields:Iterable] -> Output[DictionaryResolutionResult]
    def resolve_from_dictionary(
        self,
        source_payload: Mapping[str, Any],
        fields: Iterable[Mapping[str, Any]],
    ) -> DictionaryResolutionResult:
        with belief_scope("SemanticSourceResolver.resolve_from_dictionary"):
            source_ref = str(source_payload.get("source_ref") or "").strip()
            dictionary_rows = source_payload.get("rows")

            if not source_ref:
                logger.explore("Dictionary semantic source is missing source_ref")
                raise ValueError("Dictionary semantic source must include source_ref")

            if not isinstance(dictionary_rows, list) or not dictionary_rows:
                logger.explore(
                    "Dictionary semantic source has no usable rows",
                    extra={"source_ref": source_ref},
                )
                raise ValueError("Dictionary semantic source must include non-empty rows")

            logger.reason(
                "Resolving semantics from trusted dictionary source",
                extra={"source_ref": source_ref, "row_count": len(dictionary_rows)},
            )

            normalized_rows = [self._normalize_dictionary_row(row) for row in dictionary_rows if isinstance(row, Mapping)]
            row_index = {
                row["field_key"]: row
                for row in normalized_rows
                if row.get("field_key")
            }

            resolved_fields: List[Dict[str, Any]] = []
            unresolved_fields: List[str] = []

            for raw_field in fields:
                field_name = str(raw_field.get("field_name") or "").strip()
                if not field_name:
                    continue

                is_locked = bool(raw_field.get("is_locked"))
                if is_locked:
                    logger.reason(
                        "Preserving manual lock during dictionary resolution",
                        extra={"field_name": field_name},
                    )
                    resolved_fields.append(
                        {
                            "field_name": field_name,
                            "applied_candidate": None,
                            "candidates": [],
                            "provenance": FieldProvenance.MANUAL_OVERRIDE.value,
                            "needs_review": False,
                            "has_conflict": False,
                            "is_locked": True,
                            "status": "preserved_manual",
                        }
                    )
                    continue

                exact_match = row_index.get(self._normalize_key(field_name))
                candidates: List[Dict[str, Any]] = []

                if exact_match is not None:
                    logger.reason(
                        "Resolved exact dictionary match",
                        extra={"field_name": field_name, "source_ref": source_ref},
                    )
                    candidates.append(
                        self._build_candidate_payload(
                            rank=1,
                            match_type=CandidateMatchType.EXACT,
                            confidence_score=1.0,
                            row=exact_match,
                        )
                    )
                else:
                    fuzzy_matches = self._find_fuzzy_matches(field_name, normalized_rows)
                    for rank_offset, fuzzy_match in enumerate(fuzzy_matches, start=1):
                        candidates.append(
                            self._build_candidate_payload(
                                rank=rank_offset,
                                match_type=CandidateMatchType.FUZZY,
                                confidence_score=float(fuzzy_match["score"]),
                                row=fuzzy_match["row"],
                            )
                        )

                if not candidates:
                    unresolved_fields.append(field_name)
                    resolved_fields.append(
                        {
                            "field_name": field_name,
                            "applied_candidate": None,
                            "candidates": [],
                            "provenance": FieldProvenance.UNRESOLVED.value,
                            "needs_review": True,
                            "has_conflict": False,
                            "is_locked": False,
                            "status": "unresolved",
                        }
                    )
                    logger.explore(
                        "No trusted dictionary match found for field",
                        extra={"field_name": field_name, "source_ref": source_ref},
                    )
                    continue

                ranked_candidates = self.rank_candidates(candidates)
                applied_candidate = ranked_candidates[0]
                has_conflict = len(ranked_candidates) > 1
                provenance = (
                    FieldProvenance.DICTIONARY_EXACT.value
                    if applied_candidate["match_type"] == CandidateMatchType.EXACT.value
                    else FieldProvenance.FUZZY_INFERRED.value
                )
                needs_review = applied_candidate["match_type"] != CandidateMatchType.EXACT.value

                resolved_fields.append(
                    {
                        "field_name": field_name,
                        "applied_candidate": applied_candidate,
                        "candidates": ranked_candidates,
                        "provenance": provenance,
                        "needs_review": needs_review,
                        "has_conflict": has_conflict,
                        "is_locked": False,
                        "status": "resolved",
                    }
                )

            result = DictionaryResolutionResult(
                source_ref=source_ref,
                resolved_fields=resolved_fields,
                unresolved_fields=unresolved_fields,
                partial_recovery=bool(unresolved_fields),
            )
            logger.reflect(
                "Dictionary resolution completed",
                extra={
                    "source_ref": source_ref,
                    "resolved_fields": len(resolved_fields),
                    "unresolved_fields": len(unresolved_fields),
                    "partial_recovery": result.partial_recovery,
                },
            )
            return result
    # [/DEF:SemanticSourceResolver.resolve_from_dictionary:Function]
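A hedged end-to-end sketch of the dictionary path above. The payload shape (`source_ref` plus non-empty `rows`) comes straight from the method's validation; the `dict://...` reference string is a made-up example, and the snippet assumes the module's own imports (logger, belief_scope, the enums) resolve in the running application:

```python
resolver = SemanticSourceResolver()
result = resolver.resolve_from_dictionary(
    source_payload={
        "source_ref": "dict://hr_glossary@v3",  # hypothetical source reference
        "rows": [
            {"field_name": "salary", "verbose_name": "Salary", "description": "Gross monthly salary"},
        ],
    },
    fields=[
        {"field_name": "salary"},                    # exact match -> confidence 1.0
        {"field_name": "bonus", "is_locked": True},  # manual lock preserved untouched
        {"field_name": "zzz_unknown"},               # no match above 0.72 -> stays unresolved
    ],
)
print(result.partial_recovery)   # True, because one field stayed unresolved
print(result.unresolved_fields)  # ['zzz_unknown']
```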

    # [DEF:SemanticSourceResolver.resolve_from_reference_dataset:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Reuse semantic metadata from trusted Superset datasets.
    def resolve_from_reference_dataset(
        self,
        source_payload: Mapping[str, Any],
        fields: Iterable[Mapping[str, Any]],
    ) -> DictionaryResolutionResult:
        return DictionaryResolutionResult(source_ref=str(source_payload.get("source_ref") or "reference_dataset"))
    # [/DEF:SemanticSourceResolver.resolve_from_reference_dataset:Function]

    # [DEF:SemanticSourceResolver.rank_candidates:Function]
    # @COMPLEXITY: 3
    # @PURPOSE: Apply confidence ordering and determine best candidate per field.
    # @RELATION: [DEPENDS_ON] -> [SemanticCandidate]
    def rank_candidates(self, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        ranked = sorted(
            candidates,
            key=lambda candidate: (
                self._match_priority(candidate.get("match_type")),
                -float(candidate.get("confidence_score", 0.0)),
                int(candidate.get("candidate_rank", 999)),
            ),
        )
        for index, candidate in enumerate(ranked, start=1):
            candidate["candidate_rank"] = index
        return ranked
    # [/DEF:SemanticSourceResolver.rank_candidates:Function]
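To see the ordering guarantee concretely, here is a standalone reduction of the sort key. The lowercase priority strings stand in for the real CandidateMatchType enum values (an assumption for the demo); the point is that match-type priority beats raw confidence score:

```python
# Mirrors the (priority, -score, rank) sort key from rank_candidates above.
PRIORITY = {"exact": 0, "reference": 1, "fuzzy": 2, "generated": 3}

candidates = [
    {"match_type": "fuzzy", "confidence_score": 0.98, "candidate_rank": 1},
    {"match_type": "exact", "confidence_score": 0.90, "candidate_rank": 2},
]
ranked = sorted(
    candidates,
    key=lambda c: (PRIORITY.get(c["match_type"], 99), -c["confidence_score"], c["candidate_rank"]),
)
print([c["match_type"] for c in ranked])  # ['exact', 'fuzzy'] -- priority beats score
```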

    # [DEF:SemanticSourceResolver.detect_conflicts:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Mark competing candidate sets that require explicit user review.
    def detect_conflicts(self, candidates: List[Dict[str, Any]]) -> bool:
        return len(candidates) > 1
    # [/DEF:SemanticSourceResolver.detect_conflicts:Function]

    # [DEF:SemanticSourceResolver.apply_field_decision:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Accept, reject, or manually override a field-level semantic value.
    def apply_field_decision(self, field_state: Mapping[str, Any], decision: Mapping[str, Any]) -> Dict[str, Any]:
        merged = dict(field_state)
        merged.update(decision)
        return merged
    # [/DEF:SemanticSourceResolver.apply_field_decision:Function]

    # [DEF:SemanticSourceResolver.propagate_source_version_update:Function]
    # @COMPLEXITY: 4
    # @PURPOSE: Propagate a semantic source version change to unlocked field entries without silently overwriting manual or locked values.
    # @RELATION: [DEPENDS_ON] -> [SemanticSource]
    # @RELATION: [DEPENDS_ON] -> [SemanticFieldEntry]
    # @PRE: source is persisted and fields belong to the same session aggregate.
    # @POST: unlocked fields linked to the source carry the new source version and are marked reviewable; manual or locked fields keep their active values untouched.
    # @SIDE_EFFECT: mutates in-memory field state for the caller to persist.
    # @DATA_CONTRACT: Input[SemanticSource,List[SemanticFieldEntry]] -> Output[Dict[str,int]]
    def propagate_source_version_update(
        self,
        source: SemanticSource,
        fields: Iterable[Any],
    ) -> Dict[str, int]:
        with belief_scope("SemanticSourceResolver.propagate_source_version_update"):
            source_id = str(source.source_id or "").strip()
            source_version = str(source.source_version or "").strip()
            if not source_id or not source_version:
                logger.explore(
                    "Semantic source version propagation rejected due to incomplete source metadata",
                    extra={"source_id": source_id, "source_version": source_version},
                )
                raise ValueError("Semantic source must provide source_id and source_version")

            propagated = 0
            preserved_locked = 0
            untouched = 0
            for field in fields:
                if str(getattr(field, "source_id", "") or "").strip() != source_id:
                    untouched += 1
                    continue
                if bool(getattr(field, "is_locked", False)) or getattr(field, "provenance", None) == FieldProvenance.MANUAL_OVERRIDE:
                    preserved_locked += 1
                    continue

                field.source_version = source_version
                field.needs_review = True
                field.has_conflict = bool(getattr(field, "has_conflict", False))
                propagated += 1

            logger.reflect(
                "Semantic source version propagation completed",
                extra={
                    "source_id": source_id,
                    "source_version": source_version,
                    "propagated": propagated,
                    "preserved_locked": preserved_locked,
                    "untouched": untouched,
                },
            )
            return {
                "propagated": propagated,
                "preserved_locked": preserved_locked,
                "untouched": untouched,
            }
    # [/DEF:SemanticSourceResolver.propagate_source_version_update:Function]
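To make the counting behavior concrete, a sketch using SimpleNamespace stand-ins for the SemanticSource and field entries (the attribute set mirrors the getattr calls above; everything else is illustrative and assumes the resolver is importable):

```python
from types import SimpleNamespace

source = SimpleNamespace(source_id="dict-1", source_version="v4")
fields = [
    SimpleNamespace(source_id="dict-1", is_locked=False, provenance=None,
                    source_version="v3", needs_review=False, has_conflict=False),
    SimpleNamespace(source_id="dict-1", is_locked=True, provenance=None),  # lock preserved
    SimpleNamespace(source_id="other", is_locked=False, provenance=None),  # different source
]

stats = SemanticSourceResolver().propagate_source_version_update(source, fields)
print(stats)                     # {'propagated': 1, 'preserved_locked': 1, 'untouched': 1}
print(fields[0].source_version)  # 'v4', with needs_review flipped to True
```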

    # [DEF:SemanticSourceResolver._normalize_dictionary_row:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Normalize one dictionary row into a consistent lookup structure.
    def _normalize_dictionary_row(self, row: Mapping[str, Any]) -> Dict[str, Any]:
        field_name = (
            row.get("field_name")
            or row.get("column_name")
            or row.get("name")
            or row.get("field")
        )
        normalized_name = str(field_name or "").strip()
        return {
            "field_name": normalized_name,
            "field_key": self._normalize_key(normalized_name),
            "verbose_name": row.get("verbose_name") or row.get("label"),
            "description": row.get("description"),
            "display_format": row.get("display_format") or row.get("format"),
        }
    # [/DEF:SemanticSourceResolver._normalize_dictionary_row:Function]

    # [DEF:SemanticSourceResolver._find_fuzzy_matches:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Produce confidence-scored fuzzy matches while keeping them reviewable.
    def _find_fuzzy_matches(self, field_name: str, rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        normalized_target = self._normalize_key(field_name)
        fuzzy_matches: List[Dict[str, Any]] = []
        for row in rows:
            candidate_key = str(row.get("field_key") or "")
            if not candidate_key:
                continue
            score = SequenceMatcher(None, normalized_target, candidate_key).ratio()
            if score < 0.72:
                continue
            fuzzy_matches.append({"row": row, "score": round(score, 3)})
        fuzzy_matches.sort(key=lambda item: item["score"], reverse=True)
        return fuzzy_matches[:3]
    # [/DEF:SemanticSourceResolver._find_fuzzy_matches:Function]

    # [DEF:SemanticSourceResolver._build_candidate_payload:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Project normalized dictionary rows into semantic candidate payloads.
    def _build_candidate_payload(
        self,
        rank: int,
        match_type: CandidateMatchType,
        confidence_score: float,
        row: Mapping[str, Any],
    ) -> Dict[str, Any]:
        return {
            "candidate_rank": rank,
            "match_type": match_type.value,
            "confidence_score": confidence_score,
            "proposed_verbose_name": row.get("verbose_name"),
            "proposed_description": row.get("description"),
            "proposed_display_format": row.get("display_format"),
            "status": CandidateStatus.PROPOSED.value,
        }
    # [/DEF:SemanticSourceResolver._build_candidate_payload:Function]

    # [DEF:SemanticSourceResolver._match_priority:Function]
    # @COMPLEXITY: 2
    # @PURPOSE: Encode trusted-confidence ordering so exact dictionary reuse beats fuzzy invention.
    def _match_priority(self, match_type: Optional[str]) -> int:
        priority = {
            CandidateMatchType.EXACT.value: 0,
            CandidateMatchType.REFERENCE.value: 1,
            CandidateMatchType.FUZZY.value: 2,
            CandidateMatchType.GENERATED.value: 3,
        }
        return priority.get(str(match_type or ""), 99)
    # [/DEF:SemanticSourceResolver._match_priority:Function]

    # [DEF:SemanticSourceResolver._normalize_key:Function]
    # @COMPLEXITY: 1
    # @PURPOSE: Normalize field identifiers for stable exact/fuzzy comparisons.
    def _normalize_key(self, value: str) -> str:
        return "".join(ch for ch in str(value or "").strip().lower() if ch.isalnum() or ch == "_")
    # [/DEF:SemanticSourceResolver._normalize_key:Function]
# [/DEF:SemanticSourceResolver:Class]

# [/DEF:SemanticSourceResolver:Module]
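The two helpers doing the heavy lifting above are `_normalize_key` and the SequenceMatcher scoring in `_find_fuzzy_matches`. A self-contained reduction showing why `Order Date` and `order_date` clear the 0.72 threshold:

```python
from difflib import SequenceMatcher

def normalize_key(value: str) -> str:
    # Mirrors SemanticSourceResolver._normalize_key: lowercase, keep only [a-z0-9_].
    return "".join(ch for ch in str(value or "").strip().lower() if ch.isalnum() or ch == "_")

target = normalize_key("Order Date")     # -> 'orderdate'
candidate = normalize_key("order_date")  # -> 'order_date'
score = SequenceMatcher(None, target, candidate).ratio()
print(round(score, 3))                   # 0.947, comfortably above the 0.72 cutoff
```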
@@ -50,6 +50,7 @@ class GitService:
        with belief_scope("GitService.__init__"):
            backend_root = Path(__file__).parents[2]
            self.legacy_base_path = str((backend_root / "git_repos").resolve())
+           self._uses_default_base_path = base_path == "git_repos"
            self.base_path = self._resolve_base_path(base_path)
            self._ensure_base_path_exists()
    # [/DEF:backend.src.services.git_service.GitService.__init__:Function]
@@ -64,7 +65,13 @@ class GitService:
        base = Path(self.base_path)
        if base.exists() and not base.is_dir():
            raise ValueError(f"Git repositories base path is not a directory: {self.base_path}")
-       base.mkdir(parents=True, exist_ok=True)
+       try:
+           base.mkdir(parents=True, exist_ok=True)
+       except (PermissionError, OSError) as e:
+           logger.warning(
+               f"[_ensure_base_path_exists][Coherence:Failed] Cannot create Git repositories base path: {self.base_path}. Error: {e}"
+           )
+           raise ValueError(f"Cannot create Git repositories base path: {self.base_path}. {e}")
    # [/DEF:backend.src.services.git_service.GitService._ensure_base_path_exists:Function]

# [DEF:backend.src.services.git_service.GitService._resolve_base_path:Function]
@@ -281,6 +288,9 @@ class GitService:
        normalized_key = self._normalize_repo_key(fallback_key)
        target_path = os.path.join(self.base_path, normalized_key)

+       if not self._uses_default_base_path:
+           return target_path
+
        try:
            session = SessionLocal()
            try:
@@ -345,10 +355,14 @@ class GitService:
            logger.warning(
                f"[init_repo][Action] Existing path is not a Git repository, recreating: {repo_path}"
            )
-           if os.path.isdir(repo_path):
-               shutil.rmtree(repo_path)
-           else:
-               os.remove(repo_path)
+           stale_path = Path(repo_path)
+           if stale_path.exists():
+               shutil.rmtree(stale_path, ignore_errors=True)
+           if stale_path.exists():
+               try:
+                   stale_path.unlink()
+               except Exception:
+                   pass
        repo = Repo.clone_from(auth_url, repo_path)
        self._ensure_gitflow_branches(repo, dashboard_id)
        return repo
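The hardened bootstrap in `_ensure_base_path_exists` turns low-level OS failures into a domain-level ValueError instead of crashing with a bare PermissionError. A standalone reduction of that pattern (the logging call is omitted here):

```python
from pathlib import Path

def ensure_dir(path: str) -> None:
    # Reduction of GitService._ensure_base_path_exists: validate, then create or fail loudly.
    base = Path(path)
    if base.exists() and not base.is_dir():
        raise ValueError(f"Git repositories base path is not a directory: {path}")
    try:
        base.mkdir(parents=True, exist_ok=True)
    except (PermissionError, OSError) as e:
        raise ValueError(f"Cannot create Git repositories base path: {path}. {e}")

ensure_dir("/tmp/git_repos_demo")  # creates the directory if it does not exist
```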
@@ -23,14 +23,25 @@ MASKED_API_KEY_PLACEHOLDER = "********"
# @PURPOSE: Load and validate the Fernet key used for secret encryption.
# @PRE: ENCRYPTION_KEY environment variable must be set to a valid Fernet key.
# @POST: Returns validated key bytes ready for Fernet initialization.
+# @RELATION: DEPENDS_ON -> backend.src.core.logger
+# @SIDE_EFFECT: Emits belief-state logs for missing or invalid encryption configuration.
+# @INVARIANT: Encryption initialization never falls back to a hardcoded secret.
def _require_fernet_key() -> bytes:
-    raw_key = os.getenv("ENCRYPTION_KEY", "").strip()
-    if not raw_key:
-        raise RuntimeError("ENCRYPTION_KEY must be set to a valid Fernet key")
+    with belief_scope("_require_fernet_key"):
+        raw_key = os.getenv("ENCRYPTION_KEY", "").strip()
+        if not raw_key:
+            logger.explore("Missing ENCRYPTION_KEY blocks EncryptionManager initialization")
+            raise RuntimeError("ENCRYPTION_KEY must be set")

-    key = raw_key.encode()
-    Fernet(key)
-    return key
+        key = raw_key.encode()
+        try:
+            Fernet(key)
+        except Exception as exc:
+            logger.explore("Invalid ENCRYPTION_KEY blocks EncryptionManager initialization")
+            raise RuntimeError("ENCRYPTION_KEY must be a valid Fernet key") from exc
+
+        logger.reflect("Validated ENCRYPTION_KEY for EncryptionManager initialization")
+        return key
# [/DEF:_require_fernet_key:Function]

# [DEF:EncryptionManager:Class]
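Since the loader above now refuses to start without a valid key, operators need a way to mint one. This uses the standard cryptography API, nothing project-specific; where the value goes (e.g. backend/.env) is an assumption based on the env-file handling elsewhere in this diff:

```python
# Generate a value for the ENCRYPTION_KEY environment variable.
from cryptography.fernet import Fernet

print(Fernet.generate_key().decode())  # paste the output into ENCRYPTION_KEY=...
```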
134 backend/tests/services/dataset_review/test_superset_matrix.py Normal file
@@ -0,0 +1,134 @@
# [DEF:SupersetCompatibilityMatrixTests:Module]
# @COMPLEXITY: 3
# @SEMANTICS: dataset_review, superset, compatibility_matrix, preview, sql_lab, tests
# @PURPOSE: Verifies Superset preview and SQL Lab endpoint fallback strategy used by dataset-review orchestration.
# @LAYER: Tests
# @RELATION: [DEPENDS_ON] -> [backend.src.core.superset_client.SupersetClient]
# @RELATION: [DEPENDS_ON] -> [SupersetCompilationAdapter]

from types import SimpleNamespace
from unittest.mock import MagicMock

# Import models to ensure proper SQLAlchemy registration
from src.models.auth import User
from src.models.dataset_review import CompiledPreview

from src.core.utils.superset_compilation_adapter import (
    PreviewCompilationPayload,
    SqlLabLaunchPayload,
    SupersetCompilationAdapter,
)


# [DEF:make_adapter:Function]
# @COMPLEXITY: 2
# @PURPOSE: Build an adapter with a mock Superset client and deterministic environment for compatibility tests.
# @RELATION: [DEPENDS_ON] -> [SupersetCompilationAdapter]
def make_adapter():
    environment = SimpleNamespace(
        id="env-1",
        name="Test Env",
        url="http://superset.example",
        username="user",
        password="pass",
        verify_ssl=True,
        timeout=30,
    )
    client = MagicMock()
    client.network = MagicMock()
    return SupersetCompilationAdapter(environment=environment, client=client), client
# [/DEF:make_adapter:Function]


# [DEF:test_preview_prefers_supported_client_method_before_network_fallback:Function]
# @COMPLEXITY: 2
# @PURPOSE: Confirms preview compilation uses a supported client method first when the capability exists.
# @RELATION: [DEPENDS_ON] -> [SupersetCompilationAdapter]
def test_preview_prefers_supported_client_method_before_network_fallback():
    adapter, client = make_adapter()
    client.compile_preview = MagicMock(return_value={"compiled_sql": "SELECT 1"})
    payload = PreviewCompilationPayload(
        session_id="sess-1",
        dataset_id=42,
        preview_fingerprint="fp-1",
        template_params={"country": "RU"},
        effective_filters=[{"name": "country", "value": "RU"}],
    )

    preview = adapter.compile_preview(payload)

    assert preview.preview_status.value == "ready"
    assert preview.compiled_sql == "SELECT 1"
    client.compile_preview.assert_called_once()
    client.network.request.assert_not_called()
# [/DEF:test_preview_prefers_supported_client_method_before_network_fallback:Function]


# [DEF:test_preview_falls_back_across_matrix_until_supported_endpoint_returns_sql:Function]
# @COMPLEXITY: 3
# @PURPOSE: Confirms preview fallback walks the compatibility matrix from preferred to legacy endpoints until one returns compiled SQL.
# @RELATION: [DEPENDS_ON] -> [SupersetCompilationAdapter]
def test_preview_falls_back_across_matrix_until_supported_endpoint_returns_sql():
    adapter, client = make_adapter()
    payload = PreviewCompilationPayload(
        session_id="sess-2",
        dataset_id=77,
        preview_fingerprint="fp-2",
        template_params={"region": "emea"},
        effective_filters=[],
    )

    client.network.request.side_effect = [
        RuntimeError("preview endpoint unavailable"),
        {"result": {"sql": "SELECT * FROM dataset_77"}},
    ]

    preview = adapter.compile_preview(payload)

    assert preview.preview_status.value == "ready"
    assert preview.compiled_sql == "SELECT * FROM dataset_77"
    assert client.network.request.call_count == 2
    first_call = client.network.request.call_args_list[0].kwargs
    second_call = client.network.request.call_args_list[1].kwargs
    assert first_call["endpoint"] == "/dataset/77/preview"
    assert second_call["endpoint"] == "/dataset/77/sql"
# [/DEF:test_preview_falls_back_across_matrix_until_supported_endpoint_returns_sql:Function]


# [DEF:test_sql_lab_launch_falls_back_to_legacy_execute_endpoint:Function]
# @COMPLEXITY: 3
# @PURPOSE: Confirms SQL Lab launch falls back from modern to legacy execute endpoint and preserves canonical session reference extraction.
# @RELATION: [DEPENDS_ON] -> [SupersetCompilationAdapter]
def test_sql_lab_launch_falls_back_to_legacy_execute_endpoint():
    adapter, client = make_adapter()
    client.get_dataset.return_value = {
        "result": {
            "id": 55,
            "schema": "public",
            "database": {"id": 9},
        }
    }
    client.network.request.side_effect = [
        RuntimeError("sqllab execute unavailable"),
        {"result": {"id": "query-123"}},
    ]
    payload = SqlLabLaunchPayload(
        session_id="sess-3",
        dataset_id=55,
        preview_id="preview-9",
        compiled_sql="SELECT * FROM sales",
        template_params={"limit": 10},
    )

    sql_lab_ref = adapter.create_sql_lab_session(payload)

    assert sql_lab_ref == "query-123"
    assert client.network.request.call_count == 2
    first_call = client.network.request.call_args_list[0].kwargs
    second_call = client.network.request.call_args_list[1].kwargs
    assert first_call["endpoint"] == "/sqllab/execute/"
    assert second_call["endpoint"] == "/sql_lab/execute/"
# [/DEF:test_sql_lab_launch_falls_back_to_legacy_execute_endpoint:Function]


# [/DEF:SupersetCompatibilityMatrixTests:Module]
@@ -1,374 +0,0 @@
# [DEF:test_task_logger:Module]
# @SEMANTICS: test, task_logger, task_context, unit_test
# @PURPOSE: Unit tests for TaskLogger and TaskContext.
# @LAYER: Test
# @RELATION: TESTS -> TaskLogger, TaskContext
# @COMPLEXITY: 3

# [SECTION: IMPORTS]
from unittest.mock import Mock

from src.core.task_manager.task_logger import TaskLogger
from src.core.task_manager.context import TaskContext
# [/SECTION]

# [DEF:TestTaskLogger:Class]
# @PURPOSE: Test suite for TaskLogger.
# @COMPLEXITY: 3
class TestTaskLogger:

    # [DEF:setup_method:Function]
    # @PURPOSE: Setup for each test method.
    # @PRE: None.
    # @POST: Mock add_log_fn created.
    def setup_method(self):
        """Create a mock add_log function for testing."""
        self.mock_add_log = Mock()
        self.logger = TaskLogger(
            task_id="test-task-1",
            add_log_fn=self.mock_add_log,
            source="test_source"
        )
    # [/DEF:setup_method:Function]

    # [DEF:test_init:Function]
    # @PURPOSE: Test TaskLogger initialization.
    # @PRE: None.
    # @POST: Logger instance created with correct attributes.
    def test_init(self):
        """Test TaskLogger initialization."""
        assert self.logger._task_id == "test-task-1"
        assert self.logger._default_source == "test_source"
        assert self.logger._add_log == self.mock_add_log
    # [/DEF:test_init:Function]

    # [DEF:test_with_source:Function]
    # @PURPOSE: Test creating a sub-logger with different source.
    # @PRE: Logger initialized.
    # @POST: New logger created with different source but same task_id.
    def test_with_source(self):
        """Test creating a sub-logger with different source."""
        sub_logger = self.logger.with_source("new_source")

        assert sub_logger._task_id == "test-task-1"
        assert sub_logger._default_source == "new_source"
        assert sub_logger._add_log == self.mock_add_log
    # [/DEF:test_with_source:Function]

    # [DEF:test_debug:Function]
    # @PURPOSE: Test debug log level.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with DEBUG level.
    def test_debug(self):
        """Test debug logging."""
        self.logger.debug("Debug message")

        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="DEBUG",
            message="Debug message",
            source="test_source",
            metadata=None
        )
    # [/DEF:test_debug:Function]

    # [DEF:test_info:Function]
    # @PURPOSE: Test info log level.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with INFO level.
    def test_info(self):
        """Test info logging."""
        self.logger.info("Info message")

        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="INFO",
            message="Info message",
            source="test_source",
            metadata=None
        )
    # [/DEF:test_info:Function]

    # [DEF:test_warning:Function]
    # @PURPOSE: Test warning log level.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with WARNING level.
    def test_warning(self):
        """Test warning logging."""
        self.logger.warning("Warning message")

        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="WARNING",
            message="Warning message",
            source="test_source",
            metadata=None
        )
    # [/DEF:test_warning:Function]

    # [DEF:test_error:Function]
    # @PURPOSE: Test error log level.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with ERROR level.
    def test_error(self):
        """Test error logging."""
        self.logger.error("Error message")

        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="ERROR",
            message="Error message",
            source="test_source",
            metadata=None
        )
    # [/DEF:test_error:Function]

    # [DEF:test_error_with_metadata:Function]
    # @PURPOSE: Test error logging with metadata.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with ERROR level and metadata.
    def test_error_with_metadata(self):
        """Test error logging with metadata."""
        metadata = {"error_code": 500, "details": "Connection failed"}
        self.logger.error("Error message", metadata=metadata)

        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="ERROR",
            message="Error message",
            source="test_source",
            metadata=metadata
        )
    # [/DEF:test_error_with_metadata:Function]

    # [DEF:test_progress:Function]
    # @PURPOSE: Test progress logging.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with INFO level and progress metadata.
    def test_progress(self):
        """Test progress logging."""
        self.logger.progress("Processing items", percent=50)

        expected_metadata = {"progress": 50}
        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="INFO",
            message="Processing items",
            source="test_source",
            metadata=expected_metadata
        )
    # [/DEF:test_progress:Function]

    # [DEF:test_progress_clamping:Function]
    # @PURPOSE: Test progress value clamping (0-100).
    # @PRE: Logger initialized.
    # @POST: Progress values clamped to 0-100 range.
    def test_progress_clamping(self):
        """Test progress value clamping."""
        # Test below 0
        self.logger.progress("Below 0", percent=-10)
        call1 = self.mock_add_log.call_args_list[0]
        assert call1.kwargs["metadata"]["progress"] == 0

        self.mock_add_log.reset_mock()

        # Test above 100
        self.logger.progress("Above 100", percent=150)
        call2 = self.mock_add_log.call_args_list[0]
        assert call2.kwargs["metadata"]["progress"] == 100
    # [/DEF:test_progress_clamping:Function]

    # [DEF:test_source_override:Function]
    # @PURPOSE: Test overriding the default source.
    # @PRE: Logger initialized.
    # @POST: add_log_fn called with overridden source.
    def test_source_override(self):
        """Test overriding the default source."""
        self.logger.info("Message", source="override_source")

        self.mock_add_log.assert_called_once_with(
            task_id="test-task-1",
            level="INFO",
            message="Message",
            source="override_source",
            metadata=None
        )
    # [/DEF:test_source_override:Function]

    # [DEF:test_sub_logger_source_independence:Function]
    # @PURPOSE: Test sub-logger independence from parent.
    # @PRE: Logger and sub-logger initialized.
    # @POST: Sub-logger has different source, parent unchanged.
    def test_sub_logger_source_independence(self):
        """Test sub-logger source independence from parent."""
        sub_logger = self.logger.with_source("sub_source")

        # Log with parent
        self.logger.info("Parent message")

        # Log with sub-logger
        sub_logger.info("Sub message")

        # Verify both calls were made with correct sources
        calls = self.mock_add_log.call_args_list
        assert len(calls) == 2
        assert calls[0].kwargs["source"] == "test_source"
        assert calls[1].kwargs["source"] == "sub_source"
    # [/DEF:test_sub_logger_source_independence:Function]

# [/DEF:TestTaskLogger:Class]

# [DEF:TestTaskContext:Class]
# @PURPOSE: Test suite for TaskContext.
# @COMPLEXITY: 3
class TestTaskContext:

    # [DEF:setup_method:Function]
    # @PURPOSE: Setup for each test method.
    # @PRE: None.
    # @POST: Mock add_log_fn created.
    def setup_method(self):
        """Create a mock add_log function for testing."""
        self.mock_add_log = Mock()
        self.params = {"param1": "value1", "param2": "value2"}
        self.context = TaskContext(
            task_id="test-task-2",
            add_log_fn=self.mock_add_log,
            params=self.params,
            default_source="plugin"
        )
    # [/DEF:setup_method:Function]

    # [DEF:test_init:Function]
    # @PURPOSE: Test TaskContext initialization.
    # @PRE: None.
    # @POST: Context instance created with correct attributes.
    def test_init(self):
        """Test TaskContext initialization."""
        assert self.context._task_id == "test-task-2"
        assert self.context._params == self.params
        assert isinstance(self.context._logger, TaskLogger)
        assert self.context._logger._default_source == "plugin"
    # [/DEF:test_init:Function]

    # [DEF:test_task_id_property:Function]
    # @PURPOSE: Test task_id property.
    # @PRE: Context initialized.
    # @POST: Returns correct task_id.
    def test_task_id_property(self):
        """Test task_id property."""
        assert self.context.task_id == "test-task-2"
    # [/DEF:test_task_id_property:Function]

    # [DEF:test_logger_property:Function]
    # @PURPOSE: Test logger property.
    # @PRE: Context initialized.
    # @POST: Returns TaskLogger instance.
    def test_logger_property(self):
        """Test logger property."""
        logger = self.context.logger
        assert isinstance(logger, TaskLogger)
        assert logger._task_id == "test-task-2"
        assert logger._default_source == "plugin"
    # [/DEF:test_logger_property:Function]

    # [DEF:test_params_property:Function]
    # @PURPOSE: Test params property.
    # @PRE: Context initialized.
    # @POST: Returns correct params dict.
    def test_params_property(self):
        """Test params property."""
        assert self.context.params == self.params
    # [/DEF:test_params_property:Function]

    # [DEF:test_get_param:Function]
    # @PURPOSE: Test getting a specific parameter.
    # @PRE: Context initialized with params.
    # @POST: Returns parameter value or default.
    def test_get_param(self):
        """Test getting a specific parameter."""
        assert self.context.get_param("param1") == "value1"
        assert self.context.get_param("param2") == "value2"
        assert self.context.get_param("nonexistent") is None
        assert self.context.get_param("nonexistent", "default") == "default"
    # [/DEF:test_get_param:Function]

    # [DEF:test_create_sub_context:Function]
    # @PURPOSE: Test creating a sub-context with different source.
    # @PRE: Context initialized.
    # @POST: New context created with different logger source.
    def test_create_sub_context(self):
        """Test creating a sub-context with different source."""
        sub_context = self.context.create_sub_context("new_source")

        assert sub_context._task_id == "test-task-2"
        assert sub_context._params == self.params
        assert sub_context._logger._default_source == "new_source"
        assert sub_context._logger._task_id == "test-task-2"
    # [/DEF:test_create_sub_context:Function]

    # [DEF:test_context_logger_delegates_to_task_logger:Function]
    # @PURPOSE: Test context logger delegates to TaskLogger.
    # @PRE: Context initialized.
    # @POST: Logger calls are delegated to TaskLogger.
    def test_context_logger_delegates_to_task_logger(self):
        """Test context logger delegates to TaskLogger."""
        # Call through context
        self.context.logger.info("Test message")

        # Verify the mock was called
        self.mock_add_log.assert_called_once_with(
            task_id="test-task-2",
            level="INFO",
            message="Test message",
            source="plugin",
            metadata=None
        )
    # [/DEF:test_context_logger_delegates_to_task_logger:Function]

    # [DEF:test_sub_context_with_source:Function]
    # @PURPOSE: Test sub-context logger uses new source.
    # @PRE: Context initialized.
    # @POST: Sub-context logger uses new source.
    def test_sub_context_with_source(self):
        """Test sub-context logger uses new source."""
        sub_context = self.context.create_sub_context("api_source")

        # Log through sub-context
        sub_context.logger.info("API message")

        # Verify the mock was called with new source
        self.mock_add_log.assert_called_once_with(
            task_id="test-task-2",
            level="INFO",
            message="API message",
            source="api_source",
            metadata=None
        )
    # [/DEF:test_sub_context_with_source:Function]

    # [DEF:test_multiple_sub_contexts:Function]
    # @PURPOSE: Test creating multiple sub-contexts.
    # @PRE: Context initialized.
    # @POST: Each sub-context has independent logger source.
    def test_multiple_sub_contexts(self):
        """Test creating multiple sub-contexts."""
        sub1 = self.context.create_sub_context("source1")
        sub2 = self.context.create_sub_context("source2")
        sub3 = self.context.create_sub_context("source3")

        assert sub1._logger._default_source == "source1"
        assert sub2._logger._default_source == "source2"
        assert sub3._logger._default_source == "source3"

        # All should have same task_id and params
        assert sub1._task_id == "test-task-2"
        assert sub2._task_id == "test-task-2"
        assert sub3._task_id == "test-task-2"
        assert sub1._params == self.params
        assert sub2._params == self.params
        assert sub3._params == self.params
    # [/DEF:test_multiple_sub_contexts:Function]

# [/DEF:TestTaskContext:Class]
# [/DEF:test_task_logger:Module]
35 build.sh
@@ -7,6 +7,23 @@ cd "$SCRIPT_DIR"

BACKEND_ENV_FILE="$SCRIPT_DIR/backend/.env"

+PROFILE="${1:-current}"
+
+case "$PROFILE" in
+  master)
+    PROFILE_ENV_FILE="$SCRIPT_DIR/.env.master"
+    PROJECT_NAME="ss-tools-master"
+    ;;
+  current)
+    PROFILE_ENV_FILE="$SCRIPT_DIR/.env.current"
+    PROJECT_NAME="ss-tools-current"
+    ;;
+  *)
+    echo "Error: unknown profile '$PROFILE'. Use one of: master, current."
+    exit 1
+    ;;
+esac
+
if ! command -v docker >/dev/null 2>&1; then
  echo "Error: docker is not installed or not in PATH."
  exit 1
@@ -80,11 +97,23 @@ PY

ensure_backend_encryption_key

+COMPOSE_ARGS=(-p "$PROJECT_NAME")
+if [[ -f "$PROFILE_ENV_FILE" ]]; then
+  COMPOSE_ARGS+=(--env-file "$PROFILE_ENV_FILE")
+else
+  echo "[build] Warning: profile env file not found at $PROFILE_ENV_FILE, using compose defaults."
+fi
+
+echo "[build] Profile: $PROFILE (project: $PROJECT_NAME)"
+if [[ -f "$PROFILE_ENV_FILE" ]]; then
+  echo "[build] Env file: $PROFILE_ENV_FILE"
+fi
+
echo "[1/2] Building project images..."
-"${COMPOSE_CMD[@]}" build
+"${COMPOSE_CMD[@]}" "${COMPOSE_ARGS[@]}" build

echo "[2/2] Starting Docker services..."
-"${COMPOSE_CMD[@]}" up -d
+"${COMPOSE_CMD[@]}" "${COMPOSE_ARGS[@]}" up -d

echo "Done. Services are running."
-echo "Use '${COMPOSE_CMD[*]} ps' to check status and '${COMPOSE_CMD[*]} logs -f' to stream logs."
+echo "Use '${COMPOSE_CMD[*]} ${COMPOSE_ARGS[*]} ps' to check status and '${COMPOSE_CMD[*]} ${COMPOSE_ARGS[*]} logs -f' to stream logs."
@@ -2,7 +2,6 @@ services:
  db:
    image: ${POSTGRES_IMAGE:?Set POSTGRES_IMAGE in .env.enterprise-clean}
    pull_policy: never
    container_name: ss_tools_db
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-ss_tools}
@@ -21,7 +20,6 @@ services:
  backend:
    image: ${BACKEND_IMAGE:?Set BACKEND_IMAGE in .env.enterprise-clean}
    pull_policy: never
    container_name: ss_tools_backend
    restart: unless-stopped
    depends_on:
      db:
@@ -50,7 +48,6 @@ services:
  frontend:
    image: ${FRONTEND_IMAGE:?Set FRONTEND_IMAGE in .env.enterprise-clean}
    pull_policy: never
    container_name: ss_tools_frontend
    restart: unless-stopped
    depends_on:
      - backend
@@ -1,7 +1,6 @@
services:
  db:
    image: ${POSTGRES_IMAGE:-postgres:16-alpine}
    container_name: ss_tools_db
    restart: unless-stopped
    environment:
      POSTGRES_DB: ss_tools
@@ -21,7 +20,6 @@ services:
    build:
      context: .
      dockerfile: docker/backend.Dockerfile
    container_name: ss_tools_backend
    restart: unless-stopped
    env_file:
      - ./backend/.env
@@ -34,6 +32,9 @@ services:
      TASKS_DATABASE_URL: postgresql+psycopg2://postgres:postgres@db:5432/ss_tools
      AUTH_DATABASE_URL: postgresql+psycopg2://postgres:postgres@db:5432/ss_tools
      BACKEND_PORT: 8000
      INITIAL_ADMIN_CREATE: ${INITIAL_ADMIN_CREATE:-false}
      INITIAL_ADMIN_USERNAME: ${INITIAL_ADMIN_USERNAME:-admin}
      INITIAL_ADMIN_PASSWORD: ${INITIAL_ADMIN_PASSWORD:-}
    ports:
      - "${BACKEND_HOST_PORT:-8001}:8000"
    volumes:
@@ -46,7 +47,6 @@ services:
    build:
      context: .
      dockerfile: docker/frontend.Dockerfile
    container_name: ss_tools_frontend
    restart: unless-stopped
    depends_on:
      - backend
@@ -48,7 +48,7 @@ describe('AssistantChatPanel integration contract', () => {
    const source = fs.readFileSync(COMPONENT_PATH, 'utf-8');

    expect(source).toContain('<!-- [DEF' + ':AssistantChatPanel:Component] -->');
    expect(source).toContain('@TIER' + ': CRITICAL');
    expect(source).toContain('@COMPLEXITY: 5');
    expect(source).toContain('@UX_STATE: LoadingHistory');
    expect(source).toContain('@UX_STATE: Sending');
    expect(source).toContain('@UX_STATE: Error');
0 frontend/src/lib/components/dataset-review/.gitkeep Normal file
Some files were not shown because too many files have changed in this diff.