diff --git a/.ai/standards/semantics.md b/.ai/standards/semantics.md index 4837620f..e8e70c82 100644 --- a/.ai/standards/semantics.md +++ b/.ai/standards/semantics.md @@ -12,9 +12,6 @@ - Persona: `.ai/PERSONA.md` -> `[DEF:Std:UserPersona:Standard]` - Constitution: `.ai/standards/constitution.md` -> `[DEF:Std:Constitution:Standard]` - Semantics standard: `.ai/standards/semantics.md` -> `[DEF:Std:Semantics:Standard]` -- Module map: `.ai/MODULE_MAP.md` -> `[DEF:Module_Map]` -- Project map: `.ai/PROJECT_MAP.md` -> `[DEF:Project_Map:Root]` -- Project map snapshot: `.ai/structure/PROJECT_MAP.md` (generated backing artifact) - Normalized MCP config: `.kilo/mcp.json` -> `[DEF:MCP_Config:Block]` ## 0.1 MCP NORMALIZATION RULE diff --git a/.axiom/semantic_index/index.duckdb b/.axiom/semantic_index/index.duckdb index 4ac55d82..71c69bdb 100644 Binary files a/.axiom/semantic_index/index.duckdb and b/.axiom/semantic_index/index.duckdb differ diff --git a/.kilocode/rules/specify-rules.md b/.kilocode/rules/specify-rules.md deleted file mode 100644 index 33940c09..00000000 --- a/.kilocode/rules/specify-rules.md +++ /dev/null @@ -1,84 +0,0 @@ -# ss-tools Development Guidelines - -Auto-generated from all feature plans. Last updated: 2025-12-19 - -## Knowledge Graph (GRACE) -**CRITICAL**: This project uses a GRACE Knowledge Graph for context. Always load the root map first: -- **Root Map**: `.ai/ROOT.md` -> `[DEF:Project_Knowledge_Map:Root]` -- **Project Map**: `.ai/PROJECT_MAP.md` -> `[DEF:Project_Map]` -- **Standards**: Read `.ai/standards/` for architecture and style rules. - -## Active Technologies -- Python 3.9+, Node.js 18+ + `uvicorn`, `npm`, `bash` (003-project-launch-script) -- Python 3.9+, Node.js 18+ + SvelteKit, FastAPI, Tailwind CSS (inferred from existing frontend) (004-integrate-svelte-kit) -- N/A (Frontend integration) (004-integrate-svelte-kit) -- Python 3.9+, Node.js 18+ + FastAPI, SvelteKit, Tailwind CSS, Pydantic (005-fix-ui-ws-validation) -- N/A (Configuration based) (005-fix-ui-ws-validation) -- Filesystem (plugins, logs, backups), SQLite (optional, for job history if needed) (005-fix-ui-ws-validation) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, Tailwind CSS (007-migration-dashboard-grid) -- N/A (Superset API integration) (007-migration-dashboard-grid) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, Tailwind CSS, Pydantic, Superset API (007-migration-dashboard-grid) -- N/A (Superset API integration - read-only for metadata) (007-migration-dashboard-grid) -- Python 3.9+ (backend), Node.js 18+ (frontend) + FastAPI, SvelteKit, Tailwind CSS, Pydantic, SQLAlchemy, Superset API (008-migration-ui-improvements) -- SQLite (optional for job history), existing database for mappings (008-migration-ui-improvements) -- Python 3.9+, Node.js 18+ + FastAPI, SvelteKit, Tailwind CSS, Pydantic, SQLAlchemy, Superset API (008-migration-ui-improvements) -- Python 3.9+, Node.js 18+ + FastAPI, APScheduler, SQLAlchemy, SvelteKit, Tailwind CSS (009-backup-scheduler) -- SQLite (`tasks.db`), JSON (`config.json`) (009-backup-scheduler) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, Tailwind CSS, Pydantic, SQLAlchemy, `superset_tool` (internal lib) (010-refactor-cli-to-web) -- SQLite (for job history/results, connection configs), Filesystem (for temporary file uploads) (010-refactor-cli-to-web) -- Python 3.9+ + FastAPI, Pydantic, requests, pyyaml (migrated from superset_tool) (012-remove-superset-tool) -- SQLite (tasks.db, migrations.db), 
Filesystem (012-remove-superset-tool) -- Filesystem (local git repo), SQLite (for GitServerConfig, Environment) (011-git-integration-dashboard) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, GitPython (or CLI git), Pydantic, SQLAlchemy, Superset API (011-git-integration-dashboard) -- SQLite (for config/history), Filesystem (local Git repositories) (011-git-integration-dashboard) -- Node.js 18+ (Frontend Build), Svelte 5.x + SvelteKit, Tailwind CSS, `date-fns` (existing) (013-unify-frontend-css) -- LocalStorage (for language preference) (013-unify-frontend-css) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI (Backend), SvelteKit (Frontend) (014-file-storage-ui) -- Local Filesystem (for artifacts), Config (for storage path) (014-file-storage-ui) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI (Backend), SvelteKit + Tailwind CSS (Frontend) (015-frontend-nav-redesign) -- N/A (UI reorganization and API integration) (015-frontend-nav-redesign) -- SQLite (`auth.db`) for Users, Roles, Permissions, and Mappings. (016-multi-user-auth) -- SQLite (existing `tasks.db` for results, `auth.db` for permissions, `mappings.db` or new `plugins.db` for provider config/metadata) (017-llm-analysis-plugin) -- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, Tailwind CSS, SQLAlchemy, WebSocket (existing) (019-superset-ux-redesign) -- SQLite (tasks.db, auth.db, migrations.db) - no new database tables required (019-superset-ux-redesign) -- Python 3.9+ (backend), Node.js 18+ (frontend) + FastAPI, SvelteKit, Tailwind CSS, SQLAlchemy/Pydantic task models, existing task/websocket stack (020-task-reports-design) -- SQLite task/result persistence (existing task DB), filesystem only for existing artifacts (no new primary store required) (020-task-reports-design) -- Node.js 18+ runtime, SvelteKit (existing frontend stack) + SvelteKit, Tailwind CSS, existing frontend UI primitives under `frontend/src/lib/components/ui` (001-unify-frontend-style) -- N/A (UI styling and component behavior only) (001-unify-frontend-style) -- Python 3.9+ (backend scripts/services), Shell (release tooling) + FastAPI stack (existing backend), ConfigManager, TaskManager, файловые утилиты, internal artifact registries (020-clean-repo-enterprise) -- PostgreSQL (конфигурации/метаданные), filesystem (артефакты дистрибутива, отчёты проверки) (020-clean-repo-enterprise) -- Python 3.9+ (backend), Node.js 18+ + SvelteKit (frontend) + FastAPI, SQLAlchemy, Pydantic, existing auth stack (`get_current_user`), existing dashboards route/service, Svelte runes (`$state`, `$derived`, `$effect`), Tailwind CSS, frontend `api` wrapper (024-user-dashboard-filter) -- Existing auth database (`AUTH_DATABASE_URL`) with a dedicated per-user preference entity (024-user-dashboard-filter) -- Python 3.9+ (Backend), Node.js 18+ / Svelte 5.x (Frontend) + FastAPI, SQLAlchemy, APScheduler (Backend) | SvelteKit, Tailwind CSS, existing UI components (Frontend) (026-dashboard-health-windows) -- PostgreSQL / SQLite (existing database for `ValidationRecord` and new `ValidationPolicy`) (026-dashboard-health-windows) -- Python 3.9+ backend, Node.js 18+ frontend with Svelte 5 / SvelteKit + FastAPI, SQLAlchemy, Pydantic, existing [SupersetClient](../../backend/src/core/superset_client.py), existing frontend API wrapper patterns, Svelte runes, existing task/websocket stack (027-dataset-llm-orchestration) -- Existing application databases plus filesystem-backed uploaded semantic sources; reuse current configuration and task 
persistence stores (027-dataset-llm-orchestration) -- Python 3.9+ backend, Node.js 18+ frontend, Svelte 5 / SvelteKit frontend runtime + FastAPI, SQLAlchemy, Pydantic, existing `TaskManager`, existing `SupersetClient`, existing LLM provider stack, SvelteKit, Tailwind CSS, frontend `requestApi`/`fetchApi` wrappers (027-dataset-llm-orchestration) -- Existing application databases for persistent session/domain entities; existing tasks database for async execution metadata; filesystem for optional uploaded semantic sources/artifacts (027-dataset-llm-orchestration) - -- Python 3.9+ (Backend), Node.js 18+ (Frontend Build) (001-plugin-arch-svelte-ui) - -## Project Structure - -```text -backend/ -frontend/ -tests/ -``` - -## Commands - -cd src; pytest; ruff check . - -## Code Style - -Python 3.9+ (Backend), Node.js 18+ (Frontend Build): Follow standard conventions - -## Recent Changes -- 027-dataset-llm-orchestration: Added Python 3.9+ backend, Node.js 18+ frontend, Svelte 5 / SvelteKit frontend runtime + FastAPI, SQLAlchemy, Pydantic, existing `TaskManager`, existing `SupersetClient`, existing LLM provider stack, SvelteKit, Tailwind CSS, frontend `requestApi`/`fetchApi` wrappers -- 027-dataset-llm-orchestration: Added Python 3.9+ backend, Node.js 18+ frontend with Svelte 5 / SvelteKit + FastAPI, SQLAlchemy, Pydantic, existing [SupersetClient](../../backend/src/core/superset_client.py), existing frontend API wrapper patterns, Svelte runes, existing task/websocket stack -- 026-dashboard-health-windows: Added Python 3.9+ (Backend), Node.js 18+ / Svelte 5.x (Frontend) + FastAPI, SQLAlchemy, APScheduler (Backend) | SvelteKit, Tailwind CSS, existing UI components (Frontend) - - - - diff --git a/backend/src/api/routes/__tests__/test_assistant_api.py b/backend/src/api/routes/__tests__/test_assistant_api.py index 66ee9ced..379b42c9 100644 --- a/backend/src/api/routes/__tests__/test_assistant_api.py +++ b/backend/src/api/routes/__tests__/test_assistant_api.py @@ -5,7 +5,7 @@ os.environ["ENCRYPTION_KEY"] = "OnrCzomBWbIjTf7Y-fnhL2adlU55bHZQjp8zX5zBC5w=" # @COMPLEXITY: 3 # @SEMANTICS: tests, assistant, api # @PURPOSE: Validate assistant API endpoint logic via direct async handler invocation. -# @RELATION: DEPENDS_ON -> backend.src.api.routes.assistant +# @RELATION: DEPENDS_ON -> [AssistantApi] # @INVARIANT: Every test clears assistant in-memory state before execution. import asyncio diff --git a/backend/src/api/routes/__tests__/test_clean_release_v2_api.py b/backend/src/api/routes/__tests__/test_clean_release_v2_api.py index cdd09cde..afc7bed7 100644 --- a/backend/src/api/routes/__tests__/test_clean_release_v2_api.py +++ b/backend/src/api/routes/__tests__/test_clean_release_v2_api.py @@ -2,7 +2,7 @@ # @COMPLEXITY: 3 # @PURPOSE: API contract tests for redesigned clean release endpoints. # @LAYER: Domain -# @RELATION: DEPENDS_ON -> backend.src.api.routes.clean_release_v2 +# @RELATION: DEPENDS_ON -> [CleanReleaseV2Api] from datetime import datetime, timezone from types import SimpleNamespace diff --git a/backend/src/api/routes/__tests__/test_clean_release_v2_release_api.py b/backend/src/api/routes/__tests__/test_clean_release_v2_release_api.py index 0790d00c..11a7441a 100644 --- a/backend/src/api/routes/__tests__/test_clean_release_v2_release_api.py +++ b/backend/src/api/routes/__tests__/test_clean_release_v2_release_api.py @@ -2,7 +2,7 @@ # @COMPLEXITY: 3 # @PURPOSE: API contract test scaffolding for clean release approval and publication endpoints. 
# @LAYER: Domain -# @RELATION: DEPENDS_ON -> backend.src.api.routes.clean_release_v2 +# @RELATION: DEPENDS_ON -> [CleanReleaseV2Api] """Contract tests for redesigned approval/publication API endpoints.""" diff --git a/backend/src/api/routes/__tests__/test_dashboards.py b/backend/src/api/routes/__tests__/test_dashboards.py index 6b77cce0..6bb6bdb3 100644 --- a/backend/src/api/routes/__tests__/test_dashboards.py +++ b/backend/src/api/routes/__tests__/test_dashboards.py @@ -2,7 +2,7 @@ # @COMPLEXITY: 3 # @PURPOSE: Unit tests for dashboards API endpoints. # @LAYER: API -# @RELATION: DEPENDS_ON -> backend.src.api.routes.dashboards +# @RELATION: DEPENDS_ON -> [DashboardsApi] import pytest from unittest.mock import MagicMock, patch, AsyncMock diff --git a/backend/src/api/routes/assistant.py b/backend/src/api/routes/assistant.py index 80dc92f3..f9749e97 100644 --- a/backend/src/api/routes/assistant.py +++ b/backend/src/api/routes/assistant.py @@ -907,6 +907,8 @@ def _build_task_observability_summary(task: Any, config_manager: ConfigManager) # [/DEF:_build_task_observability_summary:Function] +from src.logger import belief_scope, logger + # [DEF:_parse_command:Function] # @COMPLEXITY: 4 # @PURPOSE: Deterministically parse RU/EN command text into intent payload. @@ -918,202 +920,63 @@ def _build_task_observability_summary(task: Any, config_manager: ConfigManager) # @POST: Returns intent dict with domain/operation/entities/confidence/risk fields. # @INVARIANT: every return path includes domain, operation, entities, confidence, risk_level, requires_confirmation. def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any]: - text = message.strip() - lower = text.lower() - - if any( - phrase in lower - for phrase in [ - "что ты умеешь", - "что умеешь", - "что ты можешь", - "help", - "помощь", - "доступные команды", - "какие команды", - ] - ): - return { - "domain": "assistant", - "operation": "show_capabilities", - "entities": {}, - "confidence": 0.98, - "risk_level": "safe", - "requires_confirmation": False, - } - - dashboard_id = _extract_id(lower, [r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?(\d+)"]) - dashboard_ref = _extract_id( - lower, - [r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?([a-zа-я0-9._-]+)"], - ) - dataset_id = _extract_id(lower, [r"(?:датасет\w*|dataset)\s*(?:id\s*)?(\d+)"]) - # Accept short and long task ids (e.g., task-1, task-abc123, UUIDs). 
- task_id = _extract_id(lower, [r"(task[-_a-z0-9]{1,}|[0-9a-f]{8}-[0-9a-f-]{27,})"]) - - # Status query - if any(k in lower for k in ["статус", "status", "state", "проверь задачу"]): - return { - "domain": "status", - "operation": "get_task_status", - "entities": {"task_id": task_id}, - "confidence": 0.92 if task_id else 0.66, - "risk_level": "safe", - "requires_confirmation": False, - } - - # Git branch create - if any(k in lower for k in ["ветк", "branch"]) and any( - k in lower for k in ["созд", "сделай", "create"] - ): - branch = _extract_id(lower, [r"(?:ветк\w*|branch)\s+([a-z0-9._/-]+)"]) - return { - "domain": "git", - "operation": "create_branch", - "entities": { - "dashboard_id": int(dashboard_id) if dashboard_id else None, - "branch_name": branch, - }, - "confidence": 0.95 if branch and dashboard_id else 0.7, - "risk_level": "guarded", - "requires_confirmation": False, - } - - # Git commit - if any(k in lower for k in ["коммит", "commit"]): - quoted = re.search(r'"([^"]{3,120})"', text) - message_text = ( - quoted.group(1) if quoted else "assistant: update dashboard changes" - ) - return { - "domain": "git", - "operation": "commit_changes", - "entities": { - "dashboard_id": int(dashboard_id) if dashboard_id else None, - "message": message_text, - }, - "confidence": 0.9 if dashboard_id else 0.7, - "risk_level": "guarded", - "requires_confirmation": False, - } - - # Git deploy - if any(k in lower for k in ["деплой", "deploy", "разверн"]): - env_match = _extract_id(lower, [r"(?:в|to)\s+([a-z0-9_-]+)"]) - is_dangerous = _is_production_env(env_match, config_manager) - return { - "domain": "git", - "operation": "deploy_dashboard", - "entities": { - "dashboard_id": int(dashboard_id) if dashboard_id else None, - "environment": env_match, - }, - "confidence": 0.92 if dashboard_id and env_match else 0.7, - "risk_level": "dangerous" if is_dangerous else "guarded", - "requires_confirmation": is_dangerous, - } - - # Migration - if any(k in lower for k in ["миграц", "migration", "migrate"]): - src = _extract_id(lower, [r"(?:с|from)\s+([a-z0-9_-]+)"]) - tgt = _extract_id(lower, [r"(?:на|to)\s+([a-z0-9_-]+)"]) - dry_run = "--dry-run" in lower or "dry run" in lower - replace_db_config = "--replace-db-config" in lower - fix_cross_filters = "--no-fix-cross-filters" not in lower - is_dangerous = _is_production_env(tgt, config_manager) - return { - "domain": "migration", - "operation": "execute_migration", - "entities": { - "dashboard_id": int(dashboard_id) if dashboard_id else None, - "source_env": src, - "target_env": tgt, - "dry_run": dry_run, - "replace_db_config": replace_db_config, - "fix_cross_filters": fix_cross_filters, - }, - "confidence": 0.95 if dashboard_id and src and tgt else 0.72, - "risk_level": "dangerous" if is_dangerous else "guarded", - "requires_confirmation": is_dangerous or dry_run, - } - - # Backup - if any(k in lower for k in ["бэкап", "backup", "резерв"]): - env_match = _extract_id(lower, [r"(?:в|for|из|from)\s+([a-z0-9_-]+)"]) - return { - "domain": "backup", - "operation": "run_backup", - "entities": { - "dashboard_id": int(dashboard_id) if dashboard_id else None, - "environment": env_match, - }, - "confidence": 0.9 if env_match else 0.7, - "risk_level": "guarded", - "requires_confirmation": False, - } - - # Health summary - if any(k in lower for k in ["здоровье", "health", "ошибки", "failing", "проблемы"]): - env_match = _extract_id(lower, [r"(?:в|for|env|окружени[ея])\s+([a-z0-9_-]+)"]) - return { - "domain": "health", - "operation": "get_health_summary", - "entities": 
{"environment": env_match}, - "confidence": 0.9, - "risk_level": "safe", - "requires_confirmation": False, - } - - # LLM validation - if any(k in lower for k in ["валидац", "validate", "провер"]): - env_match = _extract_id(lower, [r"(?:в|for|env|окружени[ея])\s+([a-z0-9_-]+)"]) - provider_match = _extract_id(lower, [r"(?:provider|провайдер)\s+([a-z0-9_-]+)"]) - return { - "domain": "llm", - "operation": "run_llm_validation", - "entities": { - "dashboard_id": int(dashboard_id) if dashboard_id else None, - "dashboard_ref": dashboard_ref - if (dashboard_ref and not dashboard_ref.isdigit()) - else None, - "environment": env_match, - "provider": provider_match, - }, - "confidence": 0.88 if dashboard_id else 0.64, - "risk_level": "guarded", - "requires_confirmation": False, - } - - # Documentation - if any( - k in lower - for k in ["документац", "documentation", "generate docs", "сгенерируй док"] - ): - env_match = _extract_id(lower, [r"(?:в|for|env|окружени[ея])\s+([a-z0-9_-]+)"]) - provider_match = _extract_id(lower, [r"(?:provider|провайдер)\s+([a-z0-9_-]+)"]) - return { - "domain": "llm", - "operation": "run_llm_documentation", - "entities": { - "dataset_id": int(dataset_id) if dataset_id else None, - "environment": env_match, - "provider": provider_match, - }, - "confidence": 0.88 if dataset_id else 0.64, - "risk_level": "guarded", - "requires_confirmation": False, - } - - return { - "domain": "unknown", - "operation": "clarify", - "entities": {}, - "confidence": 0.3, - "risk_level": "safe", - "requires_confirmation": False, - } - - + with belief_scope('_parse_command'): + logger.reason('Belief protocol reasoning checkpoint for _parse_command') + text = message.strip() + lower = text.lower() + if any((phrase in lower for phrase in ['что ты умеешь', 'что умеешь', 'что ты можешь', 'help', 'помощь', 'доступные команды', 'какие команды'])): + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'assistant', 'operation': 'show_capabilities', 'entities': {}, 'confidence': 0.98, 'risk_level': 'safe', 'requires_confirmation': False} + dashboard_id = _extract_id(lower, ['(?:дашборд\\w*|dashboard)\\s*(?:id\\s*)?(\\d+)']) + dashboard_ref = _extract_id(lower, ['(?:дашборд\\w*|dashboard)\\s*(?:id\\s*)?([a-zа-я0-9._-]+)']) + dataset_id = _extract_id(lower, ['(?:датасет\\w*|dataset)\\s*(?:id\\s*)?(\\d+)']) + task_id = _extract_id(lower, ['(task[-_a-z0-9]{1,}|[0-9a-f]{8}-[0-9a-f-]{27,})']) + if any((k in lower for k in ['статус', 'status', 'state', 'проверь задачу'])): + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'status', 'operation': 'get_task_status', 'entities': {'task_id': task_id}, 'confidence': 0.92 if task_id else 0.66, 'risk_level': 'safe', 'requires_confirmation': False} + if any((k in lower for k in ['ветк', 'branch'])) and any((k in lower for k in ['созд', 'сделай', 'create'])): + branch = _extract_id(lower, ['(?:ветк\\w*|branch)\\s+([a-z0-9._/-]+)']) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'git', 'operation': 'create_branch', 'entities': {'dashboard_id': int(dashboard_id) if dashboard_id else None, 'branch_name': branch}, 'confidence': 0.95 if branch and dashboard_id else 0.7, 'risk_level': 'guarded', 'requires_confirmation': False} + if any((k in lower for k in ['коммит', 'commit'])): + quoted = re.search('"([^"]{3,120})"', text) + message_text = quoted.group(1) if quoted else 'assistant: update dashboard changes' + 
logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'git', 'operation': 'commit_changes', 'entities': {'dashboard_id': int(dashboard_id) if dashboard_id else None, 'message': message_text}, 'confidence': 0.9 if dashboard_id else 0.7, 'risk_level': 'guarded', 'requires_confirmation': False} + if any((k in lower for k in ['деплой', 'deploy', 'разверн'])): + env_match = _extract_id(lower, ['(?:в|to)\\s+([a-z0-9_-]+)']) + is_dangerous = _is_production_env(env_match, config_manager) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'git', 'operation': 'deploy_dashboard', 'entities': {'dashboard_id': int(dashboard_id) if dashboard_id else None, 'environment': env_match}, 'confidence': 0.92 if dashboard_id and env_match else 0.7, 'risk_level': 'dangerous' if is_dangerous else 'guarded', 'requires_confirmation': is_dangerous} + if any((k in lower for k in ['миграц', 'migration', 'migrate'])): + src = _extract_id(lower, ['(?:с|from)\\s+([a-z0-9_-]+)']) + tgt = _extract_id(lower, ['(?:на|to)\\s+([a-z0-9_-]+)']) + dry_run = '--dry-run' in lower or 'dry run' in lower + replace_db_config = '--replace-db-config' in lower + fix_cross_filters = '--no-fix-cross-filters' not in lower + is_dangerous = _is_production_env(tgt, config_manager) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'migration', 'operation': 'execute_migration', 'entities': {'dashboard_id': int(dashboard_id) if dashboard_id else None, 'source_env': src, 'target_env': tgt, 'dry_run': dry_run, 'replace_db_config': replace_db_config, 'fix_cross_filters': fix_cross_filters}, 'confidence': 0.95 if dashboard_id and src and tgt else 0.72, 'risk_level': 'dangerous' if is_dangerous else 'guarded', 'requires_confirmation': is_dangerous or dry_run} + if any((k in lower for k in ['бэкап', 'backup', 'резерв'])): + env_match = _extract_id(lower, ['(?:в|for|из|from)\\s+([a-z0-9_-]+)']) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'backup', 'operation': 'run_backup', 'entities': {'dashboard_id': int(dashboard_id) if dashboard_id else None, 'environment': env_match}, 'confidence': 0.9 if env_match else 0.7, 'risk_level': 'guarded', 'requires_confirmation': False} + if any((k in lower for k in ['здоровье', 'health', 'ошибки', 'failing', 'проблемы'])): + env_match = _extract_id(lower, ['(?:в|for|env|окружени[ея])\\s+([a-z0-9_-]+)']) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'health', 'operation': 'get_health_summary', 'entities': {'environment': env_match}, 'confidence': 0.9, 'risk_level': 'safe', 'requires_confirmation': False} + if any((k in lower for k in ['валидац', 'validate', 'провер'])): + env_match = _extract_id(lower, ['(?:в|for|env|окружени[ея])\\s+([a-z0-9_-]+)']) + provider_match = _extract_id(lower, ['(?:provider|провайдер)\\s+([a-z0-9_-]+)']) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'llm', 'operation': 'run_llm_validation', 'entities': {'dashboard_id': int(dashboard_id) if dashboard_id else None, 'dashboard_ref': dashboard_ref if dashboard_ref and (not dashboard_ref.isdigit()) else None, 'environment': env_match, 'provider': provider_match}, 'confidence': 0.88 if dashboard_id else 0.64, 'risk_level': 'guarded', 'requires_confirmation': False} + if any((k in lower for k in ['документац', 'documentation', 'generate docs', 
'сгенерируй док'])): + env_match = _extract_id(lower, ['(?:в|for|env|окружени[ея])\\s+([a-z0-9_-]+)']) + provider_match = _extract_id(lower, ['(?:provider|провайдер)\\s+([a-z0-9_-]+)']) + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'llm', 'operation': 'run_llm_documentation', 'entities': {'dataset_id': int(dataset_id) if dataset_id else None, 'environment': env_match, 'provider': provider_match}, 'confidence': 0.88 if dataset_id else 0.64, 'risk_level': 'guarded', 'requires_confirmation': False} + logger.reflect('Belief protocol postcondition checkpoint for _parse_command') + return {'domain': 'unknown', 'operation': 'clarify', 'entities': {}, 'confidence': 0.3, 'risk_level': 'safe', 'requires_confirmation': False} # [/DEF:_parse_command:Function] @@ -1401,106 +1264,15 @@ _DATASET_REVIEW_OPS = { # @COMPLEXITY: 4 # @PURPOSE: Build assistant-safe dataset-review context snapshot with masked imported-filter payloads for session-scoped assistant routing. # @RELATION: [DEPENDS_ON] ->[DatasetReviewSession] -def _serialize_dataset_review_context( - session: DatasetReviewSession, -) -> Dict[str, Any]: - latest_preview = None - previews = getattr(session, "previews", []) or [] - if previews: - latest_preview = previews[-1] - return { - "session_id": session.session_id, - "version": int(getattr(session, "version", 0) or 0), - "dataset_ref": session.dataset_ref, - "dataset_id": session.dataset_id, - "environment_id": session.environment_id, - "readiness_state": session.readiness_state.value, - "recommended_action": session.recommended_action.value, - "status": session.status.value, - "current_phase": session.current_phase.value, - "findings": [ - { - "finding_id": item.finding_id, - "code": item.code, - "severity": item.severity.value, - "message": item.message, - "resolution_state": item.resolution_state.value, - } - for item in getattr(session, "findings", []) - ], - "imported_filters": [ - sanitize_imported_filter_for_assistant( - { - "filter_id": item.filter_id, - "filter_name": item.filter_name, - "display_name": item.display_name, - "raw_value": item.raw_value, - "raw_value_masked": bool(getattr(item, "raw_value_masked", False)), - "normalized_value": item.normalized_value, - "source": getattr(item.source, "value", item.source), - "confidence_state": getattr( - item.confidence_state, "value", item.confidence_state - ), - "requires_confirmation": bool(item.requires_confirmation), - "recovery_status": getattr( - item.recovery_status, "value", item.recovery_status - ), - "notes": item.notes, - } - ) - for item in getattr(session, "imported_filters", []) - ], - "mappings": [ - { - "mapping_id": item.mapping_id, - "filter_id": item.filter_id, - "variable_id": item.variable_id, - "mapping_method": getattr( - item.mapping_method, "value", item.mapping_method - ), - "effective_value": item.effective_value, - "approval_state": getattr( - item.approval_state, "value", item.approval_state - ), - "requires_explicit_approval": bool(item.requires_explicit_approval), - } - for item in getattr(session, "execution_mappings", []) - ], - "semantic_fields": [ - { - "field_id": item.field_id, - "field_name": item.field_name, - "verbose_name": item.verbose_name, - "description": item.description, - "display_format": item.display_format, - "provenance": getattr(item.provenance, "value", item.provenance), - "is_locked": bool(item.is_locked), - "needs_review": bool(item.needs_review), - "candidates": [ - { - "candidate_id": candidate.candidate_id, - 
"proposed_verbose_name": candidate.proposed_verbose_name, - "proposed_description": candidate.proposed_description, - "proposed_display_format": candidate.proposed_display_format, - "status": getattr(candidate.status, "value", candidate.status), - } - for candidate in getattr(item, "candidates", []) - ], - } - for item in getattr(session, "semantic_fields", []) - ], - "preview": { - "preview_id": latest_preview.preview_id, - "preview_status": getattr( - latest_preview.preview_status, "value", latest_preview.preview_status - ), - "compiled_sql": latest_preview.compiled_sql, - } - if latest_preview is not None - else None, - } - - +def _serialize_dataset_review_context(session: DatasetReviewSession) -> Dict[str, Any]: + with belief_scope('_serialize_dataset_review_context'): + logger.reason('Belief protocol reasoning checkpoint for _serialize_dataset_review_context') + latest_preview = None + previews = getattr(session, 'previews', []) or [] + if previews: + latest_preview = previews[-1] + logger.reflect('Belief protocol postcondition checkpoint for _serialize_dataset_review_context') + return {'session_id': session.session_id, 'version': int(getattr(session, 'version', 0) or 0), 'dataset_ref': session.dataset_ref, 'dataset_id': session.dataset_id, 'environment_id': session.environment_id, 'readiness_state': session.readiness_state.value, 'recommended_action': session.recommended_action.value, 'status': session.status.value, 'current_phase': session.current_phase.value, 'findings': [{'finding_id': item.finding_id, 'code': item.code, 'severity': item.severity.value, 'message': item.message, 'resolution_state': item.resolution_state.value} for item in getattr(session, 'findings', [])], 'imported_filters': [sanitize_imported_filter_for_assistant({'filter_id': item.filter_id, 'filter_name': item.filter_name, 'display_name': item.display_name, 'raw_value': item.raw_value, 'raw_value_masked': bool(getattr(item, 'raw_value_masked', False)), 'normalized_value': item.normalized_value, 'source': getattr(item.source, 'value', item.source), 'confidence_state': getattr(item.confidence_state, 'value', item.confidence_state), 'requires_confirmation': bool(item.requires_confirmation), 'recovery_status': getattr(item.recovery_status, 'value', item.recovery_status), 'notes': item.notes}) for item in getattr(session, 'imported_filters', [])], 'mappings': [{'mapping_id': item.mapping_id, 'filter_id': item.filter_id, 'variable_id': item.variable_id, 'mapping_method': getattr(item.mapping_method, 'value', item.mapping_method), 'effective_value': item.effective_value, 'approval_state': getattr(item.approval_state, 'value', item.approval_state), 'requires_explicit_approval': bool(item.requires_explicit_approval)} for item in getattr(session, 'execution_mappings', [])], 'semantic_fields': [{'field_id': item.field_id, 'field_name': item.field_name, 'verbose_name': item.verbose_name, 'description': item.description, 'display_format': item.display_format, 'provenance': getattr(item.provenance, 'value', item.provenance), 'is_locked': bool(item.is_locked), 'needs_review': bool(item.needs_review), 'candidates': [{'candidate_id': candidate.candidate_id, 'proposed_verbose_name': candidate.proposed_verbose_name, 'proposed_description': candidate.proposed_description, 'proposed_display_format': candidate.proposed_display_format, 'status': getattr(candidate.status, 'value', candidate.status)} for candidate in getattr(item, 'candidates', [])]} for item in getattr(session, 'semantic_fields', [])], 'preview': {'preview_id': 
latest_preview.preview_id, 'preview_status': getattr(latest_preview.preview_status, 'value', latest_preview.preview_status), 'compiled_sql': latest_preview.compiled_sql} if latest_preview is not None else None} # [/DEF:_serialize_dataset_review_context:Function] @@ -1508,20 +1280,17 @@ def _serialize_dataset_review_context( # @COMPLEXITY: 4 # @PURPOSE: Load owner-scoped dataset-review context for assistant planning and grounded response generation. # @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository] -def _load_dataset_review_context( - dataset_review_session_id: Optional[str], - current_user: User, - db: Session, -) -> Optional[Dict[str, Any]]: - if not dataset_review_session_id: - return None - repository = DatasetReviewSessionRepository(db) - session = repository.load_session_detail(dataset_review_session_id, current_user.id) - if session is None or session.user_id != current_user.id: - raise HTTPException(status_code=404, detail="Dataset review session not found") - return _serialize_dataset_review_context(session) - - +def _load_dataset_review_context(dataset_review_session_id: Optional[str], current_user: User, db: Session) -> Optional[Dict[str, Any]]: + with belief_scope('_load_dataset_review_context'): + if not dataset_review_session_id: + return None + logger.reason('Belief protocol reasoning checkpoint for _load_dataset_review_context') + repository = DatasetReviewSessionRepository(db) + session = repository.load_session_detail(dataset_review_session_id, current_user.id) + if session is None or session.user_id != current_user.id: + raise HTTPException(status_code=404, detail='Dataset review session not found') + logger.reflect('Belief protocol postcondition checkpoint for _load_dataset_review_context') + return _serialize_dataset_review_context(session) # [/DEF:_load_dataset_review_context:Function] @@ -1769,7 +1538,11 @@ async def _dispatch_dataset_review_intent( config_manager: ConfigManager, db: Session, ) -> Tuple[str, Optional[str], List[AssistantAction]]: - with belief_scope("assistant.dispatch_dataset_review_intent"): + with belief_scope("_dispatch_dataset_review_intent"): + logger.reason( + "Dispatching assistant dataset-review intent", + extra={"operation": intent.get("operation")}, + ) entities = intent.get("entities", {}) session_id = entities.get("dataset_review_session_id") session_version = entities.get("session_version") @@ -1986,141 +1759,66 @@ async def _dispatch_dataset_review_intent( # @PURPOSE: Build human-readable confirmation prompt for an intent before execution. # @PRE: intent contains operation and entities fields. # @POST: Returns descriptive Russian-language text ending with confirmation prompt. -async def _async_confirmation_summary( - intent: Dict[str, Any], config_manager: ConfigManager, db: Session -) -> str: - operation = intent.get("operation", "") - entities = intent.get("entities", {}) - descriptions: Dict[str, str] = { - "create_branch": "создание ветки{branch} для дашборда{dashboard}", - "commit_changes": "коммит изменений для дашборда{dashboard}", - "deploy_dashboard": "деплой дашборда{dashboard} в окружение{env}", - "execute_migration": "миграция дашборда{dashboard} с{src} на{tgt}", - "run_backup": "бэкап окружения{env}{dashboard}", - "run_llm_validation": "LLM-валидация дашборда{dashboard}{env}", - "run_llm_documentation": "генерация документации для датасета{dataset}{env}", - } - template = descriptions.get(operation) - if not template: - return "Подтвердите выполнение операции или отмените." 
- - def _label(value: Any, prefix: str = " ") -> str: - return f"{prefix}{value}" if value else "" - - dashboard = entities.get("dashboard_id") or entities.get("dashboard_ref") - text = template.format( - branch=_label(entities.get("branch_name")), - dashboard=_label(dashboard), - env=_label(entities.get("environment") or entities.get("target_env")), - src=_label(entities.get("source_env")), - tgt=_label(entities.get("target_env")), - dataset=_label(entities.get("dataset_id")), - ) - - if operation == "execute_migration": - flags = [] - flags.append( - "маппинг БД: " - + ( - "ВКЛ" - if _coerce_query_bool(entities.get("replace_db_config", False)) - else "ВЫКЛ" - ) - ) - flags.append( - "исправление кроссфильтров: " - + ( - "ВКЛ" - if _coerce_query_bool(entities.get("fix_cross_filters", True)) - else "ВЫКЛ" - ) - ) - dry_run_enabled = _coerce_query_bool(entities.get("dry_run", False)) - flags.append("отчет dry-run: " + ("ВКЛ" if dry_run_enabled else "ВЫКЛ")) - text += f" ({', '.join(flags)})" - - if dry_run_enabled: - try: - from ...core.migration.dry_run_orchestrator import ( - MigrationDryRunService, - ) - from ...models.dashboard import DashboardSelection - from ...core.superset_client import SupersetClient - - src_token = entities.get("source_env") - tgt_token = entities.get("target_env") - dashboard_id = _resolve_dashboard_id_entity( - entities, config_manager, env_hint=src_token - ) - - if dashboard_id and src_token and tgt_token: - src_env_id = _resolve_env_id(src_token, config_manager) - tgt_env_id = _resolve_env_id(tgt_token, config_manager) - - if src_env_id and tgt_env_id: - env_map = { - env.id: env for env in config_manager.get_environments() - } - source_env = env_map.get(src_env_id) - target_env = env_map.get(tgt_env_id) - - if source_env and target_env and source_env.id != target_env.id: - selection = DashboardSelection( - source_env_id=source_env.id, - target_env_id=target_env.id, - selected_ids=[dashboard_id], - replace_db_config=_coerce_query_bool( - entities.get("replace_db_config", False) - ), - fix_cross_filters=_coerce_query_bool( - entities.get("fix_cross_filters", True) - ), - ) - service = MigrationDryRunService() - source_client = SupersetClient(source_env) - target_client = SupersetClient(target_env) - report = service.run( - selection, source_client, target_client, db - ) - - s = report.get("summary", {}) - dash_s = s.get("dashboards", {}) - charts_s = s.get("charts", {}) - ds_s = s.get("datasets", {}) - - # Determine main actions counts - creates = ( - dash_s.get("create", 0) - + charts_s.get("create", 0) - + ds_s.get("create", 0) - ) - updates = ( - dash_s.get("update", 0) - + charts_s.get("update", 0) - + ds_s.get("update", 0) - ) - deletes = ( - dash_s.get("delete", 0) - + charts_s.get("delete", 0) - + ds_s.get("delete", 0) - ) - - text += f"\n\nОтчет dry-run:\n- Будет создано новых объектов: {creates}\n- Будет обновлено: {updates}\n- Будет удалено: {deletes}" - else: - text += "\n\n(Не удалось загрузить отчет dry-run: неверные окружения)." - except Exception as e: - import traceback - - logger.warning( - "[assistant.dry_run_summary][failed] Exception: %s\n%s", - e, - traceback.format_exc(), - ) - text += f"\n\n(Не удалось загрузить отчет dry-run: {e})." - - return f"Выполнить: {text}. Подтвердите или отмените." 
- +async def _async_confirmation_summary(intent: Dict[str, Any], config_manager: ConfigManager, db: Session) -> str: + with belief_scope('_confirmation_summary'): + logger.reason('Belief protocol reasoning checkpoint for _confirmation_summary') + operation = intent.get('operation', '') + entities = intent.get('entities', {}) + descriptions: Dict[str, str] = {'create_branch': 'создание ветки{branch} для дашборда{dashboard}', 'commit_changes': 'коммит изменений для дашборда{dashboard}', 'deploy_dashboard': 'деплой дашборда{dashboard} в окружение{env}', 'execute_migration': 'миграция дашборда{dashboard} с{src} на{tgt}', 'run_backup': 'бэкап окружения{env}{dashboard}', 'run_llm_validation': 'LLM-валидация дашборда{dashboard}{env}', 'run_llm_documentation': 'генерация документации для датасета{dataset}{env}'} + template = descriptions.get(operation) + if not template: + logger.reflect('Belief protocol postcondition checkpoint for _confirmation_summary') + return 'Подтвердите выполнение операции или отмените.' + def _label(value: Any, prefix: str=' ') -> str: + logger.reflect('Belief protocol postcondition checkpoint for _confirmation_summary') + return f'{prefix}{value}' if value else '' + dashboard = entities.get('dashboard_id') or entities.get('dashboard_ref') + text = template.format(branch=_label(entities.get('branch_name')), dashboard=_label(dashboard), env=_label(entities.get('environment') or entities.get('target_env')), src=_label(entities.get('source_env')), tgt=_label(entities.get('target_env')), dataset=_label(entities.get('dataset_id'))) + if operation == 'execute_migration': + flags = [] + flags.append('маппинг БД: ' + ('ВКЛ' if _coerce_query_bool(entities.get('replace_db_config', False)) else 'ВЫКЛ')) + flags.append('исправление кроссфильтров: ' + ('ВКЛ' if _coerce_query_bool(entities.get('fix_cross_filters', True)) else 'ВЫКЛ')) + dry_run_enabled = _coerce_query_bool(entities.get('dry_run', False)) + flags.append('отчет dry-run: ' + ('ВКЛ' if dry_run_enabled else 'ВЫКЛ')) + text += f" ({', '.join(flags)})" + if dry_run_enabled: + try: + from ...core.migration.dry_run_orchestrator import MigrationDryRunService + from ...models.dashboard import DashboardSelection + from ...core.superset_client import SupersetClient + src_token = entities.get('source_env') + tgt_token = entities.get('target_env') + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=src_token) + if dashboard_id and src_token and tgt_token: + src_env_id = _resolve_env_id(src_token, config_manager) + tgt_env_id = _resolve_env_id(tgt_token, config_manager) + if src_env_id and tgt_env_id: + env_map = {env.id: env for env in config_manager.get_environments()} + source_env = env_map.get(src_env_id) + target_env = env_map.get(tgt_env_id) + if source_env and target_env and (source_env.id != target_env.id): + selection = DashboardSelection(source_env_id=source_env.id, target_env_id=target_env.id, selected_ids=[dashboard_id], replace_db_config=_coerce_query_bool(entities.get('replace_db_config', False)), fix_cross_filters=_coerce_query_bool(entities.get('fix_cross_filters', True))) + service = MigrationDryRunService() + source_client = SupersetClient(source_env) + target_client = SupersetClient(target_env) + report = service.run(selection, source_client, target_client, db) + s = report.get('summary', {}) + dash_s = s.get('dashboards', {}) + charts_s = s.get('charts', {}) + ds_s = s.get('datasets', {}) + creates = dash_s.get('create', 0) + charts_s.get('create', 0) + ds_s.get('create', 0) + updates = 
dash_s.get('update', 0) + charts_s.get('update', 0) + ds_s.get('update', 0) + deletes = dash_s.get('delete', 0) + charts_s.get('delete', 0) + ds_s.get('delete', 0) + text += f'\n\nОтчет dry-run:\n- Будет создано новых объектов: {creates}\n- Будет обновлено: {updates}\n- Будет удалено: {deletes}' + else: + text += '\n\n(Не удалось загрузить отчет dry-run: неверные окружения).' + except Exception as e: + import traceback + logger.warning('[assistant.dry_run_summary][failed] Exception: %s\n%s', e, traceback.format_exc()) + text += f'\n\n(Не удалось загрузить отчет dry-run: {e}).' + logger.reflect('Belief protocol postcondition checkpoint for _confirmation_summary') + return f'Выполнить: {text}. Подтвердите или отмените.' # [/DEF:_confirmation_summary:Function] @@ -2300,404 +1998,154 @@ def _authorize_intent(intent: Dict[str, Any], current_user: User): # @PRE: intent operation is known and actor permissions are validated per operation. # @POST: Returns response text, optional task id, and UI actions for follow-up. # @INVARIANT: unsupported operations are rejected via HTTPException(400). -async def _dispatch_intent( - intent: Dict[str, Any], - current_user: User, - task_manager: TaskManager, - config_manager: ConfigManager, - db: Session, -) -> Tuple[str, Optional[str], List[AssistantAction]]: - operation = intent.get("operation") - entities = intent.get("entities", {}) - - if operation in _DATASET_REVIEW_OPS or operation == "dataset_review_answer_context": - return await _dispatch_dataset_review_intent( - intent, - current_user, - config_manager, - db, - ) - - if operation == "show_capabilities": - tools_catalog = _build_tool_catalog(current_user, config_manager, db) - labels = { - "create_branch": "Git: создание ветки", - "commit_changes": "Git: коммит", - "deploy_dashboard": "Git: деплой дашборда", - "execute_migration": "Миграции: запуск переноса", - "run_backup": "Бэкапы: запуск резервного копирования", - "run_llm_validation": "LLM: валидация дашборда", - "run_llm_documentation": "LLM: генерация документации", - "get_task_status": "Статус: проверка задачи", - "get_health_summary": "Здоровье: сводка по дашбордам", - } - available = [ - labels[t["operation"]] for t in tools_catalog if t["operation"] in labels - ] - if not available: - return "Сейчас нет доступных для вас операций ассистента.", None, [] - commands = "\n".join(f"- {item}" for item in available) - text = ( - "Вот что я могу сделать для вас:\n" - f"{commands}\n\n" - "Пример: `запусти миграцию с dev на prod для дашборда 42`." 
- ) - return text, None, [] - - if operation == "get_health_summary": - from ...services.health_service import HealthService - - env_token = entities.get("environment") - env_id = _resolve_env_id(env_token, config_manager) - service = HealthService(db) - summary = await service.get_health_summary(environment_id=env_id) - - env_name = ( - _get_environment_name_by_id(env_id, config_manager) - if env_id - else "всех окружений" - ) - text = ( - f"Сводка здоровья дашбордов для {env_name}:\n" - f"- ✅ Прошли проверку: {summary.pass_count}\n" - f"- ⚠️ С предупреждениями: {summary.warn_count}\n" - f"- ❌ Ошибки валидации: {summary.fail_count}\n" - f"- ❓ Неизвестно: {summary.unknown_count}" - ) - - actions = [ - AssistantAction( - type="open_route", - label="Открыть Health Center", - target="/dashboards/health", - ) - ] - - if summary.fail_count > 0: - text += "\n\nОбнаружены ошибки в следующих дашбордах:" - for item in summary.items: - if item.status == "FAIL": - text += f"\n- {item.dashboard_id} ({item.environment_id}): {item.summary or 'Нет деталей'}" - actions.append( - AssistantAction( - type="open_route", - label=f"Отчет {item.dashboard_id}", - target=f"/reports/llm/{item.task_id}", - ) - ) - - return text, None, actions[:5] # Limit actions to avoid UI clutter - - if operation == "get_task_status": - _check_any_permission(current_user, [("tasks", "READ")]) - task_id = entities.get("task_id") - if not task_id: - recent = [ - t - for t in task_manager.get_tasks(limit=20, offset=0) - if t.user_id == current_user.id - ] - if not recent: - return "У вас пока нет задач в истории.", None, [] - task = recent[0] - actions = [ - AssistantAction(type="open_task", label="Open Task", target=task.id) - ] - if str(task.status).upper() in {"SUCCESS", "FAILED"}: +async def _dispatch_intent(intent: Dict[str, Any], current_user: User, task_manager: TaskManager, config_manager: ConfigManager, db: Session) -> Tuple[str, Optional[str], List[AssistantAction]]: + with belief_scope('_dispatch_intent'): + logger.reason('Belief protocol reasoning checkpoint for _dispatch_intent') + operation = intent.get('operation') + entities = intent.get('entities', {}) + if operation in _DATASET_REVIEW_OPS or operation == 'dataset_review_answer_context': + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return await _dispatch_dataset_review_intent(intent, current_user, config_manager, db) + if operation == 'show_capabilities': + tools_catalog = _build_tool_catalog(current_user, config_manager, db) + labels = {'create_branch': 'Git: создание ветки', 'commit_changes': 'Git: коммит', 'deploy_dashboard': 'Git: деплой дашборда', 'execute_migration': 'Миграции: запуск переноса', 'run_backup': 'Бэкапы: запуск резервного копирования', 'run_llm_validation': 'LLM: валидация дашборда', 'run_llm_documentation': 'LLM: генерация документации', 'get_task_status': 'Статус: проверка задачи', 'get_health_summary': 'Здоровье: сводка по дашбордам'} + available = [labels[t['operation']] for t in tools_catalog if t['operation'] in labels] + if not available: + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return ('Сейчас нет доступных для вас операций ассистента.', None, []) + commands = '\n'.join((f'- {item}' for item in available)) + text = f'Вот что я могу сделать для вас:\n{commands}\n\nПример: `запусти миграцию с dev на prod для дашборда 42`.' 
+ logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (text, None, []) + if operation == 'get_health_summary': + from ...services.health_service import HealthService + env_token = entities.get('environment') + env_id = _resolve_env_id(env_token, config_manager) + service = HealthService(db) + summary = await service.get_health_summary(environment_id=env_id) + env_name = _get_environment_name_by_id(env_id, config_manager) if env_id else 'всех окружений' + text = f'Сводка здоровья дашбордов для {env_name}:\n- ✅ Прошли проверку: {summary.pass_count}\n- ⚠️ С предупреждениями: {summary.warn_count}\n- ❌ Ошибки валидации: {summary.fail_count}\n- ❓ Неизвестно: {summary.unknown_count}' + actions = [AssistantAction(type='open_route', label='Открыть Health Center', target='/dashboards/health')] + if summary.fail_count > 0: + text += '\n\nОбнаружены ошибки в следующих дашбордах:' + for item in summary.items: + if item.status == 'FAIL': + text += f"\n- {item.dashboard_id} ({item.environment_id}): {item.summary or 'Нет деталей'}" + actions.append(AssistantAction(type='open_route', label=f'Отчет {item.dashboard_id}', target=f'/reports/llm/{item.task_id}')) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (text, None, actions[:5]) + if operation == 'get_task_status': + _check_any_permission(current_user, [('tasks', 'READ')]) + task_id = entities.get('task_id') + if not task_id: + recent = [t for t in task_manager.get_tasks(limit=20, offset=0) if t.user_id == current_user.id] + if not recent: + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return ('У вас пока нет задач в истории.', None, []) + task = recent[0] + actions = [AssistantAction(type='open_task', label='Open Task', target=task.id)] + if str(task.status).upper() in {'SUCCESS', 'FAILED'}: + actions.extend(_extract_result_deep_links(task, config_manager)) + summary_line = _build_task_observability_summary(task, config_manager) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Последняя задача: {task.id}, статус: {task.status}.' + (f'\n{summary_line}' if summary_line else ''), task.id, actions) + task = task_manager.get_task(task_id) + if not task: + raise HTTPException(status_code=404, detail=f'Task {task_id} not found') + actions = [AssistantAction(type='open_task', label='Open Task', target=task.id)] + if str(task.status).upper() in {'SUCCESS', 'FAILED'}: actions.extend(_extract_result_deep_links(task, config_manager)) summary_line = _build_task_observability_summary(task, config_manager) - return ( - f"Последняя задача: {task.id}, статус: {task.status}." - + (f"\n{summary_line}" if summary_line else ""), - task.id, - actions, - ) - - task = task_manager.get_task(task_id) - if not task: - raise HTTPException(status_code=404, detail=f"Task {task_id} not found") - actions = [AssistantAction(type="open_task", label="Open Task", target=task.id)] - if str(task.status).upper() in {"SUCCESS", "FAILED"}: - actions.extend(_extract_result_deep_links(task, config_manager)) - summary_line = _build_task_observability_summary(task, config_manager) - return ( - f"Статус задачи {task.id}: {task.status}." 
- + (f"\n{summary_line}" if summary_line else ""), - task.id, - actions, - ) - - if operation == "create_branch": - _check_any_permission(current_user, [("plugin:git", "EXECUTE")]) - dashboard_id = _resolve_dashboard_id_entity(entities, config_manager) - branch_name = entities.get("branch_name") - if not dashboard_id or not branch_name: - raise HTTPException( - status_code=422, - detail="Missing dashboard_id/dashboard_ref or branch_name", - ) - git_service.create_branch(dashboard_id, branch_name, "main") - return f"Ветка `{branch_name}` создана для дашборда {dashboard_id}.", None, [] - - if operation == "commit_changes": - _check_any_permission(current_user, [("plugin:git", "EXECUTE")]) - dashboard_id = _resolve_dashboard_id_entity(entities, config_manager) - commit_message = entities.get("message") - if not dashboard_id: - raise HTTPException( - status_code=422, detail="Missing dashboard_id/dashboard_ref" - ) - git_service.commit_changes(dashboard_id, commit_message, None) - return "Коммит выполнен успешно.", None, [] - - if operation == "deploy_dashboard": - _check_any_permission(current_user, [("plugin:git", "EXECUTE")]) - env_token = entities.get("environment") - env_id = _resolve_env_id(env_token, config_manager) - dashboard_id = _resolve_dashboard_id_entity( - entities, config_manager, env_hint=env_token - ) - if not dashboard_id or not env_id: - raise HTTPException( - status_code=422, - detail="Missing dashboard_id/dashboard_ref or environment", - ) - - task = await task_manager.create_task( - plugin_id="git-integration", - params={ - "operation": "deploy", - "dashboard_id": dashboard_id, - "environment_id": env_id, - }, - user_id=current_user.id, - ) - return ( - f"Деплой запущен. task_id={task.id}", - task.id, - [ - AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction( - type="open_reports", label="Open Reports", target="/reports" - ), - ], - ) - - if operation == "execute_migration": - _check_any_permission( - current_user, - [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")], - ) - src_token = entities.get("source_env") - dashboard_ref = entities.get("dashboard_ref") - dashboard_id = _resolve_dashboard_id_entity( - entities, config_manager, env_hint=src_token - ) - src = _resolve_env_id(src_token, config_manager) - tgt = _resolve_env_id(entities.get("target_env"), config_manager) - if not src or not tgt: - raise HTTPException(status_code=422, detail="Missing source_env/target_env") - if not dashboard_id and not dashboard_ref: - raise HTTPException( - status_code=422, detail="Missing dashboard_id/dashboard_ref" - ) - - migration_params: Dict[str, Any] = { - "source_env_id": src, - "target_env_id": tgt, - "replace_db_config": _coerce_query_bool( - entities.get("replace_db_config", False) - ), - "fix_cross_filters": _coerce_query_bool( - entities.get("fix_cross_filters", True) - ), - } - if dashboard_id: - migration_params["selected_ids"] = [dashboard_id] - else: - # Fallback: pass dashboard_ref as regex for the migration plugin to match - migration_params["dashboard_regex"] = str(dashboard_ref) - - task = await task_manager.create_task( - plugin_id="superset-migration", - params=migration_params, - user_id=current_user.id, - ) - return ( - f"Миграция запущена. 
task_id={task.id}", - task.id, - [ - AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction( - type="open_reports", label="Open Reports", target="/reports" - ), - *( - [ - AssistantAction( - type="open_route", - label=f"Открыть дашборд в {_get_environment_name_by_id(tgt, config_manager)}", - target=f"/dashboards/{dashboard_id}?env_id={tgt}", - ), - AssistantAction( - type="open_diff", - label="Показать Diff", - target=str(dashboard_id), - ), - ] - if dashboard_id - else [] - ), - ], - ) - - if operation == "run_backup": - _check_any_permission( - current_user, - [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")], - ) - env_token = entities.get("environment") - env_id = _resolve_env_id(env_token, config_manager) - if not env_id: - raise HTTPException( - status_code=400, detail="Missing or unknown environment" - ) - - params: Dict[str, Any] = {"environment_id": env_id} - if entities.get("dashboard_id") or entities.get("dashboard_ref"): - dashboard_id = _resolve_dashboard_id_entity( - entities, config_manager, env_hint=env_token - ) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Статус задачи {task.id}: {task.status}.' + (f'\n{summary_line}' if summary_line else ''), task.id, actions) + if operation == 'create_branch': + _check_any_permission(current_user, [('plugin:git', 'EXECUTE')]) + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager) + branch_name = entities.get('branch_name') + if not dashboard_id or not branch_name: + raise HTTPException(status_code=422, detail='Missing dashboard_id/dashboard_ref or branch_name') + git_service.create_branch(dashboard_id, branch_name, 'main') + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Ветка `{branch_name}` создана для дашборда {dashboard_id}.', None, []) + if operation == 'commit_changes': + _check_any_permission(current_user, [('plugin:git', 'EXECUTE')]) + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager) + commit_message = entities.get('message') if not dashboard_id: - raise HTTPException( - status_code=422, detail="Missing dashboard_id/dashboard_ref" - ) - params["dashboard_ids"] = [dashboard_id] - - task = await task_manager.create_task( - plugin_id="superset-backup", - params=params, - user_id=current_user.id, - ) - return ( - f"Бэкап запущен. 
task_id={task.id}", - task.id, - [ - AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction( - type="open_reports", label="Open Reports", target="/reports" - ), - *( - [ - AssistantAction( - type="open_route", - label=f"Открыть дашборд в {_get_environment_name_by_id(env_id, config_manager)}", - target=f"/dashboards/{dashboard_id}?env_id={env_id}", - ), - AssistantAction( - type="open_diff", - label="Показать Diff", - target=str(dashboard_id), - ), - ] - if entities.get("dashboard_id") or entities.get("dashboard_ref") - else [] - ), - ], - ) - - if operation == "run_llm_validation": - _check_any_permission( - current_user, [("plugin:llm_dashboard_validation", "EXECUTE")] - ) - env_token = entities.get("environment") - env_id = _resolve_env_id( - env_token, config_manager - ) or _get_default_environment_id(config_manager) - dashboard_id = _resolve_dashboard_id_entity( - entities, config_manager, env_hint=env_token - ) - provider_id = _resolve_provider_id( - entities.get("provider"), - db, - config_manager=config_manager, - task_key="dashboard_validation", - ) - if not dashboard_id or not env_id or not provider_id: - raise HTTPException( - status_code=422, - detail="Missing dashboard_id/environment/provider. Укажите ID/slug дашборда или окружение.", - ) - provider = LLMProviderService(db).get_provider(provider_id) - provider_model = provider.default_model if provider else "" - if not is_multimodal_model( - provider_model, provider.provider_type if provider else None - ): - raise HTTPException( - status_code=422, - detail=( - "Selected provider model is not multimodal for dashboard validation. " - "Выберите мультимодальную модель (например, gpt-4o)." - ), - ) - - task = await task_manager.create_task( - plugin_id="llm_dashboard_validation", - params={ - "dashboard_id": str(dashboard_id), - "environment_id": env_id, - "provider_id": provider_id, - }, - user_id=current_user.id, - ) - return ( - f"LLM-валидация запущена. task_id={task.id}", - task.id, - [ - AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction( - type="open_reports", label="Open Reports", target="/reports" - ), - ], - ) - - if operation == "run_llm_documentation": - _check_any_permission(current_user, [("plugin:llm_documentation", "EXECUTE")]) - dataset_id = entities.get("dataset_id") - env_id = _resolve_env_id(entities.get("environment"), config_manager) - provider_id = _resolve_provider_id( - entities.get("provider"), - db, - config_manager=config_manager, - task_key="documentation", - ) - if not dataset_id or not env_id or not provider_id: - raise HTTPException( - status_code=400, detail="Missing dataset_id/environment/provider" - ) - - task = await task_manager.create_task( - plugin_id="llm_documentation", - params={ - "dataset_id": str(dataset_id), - "environment_id": env_id, - "provider_id": provider_id, - }, - user_id=current_user.id, - ) - return ( - f"Генерация документации запущена. 
task_id={task.id}", - task.id, - [ - AssistantAction(type="open_task", label="Open Task", target=task.id), - AssistantAction( - type="open_reports", label="Open Reports", target="/reports" - ), - ], - ) - - raise HTTPException(status_code=400, detail="Unsupported operation") - - + raise HTTPException(status_code=422, detail='Missing dashboard_id/dashboard_ref') + git_service.commit_changes(dashboard_id, commit_message, None) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return ('Коммит выполнен успешно.', None, []) + if operation == 'deploy_dashboard': + _check_any_permission(current_user, [('plugin:git', 'EXECUTE')]) + env_token = entities.get('environment') + env_id = _resolve_env_id(env_token, config_manager) + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token) + if not dashboard_id or not env_id: + raise HTTPException(status_code=422, detail='Missing dashboard_id/dashboard_ref or environment') + task = await task_manager.create_task(plugin_id='git-integration', params={'operation': 'deploy', 'dashboard_id': dashboard_id, 'environment_id': env_id}, user_id=current_user.id) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Деплой запущен. task_id={task.id}', task.id, [AssistantAction(type='open_task', label='Open Task', target=task.id), AssistantAction(type='open_reports', label='Open Reports', target='/reports')]) + if operation == 'execute_migration': + _check_any_permission(current_user, [('plugin:migration', 'EXECUTE'), ('plugin:superset-migration', 'EXECUTE')]) + src_token = entities.get('source_env') + dashboard_ref = entities.get('dashboard_ref') + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=src_token) + src = _resolve_env_id(src_token, config_manager) + tgt = _resolve_env_id(entities.get('target_env'), config_manager) + if not src or not tgt: + raise HTTPException(status_code=422, detail='Missing source_env/target_env') + if not dashboard_id and (not dashboard_ref): + raise HTTPException(status_code=422, detail='Missing dashboard_id/dashboard_ref') + migration_params: Dict[str, Any] = {'source_env_id': src, 'target_env_id': tgt, 'replace_db_config': _coerce_query_bool(entities.get('replace_db_config', False)), 'fix_cross_filters': _coerce_query_bool(entities.get('fix_cross_filters', True))} + if dashboard_id: + migration_params['selected_ids'] = [dashboard_id] + else: + migration_params['dashboard_regex'] = str(dashboard_ref) + task = await task_manager.create_task(plugin_id='superset-migration', params=migration_params, user_id=current_user.id) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Миграция запущена. 
task_id={task.id}', task.id, [AssistantAction(type='open_task', label='Open Task', target=task.id), AssistantAction(type='open_reports', label='Open Reports', target='/reports'), *([AssistantAction(type='open_route', label=f'Открыть дашборд в {_get_environment_name_by_id(tgt, config_manager)}', target=f'/dashboards/{dashboard_id}?env_id={tgt}'), AssistantAction(type='open_diff', label='Показать Diff', target=str(dashboard_id))] if dashboard_id else [])]) + if operation == 'run_backup': + _check_any_permission(current_user, [('plugin:superset-backup', 'EXECUTE'), ('plugin:backup', 'EXECUTE')]) + env_token = entities.get('environment') + env_id = _resolve_env_id(env_token, config_manager) + if not env_id: + raise HTTPException(status_code=400, detail='Missing or unknown environment') + params: Dict[str, Any] = {'environment_id': env_id} + if entities.get('dashboard_id') or entities.get('dashboard_ref'): + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token) + if not dashboard_id: + raise HTTPException(status_code=422, detail='Missing dashboard_id/dashboard_ref') + params['dashboard_ids'] = [dashboard_id] + task = await task_manager.create_task(plugin_id='superset-backup', params=params, user_id=current_user.id) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Бэкап запущен. task_id={task.id}', task.id, [AssistantAction(type='open_task', label='Open Task', target=task.id), AssistantAction(type='open_reports', label='Open Reports', target='/reports'), *([AssistantAction(type='open_route', label=f'Открыть дашборд в {_get_environment_name_by_id(env_id, config_manager)}', target=f'/dashboards/{dashboard_id}?env_id={env_id}'), AssistantAction(type='open_diff', label='Показать Diff', target=str(dashboard_id))] if entities.get('dashboard_id') or entities.get('dashboard_ref') else [])]) + if operation == 'run_llm_validation': + _check_any_permission(current_user, [('plugin:llm_dashboard_validation', 'EXECUTE')]) + env_token = entities.get('environment') + env_id = _resolve_env_id(env_token, config_manager) or _get_default_environment_id(config_manager) + dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token) + provider_id = _resolve_provider_id(entities.get('provider'), db, config_manager=config_manager, task_key='dashboard_validation') + if not dashboard_id or not env_id or (not provider_id): + raise HTTPException(status_code=422, detail='Missing dashboard_id/environment/provider. Укажите ID/slug дашборда или окружение.') + provider = LLMProviderService(db).get_provider(provider_id) + provider_model = provider.default_model if provider else '' + if not is_multimodal_model(provider_model, provider.provider_type if provider else None): + raise HTTPException(status_code=422, detail='Selected provider model is not multimodal for dashboard validation. Выберите мультимодальную модель (например, gpt-4o).') + task = await task_manager.create_task(plugin_id='llm_dashboard_validation', params={'dashboard_id': str(dashboard_id), 'environment_id': env_id, 'provider_id': provider_id}, user_id=current_user.id) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'LLM-валидация запущена. 
task_id={task.id}', task.id, [AssistantAction(type='open_task', label='Open Task', target=task.id), AssistantAction(type='open_reports', label='Open Reports', target='/reports')]) + if operation == 'run_llm_documentation': + _check_any_permission(current_user, [('plugin:llm_documentation', 'EXECUTE')]) + dataset_id = entities.get('dataset_id') + env_id = _resolve_env_id(entities.get('environment'), config_manager) + provider_id = _resolve_provider_id(entities.get('provider'), db, config_manager=config_manager, task_key='documentation') + if not dataset_id or not env_id or (not provider_id): + raise HTTPException(status_code=400, detail='Missing dataset_id/environment/provider') + task = await task_manager.create_task(plugin_id='llm_documentation', params={'dataset_id': str(dataset_id), 'environment_id': env_id, 'provider_id': provider_id}, user_id=current_user.id) + logger.reflect('Belief protocol postcondition checkpoint for _dispatch_intent') + return (f'Генерация документации запущена. task_id={task.id}', task.id, [AssistantAction(type='open_task', label='Open Task', target=task.id), AssistantAction(type='open_reports', label='Open Reports', target='/reports')]) + raise HTTPException(status_code=400, detail='Unsupported operation') # [/DEF:_dispatch_intent:Function] @@ -2717,253 +2165,78 @@ async def _dispatch_intent( # @POST: Response state is one of clarification/confirmation/started/success/denied/failed. # @RETURN: AssistantMessageResponse with operation feedback and optional actions. # @INVARIANT: non-safe operations are gated with confirmation before execution from this endpoint. -async def send_message( - request: AssistantMessageRequest, - current_user: User = Depends(get_current_user), - task_manager: TaskManager = Depends(get_task_manager), - config_manager: ConfigManager = Depends(get_config_manager), - db: Session = Depends(get_db), -): - with belief_scope("assistant.send_message"): +async def send_message(request: AssistantMessageRequest, current_user: User=Depends(get_current_user), task_manager: TaskManager=Depends(get_task_manager), config_manager: ConfigManager=Depends(get_config_manager), db: Session=Depends(get_db)): + with belief_scope('send_message'): + logger.reason('Belief protocol reasoning checkpoint for send_message') user_id = current_user.id - dataset_review_context = _load_dataset_review_context( - request.dataset_review_session_id, - current_user, - db, - ) - conversation_id = _resolve_or_create_conversation( - user_id, request.conversation_id, db - ) - - _append_history(user_id, conversation_id, "user", request.message) - _persist_message(db, user_id, conversation_id, "user", request.message) - + dataset_review_context = _load_dataset_review_context(request.dataset_review_session_id, current_user, db) + conversation_id = _resolve_or_create_conversation(user_id, request.conversation_id, db) + _append_history(user_id, conversation_id, 'user', request.message) + _persist_message(db, user_id, conversation_id, 'user', request.message) tools_catalog = _build_tool_catalog(current_user, config_manager, db) intent = None try: - intent = await _plan_intent_with_llm( - request.message, tools_catalog, db, config_manager - ) + intent = await _plan_intent_with_llm(request.message, tools_catalog, db, config_manager) except Exception as exc: - logger.warning(f"[assistant.planner][fallback] Planner error: {exc}") + logger.warning(f'[assistant.planner][fallback] Planner error: {exc}') if not intent: intent = _parse_command(request.message, config_manager) if 
dataset_review_context: - dataset_review_intent = _plan_dataset_review_intent( - request.message, dataset_review_context - ) + dataset_review_intent = _plan_dataset_review_intent(request.message, dataset_review_context) if dataset_review_intent is not None: intent = dataset_review_intent - confidence = float(intent.get("confidence", 0.0)) - - if intent.get("domain") == "unknown" or confidence < 0.6: - text = "Команда неоднозначна. Уточните действие: git / migration / backup / llm / status." - _append_history( - user_id, conversation_id, "assistant", text, state="needs_clarification" - ) - _persist_message( - db, - user_id, - conversation_id, - "assistant", - text, - state="needs_clarification", - metadata={"intent": intent}, - ) - audit_payload = { - "decision": "needs_clarification", - "message": request.message, - "intent": intent, - "dataset_review_session_id": request.dataset_review_session_id, - } + confidence = float(intent.get('confidence', 0.0)) + if intent.get('domain') == 'unknown' or confidence < 0.6: + text = 'Команда неоднозначна. Уточните действие: git / migration / backup / llm / status.' + _append_history(user_id, conversation_id, 'assistant', text, state='needs_clarification') + _persist_message(db, user_id, conversation_id, 'assistant', text, state='needs_clarification', metadata={'intent': intent}) + audit_payload = {'decision': 'needs_clarification', 'message': request.message, 'intent': intent, 'dataset_review_session_id': request.dataset_review_session_id} _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) - return AssistantMessageResponse( - conversation_id=conversation_id, - response_id=str(uuid.uuid4()), - state="needs_clarification", - text=text, - intent=intent, - actions=[AssistantAction(type="rephrase", label="Rephrase command")], - created_at=datetime.utcnow(), - ) - + logger.reflect('Belief protocol postcondition checkpoint for send_message') + return AssistantMessageResponse(conversation_id=conversation_id, response_id=str(uuid.uuid4()), state='needs_clarification', text=text, intent=intent, actions=[AssistantAction(type='rephrase', label='Rephrase command')], created_at=datetime.utcnow()) try: _authorize_intent(intent, current_user) - - operation = intent.get("operation") + operation = intent.get('operation') if operation not in _SAFE_OPS: confirmation_id = str(uuid.uuid4()) - confirm = ConfirmationRecord( - id=confirmation_id, - user_id=user_id, - conversation_id=conversation_id, - intent=intent, - dispatch={"intent": intent}, - expires_at=datetime.utcnow() + timedelta(minutes=5), - created_at=datetime.utcnow(), - ) + confirm = ConfirmationRecord(id=confirmation_id, user_id=user_id, conversation_id=conversation_id, intent=intent, dispatch={'intent': intent}, expires_at=datetime.utcnow() + timedelta(minutes=5), created_at=datetime.utcnow()) CONFIRMATIONS[confirmation_id] = confirm _persist_confirmation(db, confirm) text = await _async_confirmation_summary(intent, config_manager, db) - _append_history( - user_id, - conversation_id, - "assistant", - text, - state="needs_confirmation", - confirmation_id=confirmation_id, - ) - _persist_message( - db, - user_id, - conversation_id, - "assistant", - text, - state="needs_confirmation", - confirmation_id=confirmation_id, - metadata={ - "intent": intent, - "dataset_review_context": dataset_review_context, - "actions": [ - { - "type": "confirm", - "label": "✅ Подтвердить", - "target": confirmation_id, - }, - { - "type": "cancel", - "label": "❌ Отменить", - "target": 
confirmation_id, - }, - ], - }, - ) - audit_payload = { - "decision": "needs_confirmation", - "message": request.message, - "intent": intent, - "confirmation_id": confirmation_id, - "dataset_review_session_id": request.dataset_review_session_id, - } + _append_history(user_id, conversation_id, 'assistant', text, state='needs_confirmation', confirmation_id=confirmation_id) + _persist_message(db, user_id, conversation_id, 'assistant', text, state='needs_confirmation', confirmation_id=confirmation_id, metadata={'intent': intent, 'dataset_review_context': dataset_review_context, 'actions': [{'type': 'confirm', 'label': '✅ Подтвердить', 'target': confirmation_id}, {'type': 'cancel', 'label': '❌ Отменить', 'target': confirmation_id}]}) + audit_payload = {'decision': 'needs_confirmation', 'message': request.message, 'intent': intent, 'confirmation_id': confirmation_id, 'dataset_review_session_id': request.dataset_review_session_id} _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) - return AssistantMessageResponse( - conversation_id=conversation_id, - response_id=str(uuid.uuid4()), - state="needs_confirmation", - text=text, - intent=intent, - confirmation_id=confirmation_id, - actions=[ - AssistantAction( - type="confirm", - label="✅ Подтвердить", - target=confirmation_id, - ), - AssistantAction( - type="cancel", label="❌ Отменить", target=confirmation_id - ), - ], - created_at=datetime.utcnow(), - ) - - # Read-only operations execute immediately - text, task_id, actions = await _dispatch_intent( - intent, current_user, task_manager, config_manager, db - ) - state = "started" if task_id else "success" - _append_history( - user_id, - conversation_id, - "assistant", - text, - state=state, - task_id=task_id, - ) - _persist_message( - db, - user_id, - conversation_id, - "assistant", - text, - state=state, - task_id=task_id, - metadata={ - "intent": intent, - "dataset_review_context": dataset_review_context, - "actions": [a.model_dump() for a in actions], - }, - ) - audit_payload = { - "decision": "executed", - "message": request.message, - "intent": intent, - "task_id": task_id, - "dataset_review_session_id": request.dataset_review_session_id, - } + logger.reflect('Belief protocol postcondition checkpoint for send_message') + return AssistantMessageResponse(conversation_id=conversation_id, response_id=str(uuid.uuid4()), state='needs_confirmation', text=text, intent=intent, confirmation_id=confirmation_id, actions=[AssistantAction(type='confirm', label='✅ Подтвердить', target=confirmation_id), AssistantAction(type='cancel', label='❌ Отменить', target=confirmation_id)], created_at=datetime.utcnow()) + text, task_id, actions = await _dispatch_intent(intent, current_user, task_manager, config_manager, db) + state = 'started' if task_id else 'success' + _append_history(user_id, conversation_id, 'assistant', text, state=state, task_id=task_id) + _persist_message(db, user_id, conversation_id, 'assistant', text, state=state, task_id=task_id, metadata={'intent': intent, 'dataset_review_context': dataset_review_context, 'actions': [a.model_dump() for a in actions]}) + audit_payload = {'decision': 'executed', 'message': request.message, 'intent': intent, 'task_id': task_id, 'dataset_review_session_id': request.dataset_review_session_id} _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) - return AssistantMessageResponse( - conversation_id=conversation_id, - response_id=str(uuid.uuid4()), - state=state, - text=text, - intent=intent, 
- task_id=task_id, - actions=actions, - created_at=datetime.utcnow(), - ) + logger.reflect('Belief protocol postcondition checkpoint for send_message') + return AssistantMessageResponse(conversation_id=conversation_id, response_id=str(uuid.uuid4()), state=state, text=text, intent=intent, task_id=task_id, actions=actions, created_at=datetime.utcnow()) except HTTPException as exc: detail_text = str(exc.detail) - is_clarification_error = exc.status_code in (400, 422) and ( - detail_text.lower().startswith("missing") - or "укажите" in detail_text.lower() - or "выберите" in detail_text.lower() - ) + is_clarification_error = exc.status_code in (400, 422) and (detail_text.lower().startswith('missing') or 'укажите' in detail_text.lower() or 'выберите' in detail_text.lower()) if exc.status_code == status.HTTP_403_FORBIDDEN: - state = "denied" + state = 'denied' elif is_clarification_error: - state = "needs_clarification" + state = 'needs_clarification' else: - state = "failed" - text = ( - _clarification_text_for_intent(intent, detail_text) - if state == "needs_clarification" - else detail_text - ) - _append_history(user_id, conversation_id, "assistant", text, state=state) - _persist_message( - db, - user_id, - conversation_id, - "assistant", - text, - state=state, - metadata={"intent": intent}, - ) - audit_payload = { - "decision": state, - "message": request.message, - "intent": intent, - "error": text, - "dataset_review_session_id": request.dataset_review_session_id, - } + state = 'failed' + text = _clarification_text_for_intent(intent, detail_text) if state == 'needs_clarification' else detail_text + _append_history(user_id, conversation_id, 'assistant', text, state=state) + _persist_message(db, user_id, conversation_id, 'assistant', text, state=state, metadata={'intent': intent}) + audit_payload = {'decision': state, 'message': request.message, 'intent': intent, 'error': text, 'dataset_review_session_id': request.dataset_review_session_id} _audit(user_id, audit_payload) _persist_audit(db, user_id, audit_payload, conversation_id) - return AssistantMessageResponse( - conversation_id=conversation_id, - response_id=str(uuid.uuid4()), - state=state, - text=text, - intent=intent, - actions=[AssistantAction(type="rephrase", label="Rephrase command")] - if state == "needs_clarification" - else [], - created_at=datetime.utcnow(), - ) - - + logger.reflect('Belief protocol postcondition checkpoint for send_message') + return AssistantMessageResponse(conversation_id=conversation_id, response_id=str(uuid.uuid4()), state=state, text=text, intent=intent, actions=[AssistantAction(type='rephrase', label='Rephrase command')] if state == 'needs_clarification' else [], created_at=datetime.utcnow()) # [/DEF:send_message:Function] diff --git a/backend/src/api/routes/clean_release.py b/backend/src/api/routes/clean_release.py index e6cba53e..dee53be8 100644 --- a/backend/src/api/routes/clean_release.py +++ b/backend/src/api/routes/clean_release.py @@ -1,10 +1,13 @@ -# [DEF:backend.src.api.routes.clean_release:Module] +# [DEF:CleanReleaseApi:Module] # @COMPLEXITY: 4 # @SEMANTICS: api, clean-release, candidate-preparation, compliance # @PURPOSE: Expose clean release endpoints for candidate preparation and subsequent compliance flow. 
# @LAYER: API -# @RELATION: DEPENDS_ON -> backend.src.dependencies.get_clean_release_repository -# @RELATION: DEPENDS_ON -> backend.src.services.clean_release.preparation_service +# @RELATION: DEPENDS_ON -> [get_clean_release_repository] +# @RELATION: DEPENDS_ON -> [PreparationService] +# @PRE: Clean release repository and preparation service dependencies are configured for the current request scope. +# @POST: Candidate preparation, manifest, and compliance routes expose deterministic API payloads without reporting prepared state on failed preparation. +# @SIDE_EFFECT: Persists candidate/compliance lifecycle state and triggers clean-release orchestration services. # @INVARIANT: API never reports prepared status if preparation errors are present. from __future__ import annotations @@ -633,4 +636,4 @@ async def get_report( # [/DEF:get_report:Function] -# [/DEF:backend.src.api.routes.clean_release:Module] +# [/DEF:CleanReleaseApi:Module] diff --git a/backend/src/api/routes/dashboards.py b/backend/src/api/routes/dashboards.py index 10a0e720..bb4f212d 100644 --- a/backend/src/api/routes/dashboards.py +++ b/backend/src/api/routes/dashboards.py @@ -1295,6 +1295,7 @@ async def get_dashboard_tasks_history( # [DEF:get_dashboard_thumbnail:Function] # @COMPLEXITY: 3 # @PURPOSE: Proxies Superset dashboard thumbnail with cache support. +# @RELATION: CALLS ->[AsyncSupersetClient] # @PRE: env_id must exist. # @POST: Returns image bytes or 202 when thumbnail is being prepared by Superset. @router.get("/{dashboard_ref}/thumbnail") diff --git a/backend/src/api/routes/dataset_review.py b/backend/src/api/routes/dataset_review.py index 9a0d320e..02c81a85 100644 --- a/backend/src/api/routes/dataset_review.py +++ b/backend/src/api/routes/dataset_review.py @@ -448,48 +448,34 @@ def _require_session_version_header( # [/DEF:_require_session_version_header:Function] +from src.logger import belief_scope, logger + # [DEF:_enforce_session_version:Function] # @COMPLEXITY: 4 # @PURPOSE: Convert repository optimistic-lock conflicts into deterministic HTTP 409 responses. 
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository] -def _enforce_session_version( - repository: DatasetReviewSessionRepository, - session: DatasetReviewSession, - expected_version: int, -) -> DatasetReviewSession: - try: - repository.require_session_version(session, expected_version) - return session - except DatasetReviewSessionVersionConflictError as exc: - raise HTTPException( - status_code=status.HTTP_409_CONFLICT, - detail={ - "error_code": "session_version_conflict", - "message": str(exc), - "session_id": exc.session_id, - "expected_version": exc.expected_version, - "actual_version": exc.actual_version, - }, - ) from exc - - +def _enforce_session_version(repository: DatasetReviewSessionRepository, session: DatasetReviewSession, expected_version: int) -> DatasetReviewSession: + with belief_scope('_enforce_session_version'): + logger.reason('Belief protocol reasoning checkpoint for _enforce_session_version') + try: + repository.require_session_version(session, expected_version) + logger.reflect('Belief protocol postcondition checkpoint for _enforce_session_version') + return session + except DatasetReviewSessionVersionConflictError as exc: + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail={'error_code': 'session_version_conflict', 'message': str(exc), 'session_id': exc.session_id, 'expected_version': exc.expected_version, 'actual_version': exc.actual_version}) from exc # [/DEF:_enforce_session_version:Function] # [DEF:_prepare_owned_session_mutation:Function] # @COMPLEXITY: 4 # @PURPOSE: Resolve owner-scoped mutation session and enforce optimistic-lock version before changing dataset review state. -def _prepare_owned_session_mutation( - repository: DatasetReviewSessionRepository, - session_id: str, - current_user: User, - expected_version: int, -) -> DatasetReviewSession: - session = _get_owned_session_or_404(repository, session_id, current_user) - _require_owner_mutation_scope(session, current_user) - return _enforce_session_version(repository, session, expected_version) - - +def _prepare_owned_session_mutation(repository: DatasetReviewSessionRepository, session_id: str, current_user: User, expected_version: int) -> DatasetReviewSession: + with belief_scope('_prepare_owned_session_mutation'): + logger.reason('Belief protocol reasoning checkpoint for _prepare_owned_session_mutation') + session = _get_owned_session_or_404(repository, session_id, current_user) + _require_owner_mutation_scope(session, current_user) + logger.reflect('Belief protocol postcondition checkpoint for _prepare_owned_session_mutation') + return _enforce_session_version(repository, session, expected_version) # [/DEF:_prepare_owned_session_mutation:Function] @@ -497,20 +483,16 @@ def _prepare_owned_session_mutation( # @COMPLEXITY: 4 # @PURPOSE: Centralize dataset-review session version bumping and commit semantics for owner-scoped mutation endpoints. 
# @RELATION: [DEPENDS_ON] ->[DatasetReviewSessionRepository] -def _commit_owned_session_mutation( - repository: DatasetReviewSessionRepository, - session: DatasetReviewSession, - *, - refresh_targets: Optional[List[Any]] = None, -) -> DatasetReviewSession: - repository.bump_session_version(session) - repository.db.commit() - repository.db.refresh(session) - for target in refresh_targets or []: - repository.db.refresh(target) - return session - - +def _commit_owned_session_mutation(repository: DatasetReviewSessionRepository, session: DatasetReviewSession, *, refresh_targets: Optional[List[Any]]=None) -> DatasetReviewSession: + with belief_scope('_commit_owned_session_mutation'): + logger.reason('Belief protocol reasoning checkpoint for _commit_owned_session_mutation') + repository.bump_session_version(session) + repository.db.commit() + repository.db.refresh(session) + for target in refresh_targets or []: + repository.db.refresh(target) + logger.reflect('Belief protocol postcondition checkpoint for _commit_owned_session_mutation') + return session # [/DEF:_commit_owned_session_mutation:Function] diff --git a/backend/src/api/routes/datasets.py b/backend/src/api/routes/datasets.py index 2dc14ba6..0bd38b21 100644 --- a/backend/src/api/routes/datasets.py +++ b/backend/src/api/routes/datasets.py @@ -5,8 +5,8 @@ # @PURPOSE: API endpoints for the Dataset Hub - listing datasets with mapping progress # @LAYER: API # @RELATION: DEPENDS_ON ->[AppDependencies] -# @RELATION: DEPENDS_ON ->[backend.src.services.resource_service.ResourceService] -# @RELATION: DEPENDS_ON ->[backend.src.core.superset_client.SupersetClient] +# @RELATION: DEPENDS_ON ->[ResourceService] +# @RELATION: DEPENDS_ON ->[SupersetClient] # # @INVARIANT: All dataset responses include last_task metadata @@ -178,7 +178,7 @@ async def get_dataset_ids( # @PARAM: page (Optional[int]) - Page number (default: 1) # @PARAM: page_size (Optional[int]) - Items per page (default: 10, max: 100) # @RETURN: DatasetsResponse - List of datasets with status metadata -# @RELATION: CALLS ->[backend.src.services.resource_service.ResourceService.get_datasets_with_status] +# @RELATION: CALLS ->[get_datasets_with_status] @router.get("", response_model=DatasetsResponse) async def get_datasets( env_id: str, @@ -266,8 +266,8 @@ class MapColumnsRequest(BaseModel): # @POST: Task is created and queued for execution # @PARAM: request (MapColumnsRequest) - Mapping request with environment and dataset IDs # @RETURN: TaskResponse - Task ID for tracking -# @RELATION: DISPATCHES ->[backend.src.plugins.mapper.MapperPlugin] -# @RELATION: CALLS ->[backend.src.core.task_manager.manager.TaskManager:create_task] +# @RELATION: DISPATCHES ->[MapperPlugin] +# @RELATION: CALLS ->[create_task] @router.post("/map-columns", response_model=TaskResponse) async def map_columns( request: MapColumnsRequest, @@ -338,8 +338,8 @@ class GenerateDocsRequest(BaseModel): # @POST: Task is created and queued for execution # @PARAM: request (GenerateDocsRequest) - Documentation generation request # @RETURN: TaskResponse - Task ID for tracking -# @RELATION: DISPATCHES ->[backend.src.plugins.llm_analysis.plugin.DocumentationPlugin] -# @RELATION: CALLS ->[backend.src.core.task_manager.manager.TaskManager:create_task] +# @RELATION: DISPATCHES ->[DocumentationPlugin] +# @RELATION: CALLS ->[create_task] @router.post("/generate-docs", response_model=TaskResponse) async def generate_docs( request: GenerateDocsRequest, @@ -393,7 +393,7 @@ async def generate_docs( # @PARAM: env_id (str) - The environment ID # 
@PARAM: dataset_id (int) - The dataset ID # @RETURN: DatasetDetailResponse - Detailed dataset information -# @RELATION: CALLS ->[backend.src.core.superset_client.SupersetClient:get_dataset_detail] +# @RELATION: CALLS ->[SupersetClientGetDatasetDetail] @router.get("/{dataset_id}", response_model=DatasetDetailResponse) async def get_dataset_detail( env_id: str, diff --git a/backend/src/services/profile_service.py b/backend/src/services/profile_service.py index 1f835190..89133a22 100644 --- a/backend/src/services/profile_service.py +++ b/backend/src/services/profile_service.py @@ -4,12 +4,12 @@ # @SEMANTICS: profile, service, validation, ownership, filtering, superset, preferences # @PURPOSE: Orchestrates profile preference persistence, Superset account lookup, and deterministic actor matching. # @LAYER: Domain -# @RELATION: DEPENDS_ON -> backend.src.models.profile -# @RELATION: DEPENDS_ON -> backend.src.schemas.profile -# @RELATION: DEPENDS_ON -> backend.src.core.superset_client -# @RELATION: DEPENDS_ON -> backend.src.core.auth.repository -# @RELATION: DEPENDS_ON -> backend.src.models.auth -# @RELATION: DEPENDS_ON -> sqlalchemy.orm.Session +# @RELATION: DEPENDS_ON -> [UserDashboardPreference] +# @RELATION: DEPENDS_ON -> [ProfilePreferenceResponse] +# @RELATION: DEPENDS_ON -> [SupersetClient] +# @RELATION: DEPENDS_ON -> [AuthRepositoryModule] +# @RELATION: DEPENDS_ON -> [User] +# @RELATION: DEPENDS_ON -> [sqlalchemy.orm.Session] # # @INVARIANT: Preference mutations are always scoped to authenticated user identity. # @INVARIANT: Username normalization is trim+lower and shared by save and matching paths.
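Note on the recurring pattern in the refactored handlers above: each helper is wrapped in belief_scope(...), emits a logger.reason(...) checkpoint on entry, and a logger.reflect(...) checkpoint immediately before every return. A minimal sketch of that shape follows, assuming only the names this patch itself imports (belief_scope and logger from src.logger); the helper function, its argument, and its body are hypothetical and exist only to illustrate the checkpoint placement.

    from src.logger import belief_scope, logger

    def example_helper(value: int) -> int:
        # Hypothetical helper illustrating the belief-protocol checkpoint shape
        # used by the patched route helpers: scope, reason on entry, reflect
        # directly before the return.
        with belief_scope("example_helper"):
            logger.reason("Belief protocol reasoning checkpoint for example_helper")
            result = value * 2  # placeholder work in lieu of real route logic
            logger.reflect("Belief protocol postcondition checkpoint for example_helper")
            return result

As used in the patch, every early-return branch carries its own logger.reflect(...) call before returning, so postcondition checkpoints are recorded regardless of which branch exits the scope.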