Compare commits
64 Commits
018-task-l
...
f018b97ed2
| Author | SHA1 | Date | |
|---|---|---|---|
| f018b97ed2 | |||
| 72846aa835 | |||
| 994c0c3e5d | |||
| 252a8601a9 | |||
| 8044f85ea4 | |||
| d4109e5a03 | |||
| b2bbd73439 | |||
| 0e0e26e2f7 | |||
| 18b42f8dd0 | |||
| e7b31accd6 | |||
| d3c3a80ed2 | |||
| cc244c2d86 | |||
| d10c23e658 | |||
| 1042b35d1b | |||
| 16ffeb1ed6 | |||
| da34deac02 | |||
| 51e9ee3fcc | |||
| edf9286071 | |||
| a542e7d2df | |||
| a863807cf2 | |||
| e2bc68683f | |||
| 43cb82697b | |||
| 4ba28cf93e | |||
| 343f2e29f5 | |||
| c9a53578fd | |||
| 07ec2d9797 | |||
| e9d3f3c827 | |||
| 26ba015b75 | |||
| 49129d3e86 | |||
| d99a13d91f | |||
| 203ce446f4 | |||
| c96d50a3f4 | |||
| 3bbe320949 | |||
| 2d2435642d | |||
| ec8d67c956 | |||
| 76baeb1038 | |||
| 11c59fb420 | |||
| b2529973eb | |||
| ae1d630ad6 | |||
| 9a9c5879e6 | |||
| 696aac32e7 | |||
| 7a9b1a190a | |||
| a3dc1fb2b9 | |||
| 297b29986d | |||
| 4c6fc8256d | |||
| a747a163c8 | |||
| fce0941e98 | |||
| 45c077b928 | |||
| 9ed3a5992d | |||
| a032fe8457 | |||
| 4c9d554432 | |||
| 6962a78112 | |||
| 3d75a21127 | |||
| 07914c8728 | |||
| cddc259b76 | |||
| dcbf0a7d7f | |||
| 65f61c1f80 | |||
| cb7386f274 | |||
| 83e34e1799 | |||
| d197303b9f | |||
| a43f8fb021 | |||
| 4aa01b6470 | |||
| 35b423979d | |||
| 2ffc3cc68f |
@@ -1 +0,0 @@
|
||||
{"print(f'Length": {"else": "print('Provider not found')\ndb.close()"}}
|
||||
117147
backend/logs/app.log.1
117147
backend/logs/app.log.1
File diff suppressed because it is too large
Load Diff
BIN
backend/mappings.db
Normal file
BIN
backend/mappings.db
Normal file
Binary file not shown.
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.api.routes.environments:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: api, environments, superset, databases
|
||||
# @PURPOSE: API endpoints for listing environments and their databases.
|
||||
# @LAYER: API
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.api.routes.git:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: git, routes, api, fastapi, repository, deployment
|
||||
# @PURPOSE: Provides FastAPI endpoints for Git integration operations.
|
||||
# @LAYER: API
|
||||
|
||||
@@ -143,15 +143,6 @@ async def test_connection(
|
||||
raise HTTPException(status_code=404, detail="Provider not found")
|
||||
|
||||
api_key = service.get_decrypted_api_key(provider_id)
|
||||
|
||||
# Check if API key was successfully decrypted
|
||||
if not api_key:
|
||||
logger.error(f"[llm_routes][test_connection] Failed to decrypt API key for provider {provider_id}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to decrypt API key. The provider may have been encrypted with a different encryption key. Please update the provider with a new API key."
|
||||
)
|
||||
|
||||
client = LLMClient(
|
||||
provider_type=LLMProviderType(db_provider.provider_type),
|
||||
api_key=api_key,
|
||||
@@ -182,13 +173,6 @@ async def test_provider_config(
|
||||
from ...plugins.llm_analysis.service import LLMClient
|
||||
logger.info(f"[llm_routes][test_provider_config][Action] Testing config for {config.name}")
|
||||
|
||||
# Check if API key is provided
|
||||
if not config.api_key or config.api_key == "********":
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="API key is required for testing connection"
|
||||
)
|
||||
|
||||
client = LLMClient(
|
||||
provider_type=config.provider_type,
|
||||
api_key=config.api_key,
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.api.routes.mappings:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: api, mappings, database, fuzzy-matching
|
||||
# @PURPOSE: API endpoints for managing database mappings and getting suggestions.
|
||||
# @LAYER: API
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:backend.src.api.routes.migration:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: api, migration, dashboards
|
||||
# @PURPOSE: API endpoints for migration operations.
|
||||
# @LAYER: API
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:PluginsRouter:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: api, router, plugins, list
|
||||
# @PURPOSE: Defines the FastAPI router for plugin-related endpoints, allowing clients to list available plugins.
|
||||
# @LAYER: UI (API)
|
||||
|
||||
@@ -12,25 +12,15 @@
|
||||
# [SECTION: IMPORTS]
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from typing import List
|
||||
from pydantic import BaseModel
|
||||
from ...core.config_models import AppConfig, Environment, GlobalSettings, LoggingConfig
|
||||
from ...core.config_models import AppConfig, Environment, GlobalSettings
|
||||
from ...models.storage import StorageConfig
|
||||
from ...dependencies import get_config_manager, has_permission
|
||||
from ...core.config_manager import ConfigManager
|
||||
from ...core.logger import logger, belief_scope, get_task_log_level
|
||||
from ...core.logger import logger, belief_scope
|
||||
from ...core.superset_client import SupersetClient
|
||||
import os
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:LoggingConfigResponse:Class]
|
||||
# @PURPOSE: Response model for logging configuration with current task log level.
|
||||
# @SEMANTICS: logging, config, response
|
||||
class LoggingConfigResponse(BaseModel):
|
||||
level: str
|
||||
task_log_level: str
|
||||
enable_belief_state: bool
|
||||
# [/DEF:LoggingConfigResponse:Class]
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
# [DEF:get_settings:Function]
|
||||
@@ -233,50 +223,5 @@ async def test_environment_connection(
|
||||
return {"status": "error", "message": str(e)}
|
||||
# [/DEF:test_environment_connection:Function]
|
||||
|
||||
# [DEF:get_logging_config:Function]
|
||||
# @PURPOSE: Retrieves current logging configuration.
|
||||
# @PRE: Config manager is available.
|
||||
# @POST: Returns logging configuration.
|
||||
# @RETURN: LoggingConfigResponse - The current logging config.
|
||||
@router.get("/logging", response_model=LoggingConfigResponse)
|
||||
async def get_logging_config(
|
||||
config_manager: ConfigManager = Depends(get_config_manager),
|
||||
_ = Depends(has_permission("admin:settings", "READ"))
|
||||
):
|
||||
with belief_scope("get_logging_config"):
|
||||
logging_config = config_manager.get_config().settings.logging
|
||||
return LoggingConfigResponse(
|
||||
level=logging_config.level,
|
||||
task_log_level=logging_config.task_log_level,
|
||||
enable_belief_state=logging_config.enable_belief_state
|
||||
)
|
||||
# [/DEF:get_logging_config:Function]
|
||||
|
||||
# [DEF:update_logging_config:Function]
|
||||
# @PURPOSE: Updates logging configuration.
|
||||
# @PRE: New logging config is provided.
|
||||
# @POST: Logging configuration is updated and saved.
|
||||
# @PARAM: config (LoggingConfig) - The new logging configuration.
|
||||
# @RETURN: LoggingConfigResponse - The updated logging config.
|
||||
@router.patch("/logging", response_model=LoggingConfigResponse)
|
||||
async def update_logging_config(
|
||||
config: LoggingConfig,
|
||||
config_manager: ConfigManager = Depends(get_config_manager),
|
||||
_ = Depends(has_permission("admin:settings", "WRITE"))
|
||||
):
|
||||
with belief_scope("update_logging_config"):
|
||||
logger.info(f"[update_logging_config][Entry] Updating logging config: level={config.level}, task_log_level={config.task_log_level}")
|
||||
|
||||
# Get current settings and update logging config
|
||||
settings = config_manager.get_config().settings
|
||||
settings.logging = config
|
||||
config_manager.update_global_settings(settings)
|
||||
|
||||
return LoggingConfigResponse(
|
||||
level=config.level,
|
||||
task_log_level=config.task_log_level,
|
||||
enable_belief_state=config.enable_belief_state
|
||||
)
|
||||
# [/DEF:update_logging_config:Function]
|
||||
|
||||
# [/DEF:SettingsRouter:Module]
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:storage_routes:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: storage, files, upload, download, backup, repository
|
||||
# @PURPOSE: API endpoints for file storage management (backups and repositories).
|
||||
# @LAYER: API
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
# [DEF:TasksRouter:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: api, router, tasks, create, list, get, logs
|
||||
# @SEMANTICS: api, router, tasks, create, list, get
|
||||
# @PURPOSE: Defines the FastAPI router for task-related endpoints, allowing clients to create, list, and get the status of tasks.
|
||||
# @LAYER: UI (API)
|
||||
# @RELATION: Depends on the TaskManager. It is included by the main app.
|
||||
from typing import List, Dict, Any, Optional
|
||||
from fastapi import APIRouter, Depends, HTTPException, status, Query
|
||||
from pydantic import BaseModel, Field
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel
|
||||
from ...core.logger import belief_scope
|
||||
|
||||
from ...core.task_manager import TaskManager, Task, TaskStatus, LogEntry
|
||||
from ...core.task_manager.models import LogFilter, LogStats
|
||||
from ...dependencies import get_task_manager, has_permission, get_current_user
|
||||
|
||||
router = APIRouter()
|
||||
@@ -118,93 +116,27 @@ async def get_task(
|
||||
|
||||
@router.get("/{task_id}/logs", response_model=List[LogEntry])
|
||||
# [DEF:get_task_logs:Function]
|
||||
# @PURPOSE: Retrieve logs for a specific task with optional filtering.
|
||||
# @PURPOSE: Retrieve logs for a specific task.
|
||||
# @PARAM: task_id (str) - The unique identifier of the task.
|
||||
# @PARAM: level (Optional[str]) - Filter by log level (DEBUG, INFO, WARNING, ERROR).
|
||||
# @PARAM: source (Optional[str]) - Filter by source component.
|
||||
# @PARAM: search (Optional[str]) - Text search in message.
|
||||
# @PARAM: offset (int) - Number of logs to skip.
|
||||
# @PARAM: limit (int) - Maximum number of logs to return.
|
||||
# @PARAM: task_manager (TaskManager) - The task manager instance.
|
||||
# @PRE: task_id must exist.
|
||||
# @POST: Returns a list of log entries or raises 404.
|
||||
# @RETURN: List[LogEntry] - List of log entries.
|
||||
# @TIER: CRITICAL
|
||||
async def get_task_logs(
|
||||
task_id: str,
|
||||
level: Optional[str] = Query(None, description="Filter by log level (DEBUG, INFO, WARNING, ERROR)"),
|
||||
source: Optional[str] = Query(None, description="Filter by source component"),
|
||||
search: Optional[str] = Query(None, description="Text search in message"),
|
||||
offset: int = Query(0, ge=0, description="Number of logs to skip"),
|
||||
limit: int = Query(100, ge=1, le=1000, description="Maximum number of logs to return"),
|
||||
task_manager: TaskManager = Depends(get_task_manager),
|
||||
_ = Depends(has_permission("tasks", "READ"))
|
||||
):
|
||||
"""
|
||||
Retrieve logs for a specific task with optional filtering.
|
||||
Supports filtering by level, source, and text search.
|
||||
Retrieve logs for a specific task.
|
||||
"""
|
||||
with belief_scope("get_task_logs"):
|
||||
task = task_manager.get_task(task_id)
|
||||
if not task:
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
|
||||
|
||||
log_filter = LogFilter(
|
||||
level=level.upper() if level else None,
|
||||
source=source,
|
||||
search=search,
|
||||
offset=offset,
|
||||
limit=limit
|
||||
)
|
||||
return task_manager.get_task_logs(task_id, log_filter)
|
||||
return task_manager.get_task_logs(task_id)
|
||||
# [/DEF:get_task_logs:Function]
|
||||
|
||||
@router.get("/{task_id}/logs/stats", response_model=LogStats)
|
||||
# [DEF:get_task_log_stats:Function]
|
||||
# @PURPOSE: Get statistics about logs for a task (counts by level and source).
|
||||
# @PARAM: task_id (str) - The unique identifier of the task.
|
||||
# @PARAM: task_manager (TaskManager) - The task manager instance.
|
||||
# @PRE: task_id must exist.
|
||||
# @POST: Returns log statistics or raises 404.
|
||||
# @RETURN: LogStats - Statistics about task logs.
|
||||
async def get_task_log_stats(
|
||||
task_id: str,
|
||||
task_manager: TaskManager = Depends(get_task_manager),
|
||||
_ = Depends(has_permission("tasks", "READ"))
|
||||
):
|
||||
"""
|
||||
Get statistics about logs for a task (counts by level and source).
|
||||
"""
|
||||
with belief_scope("get_task_log_stats"):
|
||||
task = task_manager.get_task(task_id)
|
||||
if not task:
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
|
||||
return task_manager.get_task_log_stats(task_id)
|
||||
# [/DEF:get_task_log_stats:Function]
|
||||
|
||||
@router.get("/{task_id}/logs/sources", response_model=List[str])
|
||||
# [DEF:get_task_log_sources:Function]
|
||||
# @PURPOSE: Get unique sources for a task's logs.
|
||||
# @PARAM: task_id (str) - The unique identifier of the task.
|
||||
# @PARAM: task_manager (TaskManager) - The task manager instance.
|
||||
# @PRE: task_id must exist.
|
||||
# @POST: Returns list of unique source names or raises 404.
|
||||
# @RETURN: List[str] - Unique source names.
|
||||
async def get_task_log_sources(
|
||||
task_id: str,
|
||||
task_manager: TaskManager = Depends(get_task_manager),
|
||||
_ = Depends(has_permission("tasks", "READ"))
|
||||
):
|
||||
"""
|
||||
Get unique sources for a task's logs.
|
||||
"""
|
||||
with belief_scope("get_task_log_sources"):
|
||||
task = task_manager.get_task(task_id)
|
||||
if not task:
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
|
||||
return task_manager.get_task_log_sources(task_id)
|
||||
# [/DEF:get_task_log_sources:Function]
|
||||
|
||||
@router.post("/{task_id}/resolve", response_model=Task)
|
||||
# [DEF:resolve_task:Function]
|
||||
# @PURPOSE: Resolve a task that is awaiting mapping.
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
# [DEF:AppModule:Module]
|
||||
# @TIER: CRITICAL
|
||||
# @SEMANTICS: app, main, entrypoint, fastapi
|
||||
# @PURPOSE: The main entry point for the FastAPI application. It initializes the app, configures CORS, sets up dependencies, includes API routers, and defines the WebSocket endpoint for log streaming.
|
||||
# @LAYER: UI (API)
|
||||
# @RELATION: Depends on the dependency module and API route modules.
|
||||
# @INVARIANT: Only one FastAPI app instance exists per process.
|
||||
# @INVARIANT: All WebSocket connections must be properly cleaned up on disconnect.
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
@@ -21,7 +18,6 @@ import asyncio
|
||||
import os
|
||||
|
||||
from .dependencies import get_task_manager, get_scheduler_service
|
||||
from .core.utils.network import NetworkError
|
||||
from .core.logger import logger, belief_scope
|
||||
from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm
|
||||
from .api import auth
|
||||
@@ -81,34 +77,13 @@ app.add_middleware(
|
||||
# @POST: Logs request and response details.
|
||||
# @PARAM: request (Request) - The incoming request object.
|
||||
# @PARAM: call_next (Callable) - The next middleware or route handler.
|
||||
@app.exception_handler(NetworkError)
|
||||
async def network_error_handler(request: Request, exc: NetworkError):
|
||||
with belief_scope("network_error_handler"):
|
||||
logger.error(f"Network error: {exc}")
|
||||
return HTTPException(
|
||||
status_code=503,
|
||||
detail="Environment unavailable. Please check if the Superset instance is running."
|
||||
)
|
||||
|
||||
@app.middleware("http")
|
||||
async def log_requests(request: Request, call_next):
|
||||
# Avoid spamming logs for polling endpoints
|
||||
is_polling = request.url.path.endswith("/api/tasks") and request.method == "GET"
|
||||
|
||||
if not is_polling:
|
||||
logger.info(f"Incoming request: {request.method} {request.url.path}")
|
||||
|
||||
try:
|
||||
with belief_scope("log_requests", f"{request.method} {request.url.path}"):
|
||||
logger.info(f"[DEBUG] Incoming request: {request.method} {request.url.path}")
|
||||
response = await call_next(request)
|
||||
if not is_polling:
|
||||
logger.info(f"Response status: {response.status_code} for {request.url.path}")
|
||||
logger.info(f"[DEBUG] Response status: {response.status_code} for {request.url.path}")
|
||||
return response
|
||||
except NetworkError as e:
|
||||
logger.error(f"Network error caught in middleware: {e}")
|
||||
raise HTTPException(
|
||||
status_code=503,
|
||||
detail="Environment unavailable. Please check if the Superset instance is running."
|
||||
)
|
||||
# [/DEF:log_requests:Function]
|
||||
|
||||
# Include API routes
|
||||
@@ -126,65 +101,26 @@ app.include_router(llm.router)
|
||||
app.include_router(storage.router, prefix="/api/storage", tags=["Storage"])
|
||||
|
||||
# [DEF:websocket_endpoint:Function]
|
||||
# @PURPOSE: Provides a WebSocket endpoint for real-time log streaming of a task with server-side filtering.
|
||||
# @PURPOSE: Provides a WebSocket endpoint for real-time log streaming of a task.
|
||||
# @PRE: task_id must be a valid task ID.
|
||||
# @POST: WebSocket connection is managed and logs are streamed until disconnect.
|
||||
# @TIER: CRITICAL
|
||||
# @UX_STATE: Connecting -> Streaming -> (Disconnected)
|
||||
@app.websocket("/ws/logs/{task_id}")
|
||||
async def websocket_endpoint(
|
||||
websocket: WebSocket,
|
||||
task_id: str,
|
||||
source: str = None,
|
||||
level: str = None
|
||||
):
|
||||
"""
|
||||
WebSocket endpoint for real-time log streaming with optional server-side filtering.
|
||||
|
||||
Query Parameters:
|
||||
source: Filter logs by source component (e.g., "plugin", "superset_api")
|
||||
level: Filter logs by minimum level (DEBUG, INFO, WARNING, ERROR)
|
||||
"""
|
||||
async def websocket_endpoint(websocket: WebSocket, task_id: str):
|
||||
with belief_scope("websocket_endpoint", f"task_id={task_id}"):
|
||||
await websocket.accept()
|
||||
|
||||
# Normalize filter parameters
|
||||
source_filter = source.lower() if source else None
|
||||
level_filter = level.upper() if level else None
|
||||
|
||||
# Level hierarchy for filtering
|
||||
level_hierarchy = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
|
||||
min_level = level_hierarchy.get(level_filter, 0) if level_filter else 0
|
||||
|
||||
logger.info(f"WebSocket connection accepted for task {task_id} (source={source_filter}, level={level_filter})")
|
||||
logger.info(f"WebSocket connection accepted for task {task_id}")
|
||||
task_manager = get_task_manager()
|
||||
queue = await task_manager.subscribe_logs(task_id)
|
||||
|
||||
def matches_filters(log_entry) -> bool:
|
||||
"""Check if log entry matches the filter criteria."""
|
||||
# Check source filter
|
||||
if source_filter and log_entry.source.lower() != source_filter:
|
||||
return False
|
||||
|
||||
# Check level filter
|
||||
if level_filter:
|
||||
log_level = level_hierarchy.get(log_entry.level.upper(), 0)
|
||||
if log_level < min_level:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
try:
|
||||
# Stream new logs
|
||||
logger.info(f"Starting log stream for task {task_id}")
|
||||
|
||||
# Send initial logs first to build context (apply filters)
|
||||
# Send initial logs first to build context
|
||||
initial_logs = task_manager.get_task_logs(task_id)
|
||||
for log_entry in initial_logs:
|
||||
if matches_filters(log_entry):
|
||||
log_dict = log_entry.dict()
|
||||
log_dict['timestamp'] = log_dict['timestamp'].isoformat()
|
||||
await websocket.send_json(log_dict)
|
||||
log_dict = log_entry.dict()
|
||||
log_dict['timestamp'] = log_dict['timestamp'].isoformat()
|
||||
await websocket.send_json(log_dict)
|
||||
|
||||
# Force a check for AWAITING_INPUT status immediately upon connection
|
||||
# This ensures that if the task is already waiting when the user connects, they get the prompt.
|
||||
@@ -202,11 +138,6 @@ async def websocket_endpoint(
|
||||
|
||||
while True:
|
||||
log_entry = await queue.get()
|
||||
|
||||
# Apply server-side filtering
|
||||
if not matches_filters(log_entry):
|
||||
continue
|
||||
|
||||
log_dict = log_entry.dict()
|
||||
log_dict['timestamp'] = log_dict['timestamp'].isoformat()
|
||||
await websocket.send_json(log_dict)
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.core.auth.jwt:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: jwt, token, session, auth
|
||||
# @PURPOSE: JWT token generation and validation logic.
|
||||
# @LAYER: Core
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.core.auth.logger:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: auth, logger, audit, security
|
||||
# @PURPOSE: Audit logging for security-related events.
|
||||
# @LAYER: Core
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:ConfigModels:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: config, models, pydantic
|
||||
# @PURPOSE: Defines the data models for application configuration using Pydantic.
|
||||
# @LAYER: Core
|
||||
@@ -35,7 +34,6 @@ class Environment(BaseModel):
|
||||
# @PURPOSE: Defines the configuration for the application's logging system.
|
||||
class LoggingConfig(BaseModel):
|
||||
level: str = "INFO"
|
||||
task_log_level: str = "INFO" # Minimum level for task-specific logs (DEBUG, INFO, WARNING, ERROR)
|
||||
file_path: Optional[str] = "logs/app.log"
|
||||
max_bytes: int = 10 * 1024 * 1024
|
||||
backup_count: int = 5
|
||||
|
||||
@@ -19,9 +19,6 @@ _belief_state = threading.local()
|
||||
# Global flag for belief state logging
|
||||
_enable_belief_state = True
|
||||
|
||||
# Global task log level filter
|
||||
_task_log_level = "INFO"
|
||||
|
||||
# [DEF:BeliefFormatter:Class]
|
||||
# @PURPOSE: Custom logging formatter that adds belief state prefixes to log messages.
|
||||
class BeliefFormatter(logging.Formatter):
|
||||
@@ -61,12 +58,12 @@ class LogEntry(BaseModel):
|
||||
# @SEMANTICS: logging, context, belief_state
|
||||
@contextmanager
|
||||
def belief_scope(anchor_id: str, message: str = ""):
|
||||
# Log Entry if enabled (DEBUG level to reduce noise)
|
||||
# Log Entry if enabled
|
||||
if _enable_belief_state:
|
||||
entry_msg = f"[{anchor_id}][Entry]"
|
||||
if message:
|
||||
entry_msg += f" {message}"
|
||||
logger.debug(entry_msg)
|
||||
logger.info(entry_msg)
|
||||
|
||||
# Set thread-local anchor_id
|
||||
old_anchor = getattr(_belief_state, 'anchor_id', None)
|
||||
@@ -74,13 +71,13 @@ def belief_scope(anchor_id: str, message: str = ""):
|
||||
|
||||
try:
|
||||
yield
|
||||
# Log Coherence OK and Exit (DEBUG level to reduce noise)
|
||||
logger.debug(f"[{anchor_id}][Coherence:OK]")
|
||||
# Log Coherence OK and Exit
|
||||
logger.info(f"[{anchor_id}][Coherence:OK]")
|
||||
if _enable_belief_state:
|
||||
logger.debug(f"[{anchor_id}][Exit]")
|
||||
logger.info(f"[{anchor_id}][Exit]")
|
||||
except Exception as e:
|
||||
# Log Coherence Failed (DEBUG level to reduce noise)
|
||||
logger.debug(f"[{anchor_id}][Coherence:Failed] {str(e)}")
|
||||
# Log Coherence Failed
|
||||
logger.info(f"[{anchor_id}][Coherence:Failed] {str(e)}")
|
||||
raise
|
||||
finally:
|
||||
# Restore old anchor
|
||||
@@ -91,13 +88,12 @@ def belief_scope(anchor_id: str, message: str = ""):
|
||||
# [DEF:configure_logger:Function]
|
||||
# @PURPOSE: Configures the logger with the provided logging settings.
|
||||
# @PRE: config is a valid LoggingConfig instance.
|
||||
# @POST: Logger level, handlers, belief state flag, and task log level are updated.
|
||||
# @POST: Logger level, handlers, and belief state flag are updated.
|
||||
# @PARAM: config (LoggingConfig) - The logging configuration.
|
||||
# @SEMANTICS: logging, configuration, initialization
|
||||
def configure_logger(config):
|
||||
global _enable_belief_state, _task_log_level
|
||||
global _enable_belief_state
|
||||
_enable_belief_state = config.enable_belief_state
|
||||
_task_log_level = config.task_log_level.upper()
|
||||
|
||||
# Set logger level
|
||||
level = getattr(logging, config.level.upper(), logging.INFO)
|
||||
@@ -134,36 +130,6 @@ def configure_logger(config):
|
||||
))
|
||||
# [/DEF:configure_logger:Function]
|
||||
|
||||
# [DEF:get_task_log_level:Function]
|
||||
# @PURPOSE: Returns the current task log level filter.
|
||||
# @PRE: None.
|
||||
# @POST: Returns the task log level string.
|
||||
# @RETURN: str - The current task log level (DEBUG, INFO, WARNING, ERROR).
|
||||
# @SEMANTICS: logging, configuration, getter
|
||||
def get_task_log_level() -> str:
|
||||
"""Returns the current task log level filter."""
|
||||
return _task_log_level
|
||||
# [/DEF:get_task_log_level:Function]
|
||||
|
||||
# [DEF:should_log_task_level:Function]
|
||||
# @PURPOSE: Checks if a log level should be recorded based on task_log_level setting.
|
||||
# @PRE: level is a valid log level string.
|
||||
# @POST: Returns True if level meets or exceeds task_log_level threshold.
|
||||
# @PARAM: level (str) - The log level to check.
|
||||
# @RETURN: bool - True if the level should be logged.
|
||||
# @SEMANTICS: logging, filter, level
|
||||
def should_log_task_level(level: str) -> bool:
|
||||
"""Checks if a log level should be recorded based on task_log_level setting."""
|
||||
level_order = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
|
||||
current_level = _task_log_level.upper()
|
||||
check_level = level.upper()
|
||||
|
||||
current_order = level_order.get(current_level, 1) # Default to INFO
|
||||
check_order = level_order.get(check_level, 1)
|
||||
|
||||
return check_order >= current_order
|
||||
# [/DEF:should_log_task_level:Function]
|
||||
|
||||
# [DEF:WebSocketLogHandler:Class]
|
||||
# @SEMANTICS: logging, handler, websocket, buffer
|
||||
# @PURPOSE: A custom logging handler that captures log records into a buffer. It is designed to be extended for real-time log streaming over WebSockets.
|
||||
|
||||
@@ -7,7 +7,6 @@ from jsonschema import validate
|
||||
from .logger import belief_scope
|
||||
|
||||
# [DEF:PluginLoader:Class]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: plugin, loader, dynamic, import
|
||||
# @PURPOSE: Scans a specified directory for Python modules, dynamically loads them, and registers any classes that are valid implementations of the PluginBase interface.
|
||||
# @LAYER: Core
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:SchedulerModule:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: scheduler, apscheduler, cron, backup
|
||||
# @PURPOSE: Manages scheduled tasks using APScheduler.
|
||||
# @LAYER: Core
|
||||
@@ -15,7 +14,6 @@ import asyncio
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:SchedulerService:Class]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: scheduler, service, apscheduler
|
||||
# @PURPOSE: Provides a service to manage scheduled backup tasks.
|
||||
class SchedulerService:
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:TaskManagerPackage:Module]
|
||||
# @TIER: TRIVIAL
|
||||
# @SEMANTICS: task, manager, package, exports
|
||||
# @PURPOSE: Exports the public API of the task manager package.
|
||||
# @LAYER: Core
|
||||
|
||||
@@ -1,76 +1,47 @@
|
||||
# [DEF:TaskCleanupModule:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: task, cleanup, retention, logs
|
||||
# @PURPOSE: Implements task cleanup and retention policies, including associated logs.
|
||||
# @SEMANTICS: task, cleanup, retention
|
||||
# @PURPOSE: Implements task cleanup and retention policies.
|
||||
# @LAYER: Core
|
||||
# @RELATION: Uses TaskPersistenceService and TaskLogPersistenceService to delete old tasks and logs.
|
||||
# @RELATION: Uses TaskPersistenceService to delete old tasks.
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from typing import List
|
||||
from .persistence import TaskPersistenceService, TaskLogPersistenceService
|
||||
from .persistence import TaskPersistenceService
|
||||
from ..logger import logger, belief_scope
|
||||
from ..config_manager import ConfigManager
|
||||
|
||||
# [DEF:TaskCleanupService:Class]
|
||||
# @PURPOSE: Provides methods to clean up old task records and their associated logs.
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Provides methods to clean up old task records.
|
||||
class TaskCleanupService:
|
||||
# [DEF:__init__:Function]
|
||||
# @PURPOSE: Initializes the cleanup service with dependencies.
|
||||
# @PRE: persistence_service and config_manager are valid.
|
||||
# @POST: Cleanup service is ready.
|
||||
def __init__(
|
||||
self,
|
||||
persistence_service: TaskPersistenceService,
|
||||
log_persistence_service: TaskLogPersistenceService,
|
||||
config_manager: ConfigManager
|
||||
):
|
||||
def __init__(self, persistence_service: TaskPersistenceService, config_manager: ConfigManager):
|
||||
self.persistence_service = persistence_service
|
||||
self.log_persistence_service = log_persistence_service
|
||||
self.config_manager = config_manager
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
# [DEF:run_cleanup:Function]
|
||||
# @PURPOSE: Deletes tasks older than the configured retention period and their logs.
|
||||
# @PURPOSE: Deletes tasks older than the configured retention period.
|
||||
# @PRE: Config manager has valid settings.
|
||||
# @POST: Old tasks and their logs are deleted from persistence.
|
||||
# @POST: Old tasks are deleted from persistence.
|
||||
def run_cleanup(self):
|
||||
with belief_scope("TaskCleanupService.run_cleanup"):
|
||||
settings = self.config_manager.get_config().settings
|
||||
retention_days = settings.task_retention_days
|
||||
|
||||
# This is a simplified implementation.
|
||||
# In a real scenario, we would query IDs of tasks older than retention_days.
|
||||
# For now, we'll log the action.
|
||||
logger.info(f"Cleaning up tasks older than {retention_days} days.")
|
||||
|
||||
# Load tasks to check for limit
|
||||
# Re-loading tasks to check for limit
|
||||
tasks = self.persistence_service.load_tasks(limit=1000)
|
||||
if len(tasks) > settings.task_retention_limit:
|
||||
to_delete: List[str] = [t.id for t in tasks[settings.task_retention_limit:]]
|
||||
|
||||
# Delete logs first (before task records)
|
||||
self.log_persistence_service.delete_logs_for_tasks(to_delete)
|
||||
|
||||
# Then delete task records
|
||||
to_delete = [t.id for t in tasks[settings.task_retention_limit:]]
|
||||
self.persistence_service.delete_tasks(to_delete)
|
||||
|
||||
logger.info(f"Deleted {len(to_delete)} tasks and their logs exceeding limit of {settings.task_retention_limit}")
|
||||
logger.info(f"Deleted {len(to_delete)} tasks exceeding limit of {settings.task_retention_limit}")
|
||||
# [/DEF:run_cleanup:Function]
|
||||
|
||||
# [DEF:delete_task_with_logs:Function]
|
||||
# @PURPOSE: Delete a single task and all its associated logs.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: Task and all its logs are deleted.
|
||||
# @PARAM: task_id (str) - The task ID to delete.
|
||||
def delete_task_with_logs(self, task_id: str) -> None:
|
||||
"""Delete a single task and all its associated logs."""
|
||||
with belief_scope("TaskCleanupService.delete_task_with_logs", f"task_id={task_id}"):
|
||||
# Delete logs first
|
||||
self.log_persistence_service.delete_logs_for_task(task_id)
|
||||
|
||||
# Then delete task record
|
||||
self.persistence_service.delete_tasks([task_id])
|
||||
|
||||
logger.info(f"Deleted task {task_id} and its associated logs")
|
||||
# [/DEF:delete_task_with_logs:Function]
|
||||
|
||||
# [/DEF:TaskCleanupService:Class]
|
||||
# [/DEF:TaskCleanupModule:Module]
|
||||
@@ -1,115 +0,0 @@
|
||||
# [DEF:TaskContextModule:Module]
|
||||
# @SEMANTICS: task, context, plugin, execution, logger
|
||||
# @PURPOSE: Provides execution context passed to plugins during task execution.
|
||||
# @LAYER: Core
|
||||
# @RELATION: DEPENDS_ON -> TaskLogger, USED_BY -> plugins
|
||||
# @TIER: CRITICAL
|
||||
# @INVARIANT: Each TaskContext is bound to a single task execution.
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
from typing import Dict, Any, Optional, Callable
|
||||
from .task_logger import TaskLogger
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TaskContext:Class]
|
||||
# @SEMANTICS: context, task, execution, plugin
|
||||
# @PURPOSE: A container passed to plugin.execute() providing the logger and other task-specific utilities.
|
||||
# @TIER: CRITICAL
|
||||
# @INVARIANT: logger is always a valid TaskLogger instance.
|
||||
# @UX_STATE: Idle -> Active -> Complete
|
||||
class TaskContext:
|
||||
"""
|
||||
Execution context provided to plugins during task execution.
|
||||
|
||||
Usage:
|
||||
def execute(params: dict, context: TaskContext = None):
|
||||
if context:
|
||||
context.logger.info("Starting process")
|
||||
context.logger.progress("Processing items", percent=50)
|
||||
# ... plugin logic
|
||||
"""
|
||||
|
||||
# [DEF:__init__:Function]
|
||||
# @PURPOSE: Initialize the TaskContext with task-specific resources.
|
||||
# @PRE: task_id is a valid task identifier, add_log_fn is callable.
|
||||
# @POST: TaskContext is ready to be passed to plugin.execute().
|
||||
# @PARAM: task_id (str) - The ID of the task.
|
||||
# @PARAM: add_log_fn (Callable) - Function to add log to TaskManager.
|
||||
# @PARAM: params (Dict) - Task parameters.
|
||||
# @PARAM: default_source (str) - Default source for logs (default: "plugin").
|
||||
def __init__(
|
||||
self,
|
||||
task_id: str,
|
||||
add_log_fn: Callable,
|
||||
params: Dict[str, Any],
|
||||
default_source: str = "plugin"
|
||||
):
|
||||
self._task_id = task_id
|
||||
self._params = params
|
||||
self._logger = TaskLogger(
|
||||
task_id=task_id,
|
||||
add_log_fn=add_log_fn,
|
||||
source=default_source
|
||||
)
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
# [DEF:task_id:Function]
|
||||
# @PURPOSE: Get the task ID.
|
||||
# @PRE: TaskContext must be initialized.
|
||||
# @POST: Returns the task ID string.
|
||||
# @RETURN: str - The task ID.
|
||||
@property
|
||||
def task_id(self) -> str:
|
||||
return self._task_id
|
||||
# [/DEF:task_id:Function]
|
||||
|
||||
# [DEF:logger:Function]
|
||||
# @PURPOSE: Get the TaskLogger instance for this context.
|
||||
# @PRE: TaskContext must be initialized.
|
||||
# @POST: Returns the TaskLogger instance.
|
||||
# @RETURN: TaskLogger - The logger instance.
|
||||
@property
|
||||
def logger(self) -> TaskLogger:
|
||||
return self._logger
|
||||
# [/DEF:logger:Function]
|
||||
|
||||
# [DEF:params:Function]
|
||||
# @PURPOSE: Get the task parameters.
|
||||
# @PRE: TaskContext must be initialized.
|
||||
# @POST: Returns the parameters dictionary.
|
||||
# @RETURN: Dict[str, Any] - The task parameters.
|
||||
@property
|
||||
def params(self) -> Dict[str, Any]:
|
||||
return self._params
|
||||
# [/DEF:params:Function]
|
||||
|
||||
# [DEF:get_param:Function]
|
||||
# @PURPOSE: Get a specific parameter value with optional default.
|
||||
# @PRE: TaskContext must be initialized.
|
||||
# @POST: Returns parameter value or default.
|
||||
# @PARAM: key (str) - Parameter key.
|
||||
# @PARAM: default (Any) - Default value if key not found.
|
||||
# @RETURN: Any - Parameter value or default.
|
||||
def get_param(self, key: str, default: Any = None) -> Any:
|
||||
return self._params.get(key, default)
|
||||
# [/DEF:get_param:Function]
|
||||
|
||||
# [DEF:create_sub_context:Function]
|
||||
# @PURPOSE: Create a sub-context with a different default source.
|
||||
# @PRE: source is a non-empty string.
|
||||
# @POST: Returns new TaskContext with different logger source.
|
||||
# @PARAM: source (str) - New default source for logging.
|
||||
# @RETURN: TaskContext - New context with different source.
|
||||
def create_sub_context(self, source: str) -> "TaskContext":
|
||||
"""Create a sub-context with a different default source for logging."""
|
||||
return TaskContext(
|
||||
task_id=self._task_id,
|
||||
add_log_fn=self._logger._add_log,
|
||||
params=self._params,
|
||||
default_source=source
|
||||
)
|
||||
# [/DEF:create_sub_context:Function]
|
||||
|
||||
# [/DEF:TaskContext:Class]
|
||||
|
||||
# [/DEF:TaskContextModule:Module]
|
||||
@@ -8,33 +8,23 @@
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
import asyncio
|
||||
import threading
|
||||
import inspect
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
from .models import Task, TaskStatus, LogEntry, LogFilter, LogStats, TaskLog
|
||||
from .persistence import TaskPersistenceService, TaskLogPersistenceService
|
||||
from .context import TaskContext
|
||||
from ..logger import logger, belief_scope, should_log_task_level
|
||||
from .models import Task, TaskStatus, LogEntry
|
||||
from .persistence import TaskPersistenceService
|
||||
from ..logger import logger, belief_scope
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TaskManager:Class]
|
||||
# @SEMANTICS: task, manager, lifecycle, execution, state
|
||||
# @PURPOSE: Manages the lifecycle of tasks, including their creation, execution, and state tracking.
|
||||
# @TIER: CRITICAL
|
||||
# @INVARIANT: Task IDs are unique within the registry.
|
||||
# @INVARIANT: Each task has exactly one status at any time.
|
||||
# @INVARIANT: Log entries are never deleted after being added to a task.
|
||||
class TaskManager:
|
||||
"""
|
||||
Manages the lifecycle of tasks, including their creation, execution, and state tracking.
|
||||
"""
|
||||
|
||||
# Log flush interval in seconds
|
||||
LOG_FLUSH_INTERVAL = 2.0
|
||||
|
||||
# [DEF:__init__:Function]
|
||||
# @PURPOSE: Initialize the TaskManager with dependencies.
|
||||
# @PRE: plugin_loader is initialized.
|
||||
@@ -45,18 +35,8 @@ class TaskManager:
|
||||
self.plugin_loader = plugin_loader
|
||||
self.tasks: Dict[str, Task] = {}
|
||||
self.subscribers: Dict[str, List[asyncio.Queue]] = {}
|
||||
self.executor = ThreadPoolExecutor(max_workers=5) # For CPU-bound plugin execution
|
||||
self.executor = ThreadPoolExecutor(max_workers=5) # For CPU-bound plugin execution
|
||||
self.persistence_service = TaskPersistenceService()
|
||||
self.log_persistence_service = TaskLogPersistenceService()
|
||||
|
||||
# Log buffer: task_id -> List[LogEntry]
|
||||
self._log_buffer: Dict[str, List[LogEntry]] = {}
|
||||
self._log_buffer_lock = threading.Lock()
|
||||
|
||||
# Flusher thread for batch writing logs
|
||||
self._flusher_stop_event = threading.Event()
|
||||
self._flusher_thread = threading.Thread(target=self._flusher_loop, daemon=True)
|
||||
self._flusher_thread.start()
|
||||
|
||||
try:
|
||||
self.loop = asyncio.get_running_loop()
|
||||
@@ -67,59 +47,6 @@ class TaskManager:
|
||||
# Load persisted tasks on startup
|
||||
self.load_persisted_tasks()
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
# [DEF:_flusher_loop:Function]
|
||||
# @PURPOSE: Background thread that periodically flushes log buffer to database.
|
||||
# @PRE: TaskManager is initialized.
|
||||
# @POST: Logs are batch-written to database every LOG_FLUSH_INTERVAL seconds.
|
||||
def _flusher_loop(self):
|
||||
"""Background thread that flushes log buffer to database."""
|
||||
while not self._flusher_stop_event.is_set():
|
||||
self._flush_logs()
|
||||
self._flusher_stop_event.wait(self.LOG_FLUSH_INTERVAL)
|
||||
# [/DEF:_flusher_loop:Function]
|
||||
|
||||
# [DEF:_flush_logs:Function]
|
||||
# @PURPOSE: Flush all buffered logs to the database.
|
||||
# @PRE: None.
|
||||
# @POST: All buffered logs are written to task_logs table.
|
||||
def _flush_logs(self):
|
||||
"""Flush all buffered logs to the database."""
|
||||
with self._log_buffer_lock:
|
||||
task_ids = list(self._log_buffer.keys())
|
||||
|
||||
for task_id in task_ids:
|
||||
with self._log_buffer_lock:
|
||||
logs = self._log_buffer.pop(task_id, [])
|
||||
|
||||
if logs:
|
||||
try:
|
||||
self.log_persistence_service.add_logs(task_id, logs)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to flush logs for task {task_id}: {e}")
|
||||
# Re-add logs to buffer on failure
|
||||
with self._log_buffer_lock:
|
||||
if task_id not in self._log_buffer:
|
||||
self._log_buffer[task_id] = []
|
||||
self._log_buffer[task_id].extend(logs)
|
||||
# [/DEF:_flush_logs:Function]
|
||||
|
||||
# [DEF:_flush_task_logs:Function]
|
||||
# @PURPOSE: Flush logs for a specific task immediately.
|
||||
# @PRE: task_id exists.
|
||||
# @POST: Task's buffered logs are written to database.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
def _flush_task_logs(self, task_id: str):
|
||||
"""Flush logs for a specific task immediately."""
|
||||
with self._log_buffer_lock:
|
||||
logs = self._log_buffer.pop(task_id, [])
|
||||
|
||||
if logs:
|
||||
try:
|
||||
self.log_persistence_service.add_logs(task_id, logs)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to flush logs for task {task_id}: {e}")
|
||||
# [/DEF:_flush_task_logs:Function]
|
||||
|
||||
# [DEF:create_task:Function]
|
||||
# @PURPOSE: Creates and queues a new task for execution.
|
||||
@@ -151,7 +78,7 @@ class TaskManager:
|
||||
# [/DEF:create_task:Function]
|
||||
|
||||
# [DEF:_run_task:Function]
|
||||
# @PURPOSE: Internal method to execute a task with TaskContext support.
|
||||
# @PURPOSE: Internal method to execute a task.
|
||||
# @PRE: Task exists in registry.
|
||||
# @POST: Task is executed, status updated to SUCCESS or FAILED.
|
||||
# @PARAM: task_id (str) - The ID of the task to run.
|
||||
@@ -164,54 +91,30 @@ class TaskManager:
|
||||
task.status = TaskStatus.RUNNING
|
||||
task.started_at = datetime.utcnow()
|
||||
self.persistence_service.persist_task(task)
|
||||
self._add_log(task_id, "INFO", f"Task started for plugin '{plugin.name}'", source="system")
|
||||
self._add_log(task_id, "INFO", f"Task started for plugin '{plugin.name}'")
|
||||
|
||||
try:
|
||||
# Prepare params and check if plugin supports new TaskContext
|
||||
# Execute plugin
|
||||
params = {**task.params, "_task_id": task_id}
|
||||
|
||||
# Check if plugin's execute method accepts 'context' parameter
|
||||
sig = inspect.signature(plugin.execute)
|
||||
accepts_context = 'context' in sig.parameters
|
||||
|
||||
if accepts_context:
|
||||
# Create TaskContext for new-style plugins
|
||||
context = TaskContext(
|
||||
task_id=task_id,
|
||||
add_log_fn=self._add_log,
|
||||
params=params,
|
||||
default_source="plugin"
|
||||
)
|
||||
|
||||
if asyncio.iscoroutinefunction(plugin.execute):
|
||||
task.result = await plugin.execute(params, context=context)
|
||||
else:
|
||||
task.result = await self.loop.run_in_executor(
|
||||
self.executor,
|
||||
lambda: plugin.execute(params, context=context)
|
||||
)
|
||||
if asyncio.iscoroutinefunction(plugin.execute):
|
||||
task.result = await plugin.execute(params)
|
||||
else:
|
||||
# Backward compatibility: old-style plugins without context
|
||||
if asyncio.iscoroutinefunction(plugin.execute):
|
||||
task.result = await plugin.execute(params)
|
||||
else:
|
||||
task.result = await self.loop.run_in_executor(
|
||||
self.executor,
|
||||
plugin.execute,
|
||||
params
|
||||
)
|
||||
task.result = await self.loop.run_in_executor(
|
||||
self.executor,
|
||||
plugin.execute,
|
||||
params
|
||||
)
|
||||
|
||||
logger.info(f"Task {task_id} completed successfully")
|
||||
task.status = TaskStatus.SUCCESS
|
||||
self._add_log(task_id, "INFO", f"Task completed successfully for plugin '{plugin.name}'", source="system")
|
||||
self._add_log(task_id, "INFO", f"Task completed successfully for plugin '{plugin.name}'")
|
||||
except Exception as e:
|
||||
logger.error(f"Task {task_id} failed: {e}")
|
||||
task.status = TaskStatus.FAILED
|
||||
self._add_log(task_id, "ERROR", f"Task failed: {e}", source="system", metadata={"error_type": type(e).__name__})
|
||||
self._add_log(task_id, "ERROR", f"Task failed: {e}", {"error_type": type(e).__name__})
|
||||
finally:
|
||||
task.finished_at = datetime.utcnow()
|
||||
# Flush any remaining buffered logs before persisting task
|
||||
self._flush_task_logs(task_id)
|
||||
self.persistence_service.persist_task(task)
|
||||
logger.info(f"Task {task_id} execution finished with status: {task.status}")
|
||||
# [/DEF:_run_task:Function]
|
||||
@@ -321,106 +224,36 @@ class TaskManager:
|
||||
# [/DEF:get_tasks:Function]
|
||||
|
||||
# [DEF:get_task_logs:Function]
|
||||
# @PURPOSE: Retrieves logs for a specific task (from memory for running, persistence for completed).
|
||||
# @PURPOSE: Retrieves logs for a specific task.
|
||||
# @PRE: task_id is a string.
|
||||
# @POST: Returns list of LogEntry or TaskLog objects.
|
||||
# @POST: Returns list of LogEntry objects.
|
||||
# @PARAM: task_id (str) - ID of the task.
|
||||
# @PARAM: log_filter (Optional[LogFilter]) - Filter parameters.
|
||||
# @RETURN: List[LogEntry] - List of log entries.
|
||||
def get_task_logs(self, task_id: str, log_filter: Optional[LogFilter] = None) -> List[LogEntry]:
|
||||
def get_task_logs(self, task_id: str) -> List[LogEntry]:
|
||||
with belief_scope("TaskManager.get_task_logs", f"task_id={task_id}"):
|
||||
task = self.tasks.get(task_id)
|
||||
|
||||
# For completed tasks, fetch from persistence
|
||||
if task and task.status in [TaskStatus.SUCCESS, TaskStatus.FAILED]:
|
||||
if log_filter is None:
|
||||
log_filter = LogFilter()
|
||||
task_logs = self.log_persistence_service.get_logs(task_id, log_filter)
|
||||
# Convert TaskLog to LogEntry for backward compatibility
|
||||
return [
|
||||
LogEntry(
|
||||
timestamp=log.timestamp,
|
||||
level=log.level,
|
||||
message=log.message,
|
||||
source=log.source,
|
||||
metadata=log.metadata
|
||||
)
|
||||
for log in task_logs
|
||||
]
|
||||
|
||||
# For running/pending tasks, return from memory
|
||||
return task.logs if task else []
|
||||
# [/DEF:get_task_logs:Function]
|
||||
|
||||
# [DEF:get_task_log_stats:Function]
|
||||
# @PURPOSE: Get statistics about logs for a task.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: Returns LogStats with counts by level and source.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @RETURN: LogStats - Statistics about task logs.
|
||||
def get_task_log_stats(self, task_id: str) -> LogStats:
|
||||
with belief_scope("TaskManager.get_task_log_stats", f"task_id={task_id}"):
|
||||
return self.log_persistence_service.get_log_stats(task_id)
|
||||
# [/DEF:get_task_log_stats:Function]
|
||||
|
||||
# [DEF:get_task_log_sources:Function]
|
||||
# @PURPOSE: Get unique sources for a task's logs.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: Returns list of unique source strings.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @RETURN: List[str] - Unique source names.
|
||||
def get_task_log_sources(self, task_id: str) -> List[str]:
|
||||
with belief_scope("TaskManager.get_task_log_sources", f"task_id={task_id}"):
|
||||
return self.log_persistence_service.get_sources(task_id)
|
||||
# [/DEF:get_task_log_sources:Function]
|
||||
|
||||
# [DEF:_add_log:Function]
|
||||
# @PURPOSE: Adds a log entry to a task buffer and notifies subscribers.
|
||||
# @PURPOSE: Adds a log entry to a task and notifies subscribers.
|
||||
# @PRE: Task exists.
|
||||
# @POST: Log added to buffer and pushed to queues (if level meets task_log_level filter).
|
||||
# @POST: Log added to task and pushed to queues.
|
||||
# @PARAM: task_id (str) - ID of the task.
|
||||
# @PARAM: level (str) - Log level.
|
||||
# @PARAM: message (str) - Log message.
|
||||
# @PARAM: source (str) - Source component (default: "system").
|
||||
# @PARAM: metadata (Optional[Dict]) - Additional structured data.
|
||||
# @PARAM: context (Optional[Dict]) - Legacy context (for backward compatibility).
|
||||
def _add_log(
|
||||
self,
|
||||
task_id: str,
|
||||
level: str,
|
||||
message: str,
|
||||
source: str = "system",
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
context: Optional[Dict[str, Any]] = None
|
||||
):
|
||||
# @PARAM: context (Optional[Dict]) - Log context.
|
||||
def _add_log(self, task_id: str, level: str, message: str, context: Optional[Dict[str, Any]] = None):
|
||||
with belief_scope("TaskManager._add_log", f"task_id={task_id}"):
|
||||
task = self.tasks.get(task_id)
|
||||
if not task:
|
||||
return
|
||||
|
||||
# Filter logs based on task_log_level configuration
|
||||
if not should_log_task_level(level):
|
||||
return
|
||||
|
||||
# Create log entry with new fields
|
||||
log_entry = LogEntry(
|
||||
level=level,
|
||||
message=message,
|
||||
source=source,
|
||||
metadata=metadata,
|
||||
context=context # Keep for backward compatibility
|
||||
)
|
||||
|
||||
# Add to in-memory logs (for backward compatibility with legacy JSON field)
|
||||
log_entry = LogEntry(level=level, message=message, context=context)
|
||||
task.logs.append(log_entry)
|
||||
|
||||
# Add to buffer for batch persistence
|
||||
with self._log_buffer_lock:
|
||||
if task_id not in self._log_buffer:
|
||||
self._log_buffer[task_id] = []
|
||||
self._log_buffer[task_id].append(log_entry)
|
||||
self.persistence_service.persist_task(task)
|
||||
|
||||
# Notify subscribers (for real-time WebSocket updates)
|
||||
# Notify subscribers
|
||||
if task_id in self.subscribers:
|
||||
for queue in self.subscribers[task_id]:
|
||||
self.loop.call_soon_threadsafe(queue.put_nowait, log_entry)
|
||||
@@ -520,7 +353,7 @@ class TaskManager:
|
||||
# [/DEF:resume_task_with_password:Function]
|
||||
|
||||
# [DEF:clear_tasks:Function]
|
||||
# @PURPOSE: Clears tasks based on status filter (also deletes associated logs).
|
||||
# @PURPOSE: Clears tasks based on status filter.
|
||||
# @PRE: status is Optional[TaskStatus].
|
||||
# @POST: Tasks matching filter (or all non-active) cleared from registry and database.
|
||||
# @PARAM: status (Optional[TaskStatus]) - Filter by task status.
|
||||
@@ -554,13 +387,9 @@ class TaskManager:
|
||||
|
||||
del self.tasks[tid]
|
||||
|
||||
# Remove from persistence (task_records and task_logs via CASCADE)
|
||||
# Remove from persistence
|
||||
self.persistence_service.delete_tasks(tasks_to_remove)
|
||||
|
||||
# Also explicitly delete logs (in case CASCADE is not set up)
|
||||
if tasks_to_remove:
|
||||
self.log_persistence_service.delete_logs_for_tasks(tasks_to_remove)
|
||||
|
||||
logger.info(f"Cleared {len(tasks_to_remove)} tasks.")
|
||||
return len(tasks_to_remove)
|
||||
# [/DEF:clear_tasks:Function]
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:TaskManagerModels:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: task, models, pydantic, enum, state
|
||||
# @PURPOSE: Defines the data models and enumerations used by the Task Manager.
|
||||
# @LAYER: Core
|
||||
@@ -17,7 +16,6 @@ from pydantic import BaseModel, Field
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TaskStatus:Enum]
|
||||
# @TIER: TRIVIAL
|
||||
# @SEMANTICS: task, status, state, enum
|
||||
# @PURPOSE: Defines the possible states a task can be in during its lifecycle.
|
||||
class TaskStatus(str, Enum):
|
||||
@@ -29,73 +27,17 @@ class TaskStatus(str, Enum):
|
||||
AWAITING_INPUT = "AWAITING_INPUT"
|
||||
# [/DEF:TaskStatus:Enum]
|
||||
|
||||
# [DEF:LogLevel:Enum]
|
||||
# @SEMANTICS: log, level, severity, enum
|
||||
# @PURPOSE: Defines the possible log levels for task logging.
|
||||
# @TIER: STANDARD
|
||||
class LogLevel(str, Enum):
|
||||
DEBUG = "DEBUG"
|
||||
INFO = "INFO"
|
||||
WARNING = "WARNING"
|
||||
ERROR = "ERROR"
|
||||
# [/DEF:LogLevel:Enum]
|
||||
|
||||
# [DEF:LogEntry:Class]
|
||||
# @SEMANTICS: log, entry, record, pydantic
|
||||
# @PURPOSE: A Pydantic model representing a single, structured log entry associated with a task.
|
||||
# @TIER: CRITICAL
|
||||
# @INVARIANT: Each log entry has a unique timestamp and source.
|
||||
class LogEntry(BaseModel):
|
||||
timestamp: datetime = Field(default_factory=datetime.utcnow)
|
||||
level: str = Field(default="INFO")
|
||||
level: str
|
||||
message: str
|
||||
source: str = Field(default="system") # Component attribution: plugin, superset_api, git, etc.
|
||||
context: Optional[Dict[str, Any]] = None # Legacy field, kept for backward compatibility
|
||||
metadata: Optional[Dict[str, Any]] = None # Structured metadata (e.g., dashboard_id, progress)
|
||||
context: Optional[Dict[str, Any]] = None
|
||||
# [/DEF:LogEntry:Class]
|
||||
|
||||
# [DEF:TaskLog:Class]
|
||||
# @SEMANTICS: task, log, persistent, pydantic
|
||||
# @PURPOSE: A Pydantic model representing a persisted log entry from the database.
|
||||
# @TIER: STANDARD
|
||||
# @RELATION: MAPS_TO -> TaskLogRecord
|
||||
class TaskLog(BaseModel):
|
||||
id: int
|
||||
task_id: str
|
||||
timestamp: datetime
|
||||
level: str
|
||||
source: str
|
||||
message: str
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
# [/DEF:TaskLog:Class]
|
||||
|
||||
# [DEF:LogFilter:Class]
|
||||
# @SEMANTICS: log, filter, query, pydantic
|
||||
# @PURPOSE: Filter parameters for querying task logs.
|
||||
# @TIER: STANDARD
|
||||
class LogFilter(BaseModel):
|
||||
level: Optional[str] = None # Filter by log level
|
||||
source: Optional[str] = None # Filter by source component
|
||||
search: Optional[str] = None # Text search in message
|
||||
offset: int = Field(default=0, ge=0)
|
||||
limit: int = Field(default=100, ge=1, le=1000)
|
||||
# [/DEF:LogFilter:Class]
|
||||
|
||||
# [DEF:LogStats:Class]
|
||||
# @SEMANTICS: log, stats, aggregation, pydantic
|
||||
# @PURPOSE: Statistics about log entries for a task.
|
||||
# @TIER: STANDARD
|
||||
class LogStats(BaseModel):
|
||||
total_count: int
|
||||
by_level: Dict[str, int] # {"INFO": 10, "ERROR": 2}
|
||||
by_source: Dict[str, int] # {"plugin": 5, "superset_api": 7}
|
||||
# [/DEF:LogStats:Class]
|
||||
|
||||
# [DEF:Task:Class]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: task, job, execution, state, pydantic
|
||||
# @PURPOSE: A Pydantic model representing a single execution instance of a plugin, including its status, parameters, and logs.
|
||||
class Task(BaseModel):
|
||||
|
||||
@@ -11,10 +11,9 @@ from typing import List, Optional, Dict, Any
|
||||
import json
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import and_, or_
|
||||
from ...models.task import TaskRecord, TaskLogRecord
|
||||
from ...models.task import TaskRecord
|
||||
from ..database import TasksSessionLocal
|
||||
from .models import Task, TaskStatus, LogEntry, TaskLog, LogFilter, LogStats
|
||||
from .models import Task, TaskStatus, LogEntry
|
||||
from ..logger import logger, belief_scope
|
||||
# [/SECTION]
|
||||
|
||||
@@ -37,7 +36,6 @@ class TaskPersistenceService:
|
||||
# @PRE: isinstance(task, Task)
|
||||
# @POST: Task record created or updated in database.
|
||||
# @PARAM: task (Task) - The task object to persist.
|
||||
# @SIDE_EFFECT: Writes to task_records table in tasks.db
|
||||
def persist_task(self, task: Task) -> None:
|
||||
with belief_scope("TaskPersistenceService.persist_task", f"task_id={task.id}"):
|
||||
session: Session = TasksSessionLocal()
|
||||
@@ -52,19 +50,8 @@ class TaskPersistenceService:
|
||||
record.environment_id = task.params.get("environment_id") or task.params.get("source_env_id")
|
||||
record.started_at = task.started_at
|
||||
record.finished_at = task.finished_at
|
||||
|
||||
# Ensure params and result are JSON serializable
|
||||
def json_serializable(obj):
|
||||
if isinstance(obj, dict):
|
||||
return {k: json_serializable(v) for k, v in obj.items()}
|
||||
elif isinstance(obj, list):
|
||||
return [json_serializable(v) for v in obj]
|
||||
elif isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
return obj
|
||||
|
||||
record.params = json_serializable(task.params)
|
||||
record.result = json_serializable(task.result)
|
||||
record.params = task.params
|
||||
record.result = task.result
|
||||
|
||||
# Store logs as JSON, converting datetime to string
|
||||
record.logs = []
|
||||
@@ -72,9 +59,6 @@ class TaskPersistenceService:
|
||||
log_dict = log.dict()
|
||||
if isinstance(log_dict.get('timestamp'), datetime):
|
||||
log_dict['timestamp'] = log_dict['timestamp'].isoformat()
|
||||
# Also clean up any datetimes in context
|
||||
if log_dict.get('context'):
|
||||
log_dict['context'] = json_serializable(log_dict['context'])
|
||||
record.logs.append(log_dict)
|
||||
|
||||
# Extract error if failed
|
||||
@@ -171,215 +155,4 @@ class TaskPersistenceService:
|
||||
# [/DEF:delete_tasks:Function]
|
||||
|
||||
# [/DEF:TaskPersistenceService:Class]
|
||||
|
||||
# [DEF:TaskLogPersistenceService:Class]
|
||||
# @SEMANTICS: persistence, service, database, log, sqlalchemy
|
||||
# @PURPOSE: Provides methods to save and query task logs from the task_logs table.
|
||||
# @TIER: CRITICAL
|
||||
# @RELATION: DEPENDS_ON -> TaskLogRecord
|
||||
# @INVARIANT: Log entries are batch-inserted for performance.
|
||||
class TaskLogPersistenceService:
|
||||
"""
|
||||
Service for persisting and querying task logs.
|
||||
Supports batch inserts, filtering, and statistics.
|
||||
"""
|
||||
|
||||
# [DEF:__init__:Function]
|
||||
# @PURPOSE: Initialize the log persistence service.
|
||||
# @POST: Service is ready.
|
||||
def __init__(self):
|
||||
pass
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
# [DEF:add_logs:Function]
|
||||
# @PURPOSE: Batch insert log entries for a task.
|
||||
# @PRE: logs is a list of LogEntry objects.
|
||||
# @POST: All logs inserted into task_logs table.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @PARAM: logs (List[LogEntry]) - Log entries to insert.
|
||||
# @SIDE_EFFECT: Writes to task_logs table.
|
||||
def add_logs(self, task_id: str, logs: List[LogEntry]) -> None:
|
||||
if not logs:
|
||||
return
|
||||
with belief_scope("TaskLogPersistenceService.add_logs", f"task_id={task_id}"):
|
||||
session: Session = TasksSessionLocal()
|
||||
try:
|
||||
for log in logs:
|
||||
record = TaskLogRecord(
|
||||
task_id=task_id,
|
||||
timestamp=log.timestamp,
|
||||
level=log.level,
|
||||
source=log.source or "system",
|
||||
message=log.message,
|
||||
metadata_json=json.dumps(log.metadata) if log.metadata else None
|
||||
)
|
||||
session.add(record)
|
||||
session.commit()
|
||||
except Exception as e:
|
||||
session.rollback()
|
||||
logger.error(f"Failed to add logs for task {task_id}: {e}")
|
||||
finally:
|
||||
session.close()
|
||||
# [/DEF:add_logs:Function]
|
||||
|
||||
# [DEF:get_logs:Function]
|
||||
# @PURPOSE: Query logs for a task with filtering and pagination.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: Returns list of TaskLog objects matching filters.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @PARAM: log_filter (LogFilter) - Filter parameters.
|
||||
# @RETURN: List[TaskLog] - Filtered log entries.
|
||||
def get_logs(self, task_id: str, log_filter: LogFilter) -> List[TaskLog]:
|
||||
with belief_scope("TaskLogPersistenceService.get_logs", f"task_id={task_id}"):
|
||||
session: Session = TasksSessionLocal()
|
||||
try:
|
||||
query = session.query(TaskLogRecord).filter(TaskLogRecord.task_id == task_id)
|
||||
|
||||
# Apply filters
|
||||
if log_filter.level:
|
||||
query = query.filter(TaskLogRecord.level == log_filter.level.upper())
|
||||
if log_filter.source:
|
||||
query = query.filter(TaskLogRecord.source == log_filter.source)
|
||||
if log_filter.search:
|
||||
search_pattern = f"%{log_filter.search}%"
|
||||
query = query.filter(TaskLogRecord.message.ilike(search_pattern))
|
||||
|
||||
# Order by timestamp ascending (oldest first)
|
||||
query = query.order_by(TaskLogRecord.timestamp.asc())
|
||||
|
||||
# Apply pagination
|
||||
records = query.offset(log_filter.offset).limit(log_filter.limit).all()
|
||||
|
||||
logs = []
|
||||
for record in records:
|
||||
metadata = None
|
||||
if record.metadata_json:
|
||||
try:
|
||||
metadata = json.loads(record.metadata_json)
|
||||
except json.JSONDecodeError:
|
||||
metadata = None
|
||||
|
||||
logs.append(TaskLog(
|
||||
id=record.id,
|
||||
task_id=record.task_id,
|
||||
timestamp=record.timestamp,
|
||||
level=record.level,
|
||||
source=record.source,
|
||||
message=record.message,
|
||||
metadata=metadata
|
||||
))
|
||||
|
||||
return logs
|
||||
finally:
|
||||
session.close()
|
||||
# [/DEF:get_logs:Function]
|
||||
|
||||
# [DEF:get_log_stats:Function]
|
||||
# @PURPOSE: Get statistics about logs for a task.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: Returns LogStats with counts by level and source.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @RETURN: LogStats - Statistics about task logs.
|
||||
def get_log_stats(self, task_id: str) -> LogStats:
|
||||
with belief_scope("TaskLogPersistenceService.get_log_stats", f"task_id={task_id}"):
|
||||
session: Session = TasksSessionLocal()
|
||||
try:
|
||||
# Get total count
|
||||
total_count = session.query(TaskLogRecord).filter(
|
||||
TaskLogRecord.task_id == task_id
|
||||
).count()
|
||||
|
||||
# Get counts by level
|
||||
from sqlalchemy import func
|
||||
level_counts = session.query(
|
||||
TaskLogRecord.level,
|
||||
func.count(TaskLogRecord.id)
|
||||
).filter(
|
||||
TaskLogRecord.task_id == task_id
|
||||
).group_by(TaskLogRecord.level).all()
|
||||
|
||||
by_level = {level: count for level, count in level_counts}
|
||||
|
||||
# Get counts by source
|
||||
source_counts = session.query(
|
||||
TaskLogRecord.source,
|
||||
func.count(TaskLogRecord.id)
|
||||
).filter(
|
||||
TaskLogRecord.task_id == task_id
|
||||
).group_by(TaskLogRecord.source).all()
|
||||
|
||||
by_source = {source: count for source, count in source_counts}
|
||||
|
||||
return LogStats(
|
||||
total_count=total_count,
|
||||
by_level=by_level,
|
||||
by_source=by_source
|
||||
)
|
||||
finally:
|
||||
session.close()
|
||||
# [/DEF:get_log_stats:Function]
|
||||
|
||||
# [DEF:get_sources:Function]
|
||||
# @PURPOSE: Get unique sources for a task's logs.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: Returns list of unique source strings.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @RETURN: List[str] - Unique source names.
|
||||
def get_sources(self, task_id: str) -> List[str]:
|
||||
with belief_scope("TaskLogPersistenceService.get_sources", f"task_id={task_id}"):
|
||||
session: Session = TasksSessionLocal()
|
||||
try:
|
||||
from sqlalchemy import distinct
|
||||
sources = session.query(distinct(TaskLogRecord.source)).filter(
|
||||
TaskLogRecord.task_id == task_id
|
||||
).all()
|
||||
return [s[0] for s in sources]
|
||||
finally:
|
||||
session.close()
|
||||
# [/DEF:get_sources:Function]
|
||||
|
||||
# [DEF:delete_logs_for_task:Function]
|
||||
# @PURPOSE: Delete all logs for a specific task.
|
||||
# @PRE: task_id is a valid task ID.
|
||||
# @POST: All logs for the task are deleted.
|
||||
# @PARAM: task_id (str) - The task ID.
|
||||
# @SIDE_EFFECT: Deletes from task_logs table.
|
||||
def delete_logs_for_task(self, task_id: str) -> None:
|
||||
with belief_scope("TaskLogPersistenceService.delete_logs_for_task", f"task_id={task_id}"):
|
||||
session: Session = TasksSessionLocal()
|
||||
try:
|
||||
session.query(TaskLogRecord).filter(
|
||||
TaskLogRecord.task_id == task_id
|
||||
).delete(synchronize_session=False)
|
||||
session.commit()
|
||||
except Exception as e:
|
||||
session.rollback()
|
||||
logger.error(f"Failed to delete logs for task {task_id}: {e}")
|
||||
finally:
|
||||
session.close()
|
||||
# [/DEF:delete_logs_for_task:Function]
|
||||
|
||||
# [DEF:delete_logs_for_tasks:Function]
|
||||
# @PURPOSE: Delete all logs for multiple tasks.
|
||||
# @PRE: task_ids is a list of task IDs.
|
||||
# @POST: All logs for the tasks are deleted.
|
||||
# @PARAM: task_ids (List[str]) - List of task IDs.
|
||||
def delete_logs_for_tasks(self, task_ids: List[str]) -> None:
|
||||
if not task_ids:
|
||||
return
|
||||
with belief_scope("TaskLogPersistenceService.delete_logs_for_tasks"):
|
||||
session: Session = TasksSessionLocal()
|
||||
try:
|
||||
session.query(TaskLogRecord).filter(
|
||||
TaskLogRecord.task_id.in_(task_ids)
|
||||
).delete(synchronize_session=False)
|
||||
session.commit()
|
||||
except Exception as e:
|
||||
session.rollback()
|
||||
logger.error(f"Failed to delete logs for tasks: {e}")
|
||||
finally:
|
||||
session.close()
|
||||
# [/DEF:delete_logs_for_tasks:Function]
|
||||
|
||||
# [/DEF:TaskLogPersistenceService:Class]
|
||||
# [/DEF:TaskPersistenceModule:Module]
|
||||
@@ -1,168 +0,0 @@
|
||||
# [DEF:TaskLoggerModule:Module]
|
||||
# @SEMANTICS: task, logger, context, plugin, attribution
|
||||
# @PURPOSE: Provides a dedicated logger for tasks with automatic source attribution.
|
||||
# @LAYER: Core
|
||||
# @RELATION: DEPENDS_ON -> TaskManager, CALLS -> TaskManager._add_log
|
||||
# @TIER: CRITICAL
|
||||
# @INVARIANT: Each TaskLogger instance is bound to a specific task_id and default source.
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
from typing import Dict, Any, Optional, Callable
|
||||
from datetime import datetime
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TaskLogger:Class]
|
||||
# @SEMANTICS: logger, task, source, attribution
|
||||
# @PURPOSE: A wrapper around TaskManager._add_log that carries task_id and source context.
|
||||
# @TIER: CRITICAL
|
||||
# @INVARIANT: All log calls include the task_id and source.
|
||||
# @UX_STATE: Idle -> Logging -> (system records log)
|
||||
class TaskLogger:
|
||||
"""
|
||||
A dedicated logger for tasks that automatically tags logs with source attribution.
|
||||
|
||||
Usage:
|
||||
logger = TaskLogger(task_id="abc123", add_log_fn=task_manager._add_log, source="plugin")
|
||||
logger.info("Starting backup process")
|
||||
logger.error("Failed to connect", metadata={"error_code": 500})
|
||||
|
||||
# Create sub-logger with different source
|
||||
api_logger = logger.with_source("superset_api")
|
||||
api_logger.info("Fetching dashboards")
|
||||
"""
|
||||
|
||||
# [DEF:__init__:Function]
|
||||
# @PURPOSE: Initialize the TaskLogger with task context.
|
||||
# @PRE: add_log_fn is a callable that accepts (task_id, level, message, context, source, metadata).
|
||||
# @POST: TaskLogger is ready to log messages.
|
||||
# @PARAM: task_id (str) - The ID of the task.
|
||||
# @PARAM: add_log_fn (Callable) - Function to add log to TaskManager.
|
||||
# @PARAM: source (str) - Default source for logs (default: "plugin").
|
||||
def __init__(
|
||||
self,
|
||||
task_id: str,
|
||||
add_log_fn: Callable,
|
||||
source: str = "plugin"
|
||||
):
|
||||
self._task_id = task_id
|
||||
self._add_log = add_log_fn
|
||||
self._default_source = source
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
# [DEF:with_source:Function]
|
||||
# @PURPOSE: Create a sub-logger with a different default source.
|
||||
# @PRE: source is a non-empty string.
|
||||
# @POST: Returns new TaskLogger with the same task_id but different source.
|
||||
# @PARAM: source (str) - New default source.
|
||||
# @RETURN: TaskLogger - New logger instance.
|
||||
def with_source(self, source: str) -> "TaskLogger":
|
||||
"""Create a sub-logger with a different source context."""
|
||||
return TaskLogger(
|
||||
task_id=self._task_id,
|
||||
add_log_fn=self._add_log,
|
||||
source=source
|
||||
)
|
||||
# [/DEF:with_source:Function]
|
||||
|
||||
# [DEF:_log:Function]
|
||||
# @PURPOSE: Internal method to log a message at a given level.
|
||||
# @PRE: level is a valid log level string.
|
||||
# @POST: Log entry added via add_log_fn.
|
||||
# @PARAM: level (str) - Log level (DEBUG, INFO, WARNING, ERROR).
|
||||
# @PARAM: message (str) - Log message.
|
||||
# @PARAM: source (Optional[str]) - Override source for this log entry.
|
||||
# @PARAM: metadata (Optional[Dict]) - Additional structured data.
|
||||
def _log(
|
||||
self,
|
||||
level: str,
|
||||
message: str,
|
||||
source: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
"""Internal logging method."""
|
||||
self._add_log(
|
||||
task_id=self._task_id,
|
||||
level=level,
|
||||
message=message,
|
||||
source=source or self._default_source,
|
||||
metadata=metadata
|
||||
)
|
||||
# [/DEF:_log:Function]
|
||||
|
||||
# [DEF:debug:Function]
|
||||
# @PURPOSE: Log a DEBUG level message.
|
||||
# @PARAM: message (str) - Log message.
|
||||
# @PARAM: source (Optional[str]) - Override source.
|
||||
# @PARAM: metadata (Optional[Dict]) - Additional data.
|
||||
def debug(
|
||||
self,
|
||||
message: str,
|
||||
source: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
self._log("DEBUG", message, source, metadata)
|
||||
# [/DEF:debug:Function]
|
||||
|
||||
# [DEF:info:Function]
|
||||
# @PURPOSE: Log an INFO level message.
|
||||
# @PARAM: message (str) - Log message.
|
||||
# @PARAM: source (Optional[str]) - Override source.
|
||||
# @PARAM: metadata (Optional[Dict]) - Additional data.
|
||||
def info(
|
||||
self,
|
||||
message: str,
|
||||
source: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
self._log("INFO", message, source, metadata)
|
||||
# [/DEF:info:Function]
|
||||
|
||||
# [DEF:warning:Function]
|
||||
# @PURPOSE: Log a WARNING level message.
|
||||
# @PARAM: message (str) - Log message.
|
||||
# @PARAM: source (Optional[str]) - Override source.
|
||||
# @PARAM: metadata (Optional[Dict]) - Additional data.
|
||||
def warning(
|
||||
self,
|
||||
message: str,
|
||||
source: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
self._log("WARNING", message, source, metadata)
|
||||
# [/DEF:warning:Function]
|
||||
|
||||
# [DEF:error:Function]
|
||||
# @PURPOSE: Log an ERROR level message.
|
||||
# @PARAM: message (str) - Log message.
|
||||
# @PARAM: source (Optional[str]) - Override source.
|
||||
# @PARAM: metadata (Optional[Dict]) - Additional data.
|
||||
def error(
|
||||
self,
|
||||
message: str,
|
||||
source: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
self._log("ERROR", message, source, metadata)
|
||||
# [/DEF:error:Function]
|
||||
|
||||
# [DEF:progress:Function]
|
||||
# @PURPOSE: Log a progress update with percentage.
|
||||
# @PRE: percent is between 0 and 100.
|
||||
# @POST: Log entry with progress metadata added.
|
||||
# @PARAM: message (str) - Progress message.
|
||||
# @PARAM: percent (float) - Progress percentage (0-100).
|
||||
# @PARAM: source (Optional[str]) - Override source.
|
||||
def progress(
|
||||
self,
|
||||
message: str,
|
||||
percent: float,
|
||||
source: Optional[str] = None
|
||||
) -> None:
|
||||
"""Log a progress update with percentage."""
|
||||
metadata = {"progress": min(100, max(0, percent))}
|
||||
self._log("INFO", message, source, metadata)
|
||||
# [/DEF:progress:Function]
|
||||
|
||||
# [/DEF:TaskLogger:Class]
|
||||
|
||||
# [/DEF:TaskLoggerModule:Module]
|
||||
@@ -140,16 +140,7 @@ class APIClient:
|
||||
app_logger.info("[authenticate][Enter] Authenticating to %s", self.base_url)
|
||||
try:
|
||||
login_url = f"{self.base_url}/security/login"
|
||||
# Log the payload keys and values (masking password)
|
||||
masked_auth = {k: ("******" if k == "password" else v) for k, v in self.auth.items()}
|
||||
app_logger.info(f"[authenticate][Debug] Login URL: {login_url}")
|
||||
app_logger.info(f"[authenticate][Debug] Auth payload: {masked_auth}")
|
||||
|
||||
response = self.session.post(login_url, json=self.auth, timeout=self.request_settings["timeout"])
|
||||
|
||||
if response.status_code != 200:
|
||||
app_logger.error(f"[authenticate][Error] Status: {response.status_code}, Response: {response.text}")
|
||||
|
||||
response.raise_for_status()
|
||||
access_token = response.json()["access_token"]
|
||||
|
||||
@@ -162,9 +153,6 @@ class APIClient:
|
||||
app_logger.info("[authenticate][Exit] Authenticated successfully.")
|
||||
return self._tokens
|
||||
except requests.exceptions.HTTPError as e:
|
||||
status_code = e.response.status_code if e.response is not None else None
|
||||
if status_code in [502, 503, 504]:
|
||||
raise NetworkError(f"Environment unavailable during authentication (Status {status_code})", status_code=status_code) from e
|
||||
raise AuthenticationError(f"Authentication failed: {e}") from e
|
||||
except (requests.exceptions.RequestException, KeyError) as e:
|
||||
raise NetworkError(f"Network or parsing error during authentication: {e}") from e
|
||||
@@ -221,8 +209,6 @@ class APIClient:
|
||||
def _handle_http_error(self, e: requests.exceptions.HTTPError, endpoint: str):
|
||||
with belief_scope("_handle_http_error"):
|
||||
status_code = e.response.status_code
|
||||
if status_code == 502 or status_code == 503 or status_code == 504:
|
||||
raise NetworkError(f"Environment unavailable (Status {status_code})", status_code=status_code) from e
|
||||
if status_code == 404: raise DashboardNotFoundError(endpoint) from e
|
||||
if status_code == 403: raise PermissionDeniedError() from e
|
||||
if status_code == 401: raise AuthenticationError() from e
|
||||
|
||||
@@ -35,7 +35,8 @@ init_db()
|
||||
# @RETURN: ConfigManager - The shared config manager instance.
|
||||
def get_config_manager() -> ConfigManager:
|
||||
"""Dependency injector for the ConfigManager."""
|
||||
return config_manager
|
||||
with belief_scope("get_config_manager"):
|
||||
return config_manager
|
||||
# [/DEF:get_config_manager:Function]
|
||||
|
||||
plugin_dir = Path(__file__).parent / "plugins"
|
||||
@@ -57,7 +58,8 @@ logger.info("SchedulerService initialized")
|
||||
# @RETURN: PluginLoader - The shared plugin loader instance.
|
||||
def get_plugin_loader() -> PluginLoader:
|
||||
"""Dependency injector for the PluginLoader."""
|
||||
return plugin_loader
|
||||
with belief_scope("get_plugin_loader"):
|
||||
return plugin_loader
|
||||
# [/DEF:get_plugin_loader:Function]
|
||||
|
||||
# [DEF:get_task_manager:Function]
|
||||
@@ -67,7 +69,8 @@ def get_plugin_loader() -> PluginLoader:
|
||||
# @RETURN: TaskManager - The shared task manager instance.
|
||||
def get_task_manager() -> TaskManager:
|
||||
"""Dependency injector for the TaskManager."""
|
||||
return task_manager
|
||||
with belief_scope("get_task_manager"):
|
||||
return task_manager
|
||||
# [/DEF:get_task_manager:Function]
|
||||
|
||||
# [DEF:get_scheduler_service:Function]
|
||||
@@ -77,7 +80,8 @@ def get_task_manager() -> TaskManager:
|
||||
# @RETURN: SchedulerService - The shared scheduler service instance.
|
||||
def get_scheduler_service() -> SchedulerService:
|
||||
"""Dependency injector for the SchedulerService."""
|
||||
return scheduler_service
|
||||
with belief_scope("get_scheduler_service"):
|
||||
return scheduler_service
|
||||
# [/DEF:get_scheduler_service:Function]
|
||||
|
||||
# [DEF:oauth2_scheme:Variable]
|
||||
@@ -94,24 +98,25 @@ oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
|
||||
# @PARAM: db (Session) - Auth database session.
|
||||
# @RETURN: User - The authenticated user.
|
||||
def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_db)):
|
||||
credentials_exception = HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Could not validate credentials",
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
try:
|
||||
payload = decode_token(token)
|
||||
username: str = payload.get("sub")
|
||||
if username is None:
|
||||
with belief_scope("get_current_user"):
|
||||
credentials_exception = HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Could not validate credentials",
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
try:
|
||||
payload = decode_token(token)
|
||||
username: str = payload.get("sub")
|
||||
if username is None:
|
||||
raise credentials_exception
|
||||
except JWTError:
|
||||
raise credentials_exception
|
||||
except JWTError:
|
||||
raise credentials_exception
|
||||
|
||||
repo = AuthRepository(db)
|
||||
user = repo.get_user_by_username(username)
|
||||
if user is None:
|
||||
raise credentials_exception
|
||||
return user
|
||||
|
||||
repo = AuthRepository(db)
|
||||
user = repo.get_user_by_username(username)
|
||||
if user is None:
|
||||
raise credentials_exception
|
||||
return user
|
||||
# [/DEF:get_current_user:Function]
|
||||
|
||||
# [DEF:has_permission:Function]
|
||||
@@ -124,23 +129,24 @@ def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_
|
||||
# @RETURN: User - The authenticated user if permission granted.
|
||||
def has_permission(resource: str, action: str):
|
||||
def permission_checker(current_user: User = Depends(get_current_user)):
|
||||
# Union of all permissions across all roles
|
||||
for role in current_user.roles:
|
||||
for perm in role.permissions:
|
||||
if perm.resource == resource and perm.action == action:
|
||||
return current_user
|
||||
|
||||
# Special case for Admin role (full access)
|
||||
if any(role.name == "Admin" for role in current_user.roles):
|
||||
return current_user
|
||||
|
||||
from .core.auth.logger import log_security_event
|
||||
log_security_event("PERMISSION_DENIED", current_user.username, {"resource": resource, "action": action})
|
||||
with belief_scope("has_permission", f"{resource}:{action}"):
|
||||
# Union of all permissions across all roles
|
||||
for role in current_user.roles:
|
||||
for perm in role.permissions:
|
||||
if perm.resource == resource and perm.action == action:
|
||||
return current_user
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail=f"Permission denied for {resource}:{action}"
|
||||
)
|
||||
# Special case for Admin role (full access)
|
||||
if any(role.name == "Admin" for role in current_user.roles):
|
||||
return current_user
|
||||
|
||||
from .core.auth.logger import log_security_event
|
||||
log_security_event("PERMISSION_DENIED", current_user.username, {"resource": resource, "action": action})
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail=f"Permission denied for {resource}:{action}"
|
||||
)
|
||||
return permission_checker
|
||||
# [/DEF:has_permission:Function]
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.models.connection:Module]
|
||||
#
|
||||
# @TIER: TRIVIAL
|
||||
# @SEMANTICS: database, connection, configuration, sqlalchemy, sqlite
|
||||
# @PURPOSE: Defines the database schema for external database connection configurations.
|
||||
# @LAYER: Domain
|
||||
@@ -16,7 +15,6 @@ import uuid
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:ConnectionConfig:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Stores credentials for external databases used for column mapping.
|
||||
class ConnectionConfig(Base):
|
||||
__tablename__ = "connection_configs"
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# [DEF:GitModels:Module]
|
||||
# @TIER: TRIVIAL
|
||||
# @SEMANTICS: git, models, sqlalchemy, database, schema
|
||||
# @PURPOSE: Git-specific SQLAlchemy models for configuration and repository tracking.
|
||||
# @LAYER: Model
|
||||
@@ -27,10 +26,11 @@ class SyncStatus(str, enum.Enum):
|
||||
DIRTY = "DIRTY"
|
||||
CONFLICT = "CONFLICT"
|
||||
|
||||
# [DEF:GitServerConfig:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Configuration for a Git server connection.
|
||||
class GitServerConfig(Base):
|
||||
"""
|
||||
[DEF:GitServerConfig:Class]
|
||||
Configuration for a Git server connection.
|
||||
"""
|
||||
__tablename__ = "git_server_configs"
|
||||
|
||||
id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
|
||||
@@ -41,12 +41,12 @@ class GitServerConfig(Base):
|
||||
default_repository = Column(String(255), nullable=True)
|
||||
status = Column(Enum(GitStatus), default=GitStatus.UNKNOWN)
|
||||
last_validated = Column(DateTime, default=datetime.utcnow)
|
||||
# [/DEF:GitServerConfig:Class]
|
||||
|
||||
# [DEF:GitRepository:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Tracking for a local Git repository linked to a dashboard.
|
||||
class GitRepository(Base):
|
||||
"""
|
||||
[DEF:GitRepository:Class]
|
||||
Tracking for a local Git repository linked to a dashboard.
|
||||
"""
|
||||
__tablename__ = "git_repositories"
|
||||
|
||||
id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
|
||||
@@ -56,12 +56,12 @@ class GitRepository(Base):
|
||||
local_path = Column(String(255), nullable=False)
|
||||
current_branch = Column(String(255), default="main")
|
||||
sync_status = Column(Enum(SyncStatus), default=SyncStatus.CLEAN)
|
||||
# [/DEF:GitRepository:Class]
|
||||
|
||||
# [DEF:DeploymentEnvironment:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Target Superset environments for dashboard deployment.
|
||||
class DeploymentEnvironment(Base):
|
||||
"""
|
||||
[DEF:DeploymentEnvironment:Class]
|
||||
Target Superset environments for dashboard deployment.
|
||||
"""
|
||||
__tablename__ = "deployment_environments"
|
||||
|
||||
id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
|
||||
@@ -69,6 +69,5 @@ class DeploymentEnvironment(Base):
|
||||
superset_url = Column(String(255), nullable=False)
|
||||
superset_token = Column(String(255), nullable=False)
|
||||
is_active = Column(Boolean, default=True)
|
||||
# [/DEF:DeploymentEnvironment:Class]
|
||||
|
||||
# [/DEF:GitModels:Module]
|
||||
# [/DEF:GitModels:Module]
|
||||
@@ -59,7 +59,6 @@ class DatabaseMapping(Base):
|
||||
# [/DEF:DatabaseMapping:Class]
|
||||
|
||||
# [DEF:MigrationJob:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Represents a single migration execution job.
|
||||
class MigrationJob(Base):
|
||||
__tablename__ = "migration_jobs"
|
||||
|
||||
@@ -1,16 +1,9 @@
|
||||
# [DEF:backend.src.models.storage:Module]
|
||||
# @TIER: TRIVIAL
|
||||
# @SEMANTICS: storage, file, model, pydantic
|
||||
# @PURPOSE: Data models for the storage system.
|
||||
# @LAYER: Domain
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
# [DEF:FileCategory:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Enumeration of supported file categories in the storage system.
|
||||
class FileCategory(str, Enum):
|
||||
BACKUP = "backups"
|
||||
@@ -18,7 +11,6 @@ class FileCategory(str, Enum):
|
||||
# [/DEF:FileCategory:Class]
|
||||
|
||||
# [DEF:StorageConfig:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Configuration model for the storage system, defining paths and naming patterns.
|
||||
class StorageConfig(BaseModel):
|
||||
root_path: str = Field(default="backups", description="Absolute path to the storage root directory.")
|
||||
@@ -28,7 +20,6 @@ class StorageConfig(BaseModel):
|
||||
# [/DEF:StorageConfig:Class]
|
||||
|
||||
# [DEF:StoredFile:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Data model representing metadata for a file stored in the system.
|
||||
class StoredFile(BaseModel):
|
||||
name: str = Field(..., description="Name of the file (including extension).")
|
||||
@@ -37,6 +28,4 @@ class StoredFile(BaseModel):
|
||||
created_at: datetime = Field(..., description="Creation timestamp.")
|
||||
category: FileCategory = Field(..., description="Category of the file.")
|
||||
mime_type: Optional[str] = Field(None, description="MIME type of the file.")
|
||||
# [/DEF:StoredFile:Class]
|
||||
|
||||
# [/DEF:backend.src.models.storage:Module]
|
||||
# [/DEF:StoredFile:Class]
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.models.task:Module]
|
||||
#
|
||||
# @TIER: TRIVIAL
|
||||
# @SEMANTICS: database, task, record, sqlalchemy, sqlite
|
||||
# @PURPOSE: Defines the database schema for task execution records.
|
||||
# @LAYER: Domain
|
||||
@@ -9,14 +8,13 @@
|
||||
# @INVARIANT: All primary keys are UUID strings.
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
from sqlalchemy import Column, String, DateTime, JSON, ForeignKey, Text, Integer, Index
|
||||
from sqlalchemy import Column, String, DateTime, JSON, ForeignKey
|
||||
from sqlalchemy.sql import func
|
||||
from .mapping import Base
|
||||
import uuid
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TaskRecord:Class]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Represents a persistent record of a task execution.
|
||||
class TaskRecord(Base):
|
||||
__tablename__ = "task_records"
|
||||
@@ -27,35 +25,11 @@ class TaskRecord(Base):
|
||||
environment_id = Column(String, ForeignKey("environments.id"), nullable=True)
|
||||
started_at = Column(DateTime(timezone=True), nullable=True)
|
||||
finished_at = Column(DateTime(timezone=True), nullable=True)
|
||||
logs = Column(JSON, nullable=True) # Store structured logs as JSON (legacy, kept for backward compatibility)
|
||||
logs = Column(JSON, nullable=True) # Store structured logs as JSON
|
||||
error = Column(String, nullable=True)
|
||||
result = Column(JSON, nullable=True)
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
params = Column(JSON, nullable=True)
|
||||
# [/DEF:TaskRecord:Class]
|
||||
|
||||
# [DEF:TaskLogRecord:Class]
|
||||
# @PURPOSE: Represents a single persistent log entry for a task.
|
||||
# @TIER: CRITICAL
|
||||
# @RELATION: DEPENDS_ON -> TaskRecord
|
||||
# @INVARIANT: Each log entry belongs to exactly one task.
|
||||
class TaskLogRecord(Base):
|
||||
__tablename__ = "task_logs"
|
||||
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
task_id = Column(String, ForeignKey("task_records.id", ondelete="CASCADE"), nullable=False, index=True)
|
||||
timestamp = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
level = Column(String(16), nullable=False) # INFO, WARNING, ERROR, DEBUG
|
||||
source = Column(String(64), nullable=False, default="system") # plugin, superset_api, git, etc.
|
||||
message = Column(Text, nullable=False)
|
||||
metadata_json = Column(Text, nullable=True) # JSON string for additional metadata
|
||||
|
||||
# Composite indexes for efficient filtering
|
||||
__table_args__ = (
|
||||
Index('ix_task_logs_task_timestamp', 'task_id', 'timestamp'),
|
||||
Index('ix_task_logs_task_level', 'task_id', 'level'),
|
||||
Index('ix_task_logs_task_source', 'task_id', 'source'),
|
||||
)
|
||||
# [/DEF:TaskLogRecord:Class]
|
||||
|
||||
# [/DEF:backend.src.models.task:Module]
|
||||
@@ -5,14 +5,13 @@
|
||||
# @RELATION: IMPLEMENTS -> PluginBase
|
||||
# @RELATION: DEPENDS_ON -> superset_tool.client
|
||||
# @RELATION: DEPENDS_ON -> superset_tool.utils
|
||||
# @RELATION: USES -> TaskContext
|
||||
|
||||
from typing import Dict, Any, Optional
|
||||
from typing import Dict, Any
|
||||
from pathlib import Path
|
||||
from requests.exceptions import RequestException
|
||||
|
||||
from ..core.plugin_base import PluginBase
|
||||
from ..core.logger import belief_scope, logger as app_logger
|
||||
from ..core.logger import belief_scope
|
||||
from ..core.superset_client import SupersetClient
|
||||
from ..core.utils.network import SupersetAPIError
|
||||
from ..core.utils.fileio import (
|
||||
@@ -24,7 +23,6 @@ from ..core.utils.fileio import (
|
||||
RetentionPolicy
|
||||
)
|
||||
from ..dependencies import get_config_manager
|
||||
from ..core.task_manager.context import TaskContext
|
||||
|
||||
# [DEF:BackupPlugin:Class]
|
||||
# @PURPOSE: Implementation of the backup plugin logic.
|
||||
@@ -112,12 +110,11 @@ class BackupPlugin(PluginBase):
|
||||
# [/DEF:get_schema:Function]
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Executes the dashboard backup logic with TaskContext support.
|
||||
# @PURPOSE: Executes the dashboard backup logic.
|
||||
# @PARAM: params (Dict[str, Any]) - Backup parameters (env, backup_path).
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: Target environment must be configured. params must be a dictionary.
|
||||
# @POST: All dashboards are exported and archived.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
with belief_scope("execute"):
|
||||
config_manager = get_config_manager()
|
||||
env_id = params.get("environment_id")
|
||||
@@ -136,14 +133,8 @@ class BackupPlugin(PluginBase):
|
||||
# Use 'backups' subfolder within the storage root
|
||||
backup_path = Path(storage_settings.root_path) / "backups"
|
||||
|
||||
# Use TaskContext logger if available, otherwise fall back to app_logger
|
||||
log = context.logger if context else app_logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
storage_log = log.with_source("storage") if context else log
|
||||
|
||||
log.info(f"Starting backup for environment: {env}")
|
||||
from ..core.logger import logger as app_logger
|
||||
app_logger.info(f"[BackupPlugin][Entry] Starting backup for {env}.")
|
||||
|
||||
try:
|
||||
config_manager = get_config_manager()
|
||||
@@ -157,30 +148,24 @@ class BackupPlugin(PluginBase):
|
||||
client = SupersetClient(env_config)
|
||||
|
||||
dashboard_count, dashboard_meta = client.get_dashboards()
|
||||
superset_log.info(f"Found {dashboard_count} dashboards to export")
|
||||
app_logger.info(f"[BackupPlugin][Progress] Found {dashboard_count} dashboards to export in {env}.")
|
||||
|
||||
if dashboard_count == 0:
|
||||
log.info("No dashboards to back up")
|
||||
app_logger.info("[BackupPlugin][Exit] No dashboards to back up.")
|
||||
return
|
||||
|
||||
total = len(dashboard_meta)
|
||||
for idx, db in enumerate(dashboard_meta, 1):
|
||||
for db in dashboard_meta:
|
||||
dashboard_id = db.get('id')
|
||||
dashboard_title = db.get('dashboard_title', 'Unknown Dashboard')
|
||||
if not dashboard_id:
|
||||
continue
|
||||
|
||||
# Report progress
|
||||
progress_pct = (idx / total) * 100
|
||||
log.progress(f"Backing up dashboard: {dashboard_title}", percent=progress_pct)
|
||||
|
||||
try:
|
||||
dashboard_base_dir_name = sanitize_filename(f"{dashboard_title}")
|
||||
dashboard_dir = backup_path / env.upper() / dashboard_base_dir_name
|
||||
dashboard_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
zip_content, filename = client.export_dashboard(dashboard_id)
|
||||
superset_log.debug(f"Exported dashboard: {dashboard_title}")
|
||||
|
||||
save_and_unpack_dashboard(
|
||||
zip_content=zip_content,
|
||||
@@ -190,19 +175,18 @@ class BackupPlugin(PluginBase):
|
||||
)
|
||||
|
||||
archive_exports(str(dashboard_dir), policy=RetentionPolicy())
|
||||
storage_log.debug(f"Archived dashboard: {dashboard_title}")
|
||||
|
||||
except (SupersetAPIError, RequestException, IOError, OSError) as db_error:
|
||||
log.error(f"Failed to export dashboard {dashboard_title} (ID: {dashboard_id}): {db_error}")
|
||||
app_logger.error(f"[BackupPlugin][Failure] Failed to export dashboard {dashboard_title} (ID: {dashboard_id}): {db_error}", exc_info=True)
|
||||
continue
|
||||
|
||||
consolidate_archive_folders(backup_path / env.upper())
|
||||
remove_empty_directories(str(backup_path / env.upper()))
|
||||
|
||||
log.info(f"Backup completed successfully for {env}")
|
||||
app_logger.info(f"[BackupPlugin][CoherenceCheck:Passed] Backup logic completed for {env}.")
|
||||
|
||||
except (RequestException, IOError, KeyError) as e:
|
||||
log.error(f"Fatal error during backup for {env}: {e}")
|
||||
app_logger.critical(f"[BackupPlugin][Failure] Fatal error during backup for {env}: {e}", exc_info=True)
|
||||
raise e
|
||||
# [/DEF:execute:Function]
|
||||
# [/DEF:BackupPlugin:Class]
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
# @PURPOSE: Implements a plugin for system diagnostics and debugging Superset API responses.
|
||||
# @LAYER: Plugins
|
||||
# @RELATION: Inherits from PluginBase. Uses SupersetClient from core.
|
||||
# @RELATION: USES -> TaskContext
|
||||
# @CONSTRAINT: Must use belief_scope for logging.
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
@@ -11,7 +10,6 @@ from typing import Dict, Any, Optional
|
||||
from ..core.plugin_base import PluginBase
|
||||
from ..core.superset_client import SupersetClient
|
||||
from ..core.logger import logger, belief_scope
|
||||
from ..core.task_manager.context import TaskContext
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:DebugPlugin:Class]
|
||||
@@ -116,29 +114,20 @@ class DebugPlugin(PluginBase):
|
||||
# [/DEF:get_schema:Function]
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Executes the debug logic with TaskContext support.
|
||||
# @PURPOSE: Executes the debug logic.
|
||||
# @PARAM: params (Dict[str, Any]) - Debug parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: action must be provided in params.
|
||||
# @POST: Debug action is executed and results returned.
|
||||
# @RETURN: Dict[str, Any] - Execution results.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
|
||||
async def execute(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
||||
with belief_scope("execute"):
|
||||
action = params.get("action")
|
||||
|
||||
# Use TaskContext logger if available, otherwise fall back to app logger
|
||||
log = context.logger if context else logger
|
||||
debug_log = log.with_source("debug") if context else log
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
|
||||
debug_log.info(f"Executing debug action: {action}")
|
||||
|
||||
if action == "test-db-api":
|
||||
return await self._test_db_api(params, superset_log)
|
||||
return await self._test_db_api(params)
|
||||
elif action == "get-dataset-structure":
|
||||
return await self._get_dataset_structure(params, superset_log)
|
||||
return await self._get_dataset_structure(params)
|
||||
else:
|
||||
debug_log.error(f"Unknown action: {action}")
|
||||
raise ValueError(f"Unknown action: {action}")
|
||||
# [/DEF:execute:Function]
|
||||
|
||||
@@ -147,37 +136,33 @@ class DebugPlugin(PluginBase):
|
||||
# @PRE: source_env and target_env params exist in params.
|
||||
# @POST: Returns DB counts for both envs.
|
||||
# @PARAM: params (Dict) - Plugin parameters.
|
||||
# @PARAM: log - Logger instance for superset_api source.
|
||||
# @RETURN: Dict - Comparison results.
|
||||
async def _test_db_api(self, params: Dict[str, Any], log) -> Dict[str, Any]:
|
||||
async def _test_db_api(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
||||
with belief_scope("_test_db_api"):
|
||||
source_env_name = params.get("source_env")
|
||||
target_env_name = params.get("target_env")
|
||||
target_env_name = params.get("target_env")
|
||||
|
||||
if not source_env_name or not target_env_name:
|
||||
raise ValueError("source_env and target_env are required for test-db-api")
|
||||
if not source_env_name or not target_env_name:
|
||||
raise ValueError("source_env and target_env are required for test-db-api")
|
||||
|
||||
from ..dependencies import get_config_manager
|
||||
config_manager = get_config_manager()
|
||||
from ..dependencies import get_config_manager
|
||||
config_manager = get_config_manager()
|
||||
|
||||
results = {}
|
||||
for name in [source_env_name, target_env_name]:
|
||||
log.info(f"Testing database API for environment: {name}")
|
||||
env_config = config_manager.get_environment(name)
|
||||
if not env_config:
|
||||
log.error(f"Environment '{name}' not found.")
|
||||
raise ValueError(f"Environment '{name}' not found.")
|
||||
results = {}
|
||||
for name in [source_env_name, target_env_name]:
|
||||
env_config = config_manager.get_environment(name)
|
||||
if not env_config:
|
||||
raise ValueError(f"Environment '{name}' not found.")
|
||||
|
||||
client = SupersetClient(env_config)
|
||||
client.authenticate()
|
||||
count, dbs = client.get_databases()
|
||||
log.debug(f"Found {count} databases in {name}")
|
||||
results[name] = {
|
||||
"count": count,
|
||||
"databases": dbs
|
||||
}
|
||||
client = SupersetClient(env_config)
|
||||
client.authenticate()
|
||||
count, dbs = client.get_databases()
|
||||
results[name] = {
|
||||
"count": count,
|
||||
"databases": dbs
|
||||
}
|
||||
|
||||
return results
|
||||
return results
|
||||
# [/DEF:_test_db_api:Function]
|
||||
|
||||
# [DEF:_get_dataset_structure:Function]
|
||||
@@ -185,31 +170,26 @@ class DebugPlugin(PluginBase):
|
||||
# @PRE: env and dataset_id params exist in params.
|
||||
# @POST: Returns dataset JSON structure.
|
||||
# @PARAM: params (Dict) - Plugin parameters.
|
||||
# @PARAM: log - Logger instance for superset_api source.
|
||||
# @RETURN: Dict - Dataset structure.
|
||||
async def _get_dataset_structure(self, params: Dict[str, Any], log) -> Dict[str, Any]:
|
||||
async def _get_dataset_structure(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
||||
with belief_scope("_get_dataset_structure"):
|
||||
env_name = params.get("env")
|
||||
dataset_id = params.get("dataset_id")
|
||||
dataset_id = params.get("dataset_id")
|
||||
|
||||
if not env_name or dataset_id is None:
|
||||
raise ValueError("env and dataset_id are required for get-dataset-structure")
|
||||
if not env_name or dataset_id is None:
|
||||
raise ValueError("env and dataset_id are required for get-dataset-structure")
|
||||
|
||||
log.info(f"Fetching structure for dataset {dataset_id} in {env_name}")
|
||||
from ..dependencies import get_config_manager
|
||||
config_manager = get_config_manager()
|
||||
env_config = config_manager.get_environment(env_name)
|
||||
if not env_config:
|
||||
raise ValueError(f"Environment '{env_name}' not found.")
|
||||
|
||||
from ..dependencies import get_config_manager
|
||||
config_manager = get_config_manager()
|
||||
env_config = config_manager.get_environment(env_name)
|
||||
if not env_config:
|
||||
log.error(f"Environment '{env_name}' not found.")
|
||||
raise ValueError(f"Environment '{env_name}' not found.")
|
||||
|
||||
client = SupersetClient(env_config)
|
||||
client.authenticate()
|
||||
client = SupersetClient(env_config)
|
||||
client.authenticate()
|
||||
|
||||
dataset_response = client.get_dataset(dataset_id)
|
||||
log.debug(f"Retrieved dataset structure for {dataset_id}")
|
||||
return dataset_response.get('result') or {}
|
||||
dataset_response = client.get_dataset(dataset_id)
|
||||
return dataset_response.get('result') or {}
|
||||
# [/DEF:_get_dataset_structure:Function]
|
||||
|
||||
# [/DEF:DebugPlugin:Class]
|
||||
|
||||
@@ -61,7 +61,6 @@ class GitLLMExtension:
|
||||
return "Update dashboard configurations (LLM generation failed)"
|
||||
|
||||
return response.choices[0].message.content.strip()
|
||||
# [/DEF:suggest_commit_message:Function]
|
||||
# [/DEF:GitLLMExtension:Class]
|
||||
|
||||
# [/DEF:backend/src/plugins/git/llm_extension:Module]
|
||||
@@ -7,7 +7,6 @@
|
||||
# @RELATION: USES -> src.services.git_service.GitService
|
||||
# @RELATION: USES -> src.core.superset_client.SupersetClient
|
||||
# @RELATION: USES -> src.core.config_manager.ConfigManager
|
||||
# @RELATION: USES -> TaskContext
|
||||
#
|
||||
# @INVARIANT: Все операции с Git должны выполняться через GitService.
|
||||
# @CONSTRAINT: Плагин работает только с распакованными YAML-экспортами Superset.
|
||||
@@ -21,10 +20,9 @@ from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
from src.core.plugin_base import PluginBase
|
||||
from src.services.git_service import GitService
|
||||
from src.core.logger import logger as app_logger, belief_scope
|
||||
from src.core.logger import logger, belief_scope
|
||||
from src.core.config_manager import ConfigManager
|
||||
from src.core.superset_client import SupersetClient
|
||||
from src.core.task_manager.context import TaskContext
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:GitPlugin:Class]
|
||||
@@ -37,7 +35,7 @@ class GitPlugin(PluginBase):
|
||||
# @POST: Инициализированы git_service и config_manager.
|
||||
def __init__(self):
|
||||
with belief_scope("GitPlugin.__init__"):
|
||||
app_logger.info("Initializing GitPlugin.")
|
||||
logger.info("[GitPlugin.__init__][Entry] Initializing GitPlugin.")
|
||||
self.git_service = GitService()
|
||||
|
||||
# Robust config path resolution:
|
||||
@@ -52,13 +50,13 @@ class GitPlugin(PluginBase):
|
||||
try:
|
||||
from src.dependencies import config_manager
|
||||
self.config_manager = config_manager
|
||||
app_logger.info("GitPlugin initialized using shared config_manager.")
|
||||
logger.info("[GitPlugin.__init__][Exit] GitPlugin initialized using shared config_manager.")
|
||||
return
|
||||
except:
|
||||
config_path = "config.json"
|
||||
|
||||
self.config_manager = ConfigManager(config_path)
|
||||
app_logger.info(f"GitPlugin initialized with {config_path}")
|
||||
logger.info(f"[GitPlugin.__init__][Exit] GitPlugin initialized with {config_path}")
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
@property
|
||||
@@ -138,41 +136,33 @@ class GitPlugin(PluginBase):
|
||||
logger.info("[GitPlugin.initialize][Action] Initializing Git Integration Plugin logic.")
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Основной метод выполнения задач плагина с поддержкой TaskContext.
|
||||
# @PURPOSE: Основной метод выполнения задач плагина.
|
||||
# @PRE: task_data содержит 'operation' и 'dashboard_id'.
|
||||
# @POST: Возвращает результат выполнения операции.
|
||||
# @PARAM: task_data (Dict[str, Any]) - Данные задачи.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @RETURN: Dict[str, Any] - Статус и сообщение.
|
||||
# @RELATION: CALLS -> self._handle_sync
|
||||
# @RELATION: CALLS -> self._handle_deploy
|
||||
async def execute(self, task_data: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
|
||||
async def execute(self, task_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
with belief_scope("GitPlugin.execute"):
|
||||
operation = task_data.get("operation")
|
||||
dashboard_id = task_data.get("dashboard_id")
|
||||
|
||||
# Use TaskContext logger if available, otherwise fall back to app_logger
|
||||
log = context.logger if context else app_logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
git_log = log.with_source("git") if context else log
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
|
||||
log.info(f"Executing operation: {operation} for dashboard {dashboard_id}")
|
||||
logger.info(f"[GitPlugin.execute][Entry] Executing operation: {operation} for dashboard {dashboard_id}")
|
||||
|
||||
if operation == "sync":
|
||||
source_env_id = task_data.get("source_env_id")
|
||||
result = await self._handle_sync(dashboard_id, source_env_id, log, git_log, superset_log)
|
||||
result = await self._handle_sync(dashboard_id, source_env_id)
|
||||
elif operation == "deploy":
|
||||
env_id = task_data.get("environment_id")
|
||||
result = await self._handle_deploy(dashboard_id, env_id, log, git_log, superset_log)
|
||||
result = await self._handle_deploy(dashboard_id, env_id)
|
||||
elif operation == "history":
|
||||
result = {"status": "success", "message": "History available via API"}
|
||||
else:
|
||||
log.error(f"Unknown operation: {operation}")
|
||||
logger.error(f"[GitPlugin.execute][Coherence:Failed] Unknown operation: {operation}")
|
||||
raise ValueError(f"Unknown operation: {operation}")
|
||||
|
||||
log.info(f"Operation {operation} completed.")
|
||||
logger.info(f"[GitPlugin.execute][Exit] Operation {operation} completed.")
|
||||
return result
|
||||
# [/DEF:execute:Function]
|
||||
|
||||
@@ -186,13 +176,13 @@ class GitPlugin(PluginBase):
|
||||
# @SIDE_EFFECT: Изменяет файлы в локальной рабочей директории репозитория.
|
||||
# @RELATION: CALLS -> src.services.git_service.GitService.get_repo
|
||||
# @RELATION: CALLS -> src.core.superset_client.SupersetClient.export_dashboard
|
||||
async def _handle_sync(self, dashboard_id: int, source_env_id: Optional[str] = None, log=None, git_log=None, superset_log=None) -> Dict[str, str]:
|
||||
async def _handle_sync(self, dashboard_id: int, source_env_id: Optional[str] = None) -> Dict[str, str]:
|
||||
with belief_scope("GitPlugin._handle_sync"):
|
||||
try:
|
||||
# 1. Получение репозитория
|
||||
repo = self.git_service.get_repo(dashboard_id)
|
||||
repo_path = Path(repo.working_dir)
|
||||
git_log.info(f"Target repo path: {repo_path}")
|
||||
logger.info(f"[_handle_sync][Action] Target repo path: {repo_path}")
|
||||
|
||||
# 2. Настройка клиента Superset
|
||||
env = self._get_env(source_env_id)
|
||||
@@ -200,11 +190,11 @@ class GitPlugin(PluginBase):
|
||||
client.authenticate()
|
||||
|
||||
# 3. Экспорт дашборда
|
||||
superset_log.info(f"Exporting dashboard {dashboard_id} from {env.name}")
|
||||
logger.info(f"[_handle_sync][Action] Exporting dashboard {dashboard_id} from {env.name}")
|
||||
zip_bytes, _ = client.export_dashboard(dashboard_id)
|
||||
|
||||
# 4. Распаковка с выравниванием структуры (flattening)
|
||||
git_log.info(f"Unpacking export to {repo_path}")
|
||||
logger.info(f"[_handle_sync][Action] Unpacking export to {repo_path}")
|
||||
|
||||
# Список папок/файлов, которые мы ожидаем от Superset
|
||||
managed_dirs = ["dashboards", "charts", "datasets", "databases"]
|
||||
@@ -228,7 +218,7 @@ class GitPlugin(PluginBase):
|
||||
raise ValueError("Export ZIP is empty")
|
||||
|
||||
root_folder = namelist[0].split('/')[0]
|
||||
git_log.info(f"Detected root folder in ZIP: {root_folder}")
|
||||
logger.info(f"[_handle_sync][Action] Detected root folder in ZIP: {root_folder}")
|
||||
|
||||
for member in zf.infolist():
|
||||
if member.filename.startswith(root_folder + "/") and len(member.filename) > len(root_folder) + 1:
|
||||
@@ -264,13 +254,10 @@ class GitPlugin(PluginBase):
|
||||
# @POST: Дашборд импортирован в целевой Superset.
|
||||
# @PARAM: dashboard_id (int) - ID дашборда.
|
||||
# @PARAM: env_id (str) - ID целевого окружения.
|
||||
# @PARAM: log - Main logger instance.
|
||||
# @PARAM: git_log - Git-specific logger instance.
|
||||
# @PARAM: superset_log - Superset API-specific logger instance.
|
||||
# @RETURN: Dict[str, Any] - Результат деплоя.
|
||||
# @SIDE_EFFECT: Создает и удаляет временный ZIP-файл.
|
||||
# @RELATION: CALLS -> src.core.superset_client.SupersetClient.import_dashboard
|
||||
async def _handle_deploy(self, dashboard_id: int, env_id: str, log=None, git_log=None, superset_log=None) -> Dict[str, Any]:
|
||||
async def _handle_deploy(self, dashboard_id: int, env_id: str) -> Dict[str, Any]:
|
||||
with belief_scope("GitPlugin._handle_deploy"):
|
||||
try:
|
||||
if not env_id:
|
||||
@@ -281,7 +268,7 @@ class GitPlugin(PluginBase):
|
||||
repo_path = Path(repo.working_dir)
|
||||
|
||||
# 2. Упаковка в ZIP
|
||||
git_log.info(f"Packing repository {repo_path} for deployment.")
|
||||
logger.info(f"[_handle_deploy][Action] Packing repository {repo_path} for deployment.")
|
||||
zip_buffer = io.BytesIO()
|
||||
|
||||
# Superset expects a root directory in the ZIP (e.g., dashboard_export_20240101T000000/)
|
||||
@@ -310,7 +297,7 @@ class GitPlugin(PluginBase):
|
||||
|
||||
# 4. Импорт
|
||||
temp_zip_path = repo_path / f"deploy_{dashboard_id}.zip"
|
||||
git_log.info(f"Saving temporary zip to {temp_zip_path}")
|
||||
logger.info(f"[_handle_deploy][Action] Saving temporary zip to {temp_zip_path}")
|
||||
with open(temp_zip_path, "wb") as f:
|
||||
f.write(zip_buffer.getvalue())
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# [DEF:backend/src/plugins/llm_analysis/__init__.py:Module]
|
||||
# @TIER: TRIVIAL
|
||||
# @PURPOSE: Initialize the LLM Analysis plugin package.
|
||||
# @LAYER: Domain
|
||||
|
||||
"""
|
||||
LLM Analysis Plugin for automated dashboard validation and dataset documentation.
|
||||
@@ -9,4 +8,4 @@ LLM Analysis Plugin for automated dashboard validation and dataset documentation
|
||||
|
||||
from .plugin import DashboardValidationPlugin, DocumentationPlugin
|
||||
|
||||
# [/DEF:backend/src/plugins/llm_analysis/__init__.py:Module]
|
||||
# [/DEF:backend/src/plugins/llm_analysis/__init__.py]
|
||||
@@ -24,7 +24,7 @@ class LLMProviderConfig(BaseModel):
|
||||
provider_type: LLMProviderType
|
||||
name: str
|
||||
base_url: str
|
||||
api_key: Optional[str] = None
|
||||
api_key: str
|
||||
default_model: str
|
||||
is_active: bool = True
|
||||
# [/DEF:LLMProviderConfig:Class]
|
||||
@@ -58,4 +58,4 @@ class ValidationResult(BaseModel):
|
||||
raw_response: Optional[str] = None
|
||||
# [/DEF:ValidationResult:Class]
|
||||
|
||||
# [/DEF:backend/src/plugins/llm_analysis/models.py:Module]
|
||||
# [/DEF:backend/src/plugins/llm_analysis/models.py]
|
||||
@@ -1,34 +1,24 @@
|
||||
# [DEF:backend/src/plugins/llm_analysis/plugin.py:Module]
|
||||
# [DEF:backend.src.plugins.llm_analysis.plugin:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: plugin, llm, analysis, documentation
|
||||
# @PURPOSE: Implements DashboardValidationPlugin and DocumentationPlugin.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: INHERITS -> backend.src.core.plugin_base.PluginBase
|
||||
# @RELATION: CALLS -> backend.src.plugins.llm_analysis.service.ScreenshotService
|
||||
# @RELATION: CALLS -> backend.src.plugins.llm_analysis.service.LLMClient
|
||||
# @RELATION: CALLS -> backend.src.services.llm_provider.LLMProviderService
|
||||
# @RELATION: USES -> TaskContext
|
||||
# @INVARIANT: All LLM interactions must be executed as asynchronous tasks.
|
||||
# @RELATION: INHERITS_FROM -> backend.src.core.plugin_base.PluginBase
|
||||
|
||||
from typing import Dict, Any, Optional, List
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from ...core.plugin_base import PluginBase
|
||||
from ...core.logger import belief_scope, logger
|
||||
from ...core.database import SessionLocal
|
||||
from ...core.config_manager import ConfigManager
|
||||
from ...services.llm_provider import LLMProviderService
|
||||
from ...core.superset_client import SupersetClient
|
||||
from .service import ScreenshotService, LLMClient
|
||||
from .models import LLMProviderType, ValidationStatus, ValidationResult, DetectedIssue
|
||||
from ...models.llm import ValidationRecord
|
||||
from ...core.task_manager.context import TaskContext
|
||||
|
||||
# [DEF:DashboardValidationPlugin:Class]
|
||||
# @PURPOSE: Plugin for automated dashboard health analysis using LLMs.
|
||||
# @RELATION: IMPLEMENTS -> backend.src.core.plugin_base.PluginBase
|
||||
class DashboardValidationPlugin(PluginBase):
|
||||
@property
|
||||
def id(self) -> str:
|
||||
@@ -57,28 +47,14 @@ class DashboardValidationPlugin(PluginBase):
|
||||
"required": ["dashboard_id", "environment_id", "provider_id"]
|
||||
}
|
||||
|
||||
# [DEF:DashboardValidationPlugin.execute:Function]
|
||||
# @PURPOSE: Executes the dashboard validation task with TaskContext support.
|
||||
# @PARAM: params (Dict[str, Any]) - Validation parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: params contains dashboard_id, environment_id, and provider_id.
|
||||
# @POST: Returns a dictionary with validation results and persists them to the database.
|
||||
# @SIDE_EFFECT: Captures a screenshot, calls LLM API, and writes to the database.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
with belief_scope("execute", f"plugin_id={self.id}"):
|
||||
# Use TaskContext logger if available, otherwise fall back to app logger
|
||||
log = context.logger if context else logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
llm_log = log.with_source("llm") if context else log
|
||||
screenshot_log = log.with_source("screenshot") if context else log
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
|
||||
log.info(f"Executing {self.name} with params: {params}")
|
||||
logger.info(f"Executing {self.name} with params: {params}")
|
||||
|
||||
dashboard_id = params.get("dashboard_id")
|
||||
env_id = params.get("environment_id")
|
||||
provider_id = params.get("provider_id")
|
||||
task_id = params.get("_task_id")
|
||||
|
||||
db = SessionLocal()
|
||||
try:
|
||||
@@ -87,91 +63,34 @@ class DashboardValidationPlugin(PluginBase):
|
||||
config_mgr = get_config_manager()
|
||||
env = config_mgr.get_environment(env_id)
|
||||
if not env:
|
||||
log.error(f"Environment {env_id} not found")
|
||||
raise ValueError(f"Environment {env_id} not found")
|
||||
|
||||
# 2. Get LLM Provider
|
||||
llm_service = LLMProviderService(db)
|
||||
db_provider = llm_service.get_provider(provider_id)
|
||||
if not db_provider:
|
||||
log.error(f"LLM Provider {provider_id} not found")
|
||||
raise ValueError(f"LLM Provider {provider_id} not found")
|
||||
|
||||
llm_log.debug(f"Retrieved provider config:")
|
||||
llm_log.debug(f" Provider ID: {db_provider.id}")
|
||||
llm_log.debug(f" Provider Name: {db_provider.name}")
|
||||
llm_log.debug(f" Provider Type: {db_provider.provider_type}")
|
||||
llm_log.debug(f" Base URL: {db_provider.base_url}")
|
||||
llm_log.debug(f" Default Model: {db_provider.default_model}")
|
||||
llm_log.debug(f" Is Active: {db_provider.is_active}")
|
||||
|
||||
api_key = llm_service.get_decrypted_api_key(provider_id)
|
||||
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
|
||||
|
||||
# Check if API key was successfully decrypted
|
||||
if not api_key:
|
||||
raise ValueError(
|
||||
f"Failed to decrypt API key for provider {provider_id}. "
|
||||
f"The provider may have been encrypted with a different encryption key. "
|
||||
f"Please update the provider with a new API key through the UI."
|
||||
)
|
||||
|
||||
# 3. Capture Screenshot
|
||||
screenshot_service = ScreenshotService(env)
|
||||
os.makedirs("ss-tools-storage/screenshots", exist_ok=True)
|
||||
screenshot_path = f"ss-tools-storage/screenshots/{dashboard_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
|
||||
|
||||
storage_root = config_mgr.get_config().settings.storage.root_path
|
||||
screenshots_dir = os.path.join(storage_root, "screenshots")
|
||||
os.makedirs(screenshots_dir, exist_ok=True)
|
||||
|
||||
filename = f"{dashboard_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
|
||||
screenshot_path = os.path.join(screenshots_dir, filename)
|
||||
|
||||
screenshot_log.info(f"Capturing screenshot for dashboard {dashboard_id}")
|
||||
await screenshot_service.capture_dashboard(dashboard_id, screenshot_path)
|
||||
screenshot_log.debug(f"Screenshot saved to: {screenshot_path}")
|
||||
|
||||
# 4. Fetch Logs (from Environment /api/v1/log/)
|
||||
# 4. Fetch Logs (Last 100 lines from backend.log)
|
||||
logs = []
|
||||
try:
|
||||
client = SupersetClient(env)
|
||||
|
||||
# Calculate time window (last 24 hours)
|
||||
start_time = (datetime.now() - timedelta(hours=24)).isoformat()
|
||||
|
||||
# Construct filter for logs
|
||||
# Note: We filter by dashboard_id matching the object
|
||||
query_params = {
|
||||
"filters": [
|
||||
{"col": "dashboard_id", "opr": "eq", "value": dashboard_id},
|
||||
{"col": "dttm", "opr": "gt", "value": start_time}
|
||||
],
|
||||
"order_column": "dttm",
|
||||
"order_direction": "desc",
|
||||
"page": 0,
|
||||
"page_size": 100
|
||||
}
|
||||
|
||||
superset_log.debug(f"Fetching logs for dashboard {dashboard_id}")
|
||||
response = client.network.request(
|
||||
method="GET",
|
||||
endpoint="/log/",
|
||||
params={"q": json.dumps(query_params)}
|
||||
)
|
||||
|
||||
if isinstance(response, dict) and "result" in response:
|
||||
for item in response["result"]:
|
||||
action = item.get("action", "unknown")
|
||||
dttm = item.get("dttm", "")
|
||||
details = item.get("json", "")
|
||||
logs.append(f"[{dttm}] {action}: {details}")
|
||||
|
||||
if not logs:
|
||||
logs = ["No recent logs found for this dashboard."]
|
||||
superset_log.debug("No recent logs found for this dashboard")
|
||||
|
||||
except Exception as e:
|
||||
superset_log.warning(f"Failed to fetch logs from environment: {e}")
|
||||
logs = [f"Error fetching remote logs: {str(e)}"]
|
||||
log_file = "backend.log"
|
||||
if os.path.exists(log_file):
|
||||
with open(log_file, "r") as f:
|
||||
# Read last 100 lines
|
||||
all_lines = f.readlines()
|
||||
logs = all_lines[-100:]
|
||||
|
||||
if not logs:
|
||||
logs = ["No logs found in backend.log"]
|
||||
|
||||
# 5. Analyze with LLM
|
||||
llm_client = LLMClient(
|
||||
@@ -181,15 +100,7 @@ class DashboardValidationPlugin(PluginBase):
|
||||
default_model=db_provider.default_model
|
||||
)
|
||||
|
||||
llm_log.info(f"Analyzing dashboard {dashboard_id} with LLM")
|
||||
analysis = await llm_client.analyze_dashboard(screenshot_path, logs)
|
||||
|
||||
# Log analysis summary to task logs for better visibility
|
||||
llm_log.info(f"[ANALYSIS_SUMMARY] Status: {analysis['status']}")
|
||||
llm_log.info(f"[ANALYSIS_SUMMARY] Summary: {analysis['summary']}")
|
||||
if analysis.get("issues"):
|
||||
for i, issue in enumerate(analysis["issues"]):
|
||||
llm_log.info(f"[ANALYSIS_ISSUE][{i+1}] {issue.get('severity')}: {issue.get('message')} (Location: {issue.get('location', 'N/A')})")
|
||||
|
||||
# 6. Persist Result
|
||||
validation_result = ValidationResult(
|
||||
@@ -214,24 +125,19 @@ class DashboardValidationPlugin(PluginBase):
|
||||
|
||||
# 7. Notification on failure (US1 / FR-015)
|
||||
if validation_result.status == ValidationStatus.FAIL:
|
||||
log.warning(f"Dashboard {dashboard_id} validation FAILED. Summary: {validation_result.summary}")
|
||||
logger.warning(f"Dashboard {dashboard_id} validation FAILED. Summary: {validation_result.summary}")
|
||||
# Placeholder for Email/Pulse notification dispatch
|
||||
# In a real implementation, we would call a NotificationService here
|
||||
# with a payload containing the summary and a link to the report.
|
||||
|
||||
# Final log to ensure all analysis is visible in task logs
|
||||
log.info(f"Validation completed for dashboard {dashboard_id}. Status: {validation_result.status.value}")
|
||||
|
||||
return validation_result.dict()
|
||||
|
||||
finally:
|
||||
db.close()
|
||||
# [/DEF:DashboardValidationPlugin.execute:Function]
|
||||
# [/DEF:DashboardValidationPlugin:Class]
|
||||
|
||||
# [DEF:DocumentationPlugin:Class]
|
||||
# @PURPOSE: Plugin for automated dataset documentation using LLMs.
|
||||
# @RELATION: IMPLEMENTS -> backend.src.core.plugin_base.PluginBase
|
||||
class DocumentationPlugin(PluginBase):
|
||||
@property
|
||||
def id(self) -> str:
|
||||
@@ -260,23 +166,9 @@ class DocumentationPlugin(PluginBase):
|
||||
"required": ["dataset_id", "environment_id", "provider_id"]
|
||||
}
|
||||
|
||||
# [DEF:DocumentationPlugin.execute:Function]
|
||||
# @PURPOSE: Executes the dataset documentation task with TaskContext support.
|
||||
# @PARAM: params (Dict[str, Any]) - Documentation parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: params contains dataset_id, environment_id, and provider_id.
|
||||
# @POST: Returns generated documentation and updates the dataset in Superset.
|
||||
# @SIDE_EFFECT: Calls LLM API and updates dataset metadata in Superset.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
with belief_scope("execute", f"plugin_id={self.id}"):
|
||||
# Use TaskContext logger if available, otherwise fall back to app logger
|
||||
log = context.logger if context else logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
llm_log = log.with_source("llm") if context else log
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
|
||||
log.info(f"Executing {self.name} with params: {params}")
|
||||
logger.info(f"Executing {self.name} with params: {params}")
|
||||
|
||||
dataset_id = params.get("dataset_id")
|
||||
env_id = params.get("environment_id")
|
||||
@@ -289,40 +181,24 @@ class DocumentationPlugin(PluginBase):
|
||||
config_mgr = get_config_manager()
|
||||
env = config_mgr.get_environment(env_id)
|
||||
if not env:
|
||||
log.error(f"Environment {env_id} not found")
|
||||
raise ValueError(f"Environment {env_id} not found")
|
||||
|
||||
# 2. Get LLM Provider
|
||||
llm_service = LLMProviderService(db)
|
||||
db_provider = llm_service.get_provider(provider_id)
|
||||
if not db_provider:
|
||||
log.error(f"LLM Provider {provider_id} not found")
|
||||
raise ValueError(f"LLM Provider {provider_id} not found")
|
||||
|
||||
llm_log.debug(f"Retrieved provider config:")
|
||||
llm_log.debug(f" Provider ID: {db_provider.id}")
|
||||
llm_log.debug(f" Provider Name: {db_provider.name}")
|
||||
llm_log.debug(f" Provider Type: {db_provider.provider_type}")
|
||||
llm_log.debug(f" Base URL: {db_provider.base_url}")
|
||||
llm_log.debug(f" Default Model: {db_provider.default_model}")
|
||||
|
||||
api_key = llm_service.get_decrypted_api_key(provider_id)
|
||||
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
|
||||
|
||||
# Check if API key was successfully decrypted
|
||||
if not api_key:
|
||||
raise ValueError(
|
||||
f"Failed to decrypt API key for provider {provider_id}. "
|
||||
f"The provider may have been encrypted with a different encryption key. "
|
||||
f"Please update the provider with a new API key through the UI."
|
||||
)
|
||||
|
||||
# 3. Fetch Metadata (US2 / T024)
|
||||
from ...core.superset_client import SupersetClient
|
||||
client = SupersetClient(env)
|
||||
|
||||
superset_log.debug(f"Fetching dataset {dataset_id}")
|
||||
# Optimistic locking check (T045)
|
||||
dataset = client.get_dataset(int(dataset_id))
|
||||
# dataset structure might vary, ensure we get the right field
|
||||
original_changed_on = dataset.get("changed_on_utc") or dataset.get("result", {}).get("changed_on_utc")
|
||||
|
||||
# Extract columns and existing descriptions
|
||||
columns_data = []
|
||||
@@ -332,7 +208,6 @@ class DocumentationPlugin(PluginBase):
|
||||
"type": col.get("type"),
|
||||
"description": col.get("description")
|
||||
})
|
||||
superset_log.debug(f"Extracted {len(columns_data)} columns from dataset")
|
||||
|
||||
# 4. Construct Prompt & Analyze (US2 / T025)
|
||||
llm_client = LLMClient(
|
||||
@@ -360,10 +235,18 @@ class DocumentationPlugin(PluginBase):
|
||||
"""
|
||||
|
||||
# Using a generic chat completion for text-only US2
|
||||
llm_log.info(f"Generating documentation for dataset {dataset_id}")
|
||||
doc_result = await llm_client.get_json_completion([{"role": "user", "content": prompt}])
|
||||
response = await llm_client.client.chat.completions.create(
|
||||
model=db_provider.default_model,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
response_format={"type": "json_object"}
|
||||
)
|
||||
|
||||
import json
|
||||
doc_result = json.loads(response.choices[0].message.content)
|
||||
|
||||
# 5. Update Metadata (US2 / T026)
|
||||
# This part normally goes to mapping_service, but we implement the logic here for the plugin flow
|
||||
# We'll update the dataset in Superset
|
||||
update_payload = {
|
||||
"description": doc_result["dataset_description"],
|
||||
"columns": []
|
||||
@@ -378,16 +261,12 @@ class DocumentationPlugin(PluginBase):
|
||||
"description": col_doc["description"]
|
||||
})
|
||||
|
||||
superset_log.info(f"Updating dataset {dataset_id} with generated documentation")
|
||||
client.update_dataset(int(dataset_id), update_payload)
|
||||
|
||||
log.info(f"Documentation completed for dataset {dataset_id}")
|
||||
|
||||
return doc_result
|
||||
|
||||
finally:
|
||||
db.close()
|
||||
# [/DEF:DocumentationPlugin.execute:Function]
|
||||
# [/DEF:DocumentationPlugin:Class]
|
||||
|
||||
# [/DEF:backend/src/plugins/llm_analysis/plugin.py:Module]
|
||||
# [/DEF:backend.src.plugins.llm_analysis.plugin:Module]
|
||||
@@ -14,7 +14,6 @@ from ...core.logger import belief_scope, logger
|
||||
# @PARAM: dashboard_id (str) - ID of the dashboard to validate.
|
||||
# @PARAM: cron_expression (str) - Standard cron expression for scheduling.
|
||||
# @PARAM: params (Dict[str, Any]) - Task parameters (environment_id, provider_id).
|
||||
# @SIDE_EFFECT: Adds a job to the scheduler service.
|
||||
def schedule_dashboard_validation(dashboard_id: str, cron_expression: str, params: Dict[str, Any]):
|
||||
with belief_scope("schedule_dashboard_validation", f"dashboard_id={dashboard_id}"):
|
||||
scheduler = get_scheduler_service()
|
||||
@@ -39,12 +38,7 @@ def schedule_dashboard_validation(dashboard_id: str, cron_expression: str, param
|
||||
**_parse_cron(cron_expression)
|
||||
)
|
||||
logger.info(f"Scheduled validation for dashboard {dashboard_id} with cron {cron_expression}")
|
||||
# [/DEF:schedule_dashboard_validation:Function]
|
||||
|
||||
# [DEF:_parse_cron:Function]
|
||||
# @PURPOSE: Basic cron parser placeholder.
|
||||
# @PARAM: cron (str) - Cron expression.
|
||||
# @RETURN: Dict[str, str] - Parsed cron parts.
|
||||
def _parse_cron(cron: str) -> Dict[str, str]:
|
||||
# Basic cron parser placeholder
|
||||
parts = cron.split()
|
||||
@@ -57,6 +51,6 @@ def _parse_cron(cron: str) -> Dict[str, str]:
|
||||
"month": parts[3],
|
||||
"day_of_week": parts[4]
|
||||
}
|
||||
# [/DEF:_parse_cron:Function]
|
||||
# [/DEF:schedule_dashboard_validation:Function]
|
||||
|
||||
# [/DEF:backend/src/plugins/llm_analysis/scheduler.py:Module]
|
||||
# [/DEF:backend/src/plugins/llm_analysis/scheduler.py]
|
||||
@@ -1,4 +1,4 @@
|
||||
# [DEF:backend/src/plugins/llm_analysis/service.py:Module]
|
||||
# [DEF:backend.src.plugins.llm_analysis.service:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: service, llm, screenshot, playwright, openai
|
||||
# @PURPOSE: Services for LLM interaction and dashboard screenshots.
|
||||
@@ -6,17 +6,12 @@
|
||||
# @RELATION: DEPENDS_ON -> playwright
|
||||
# @RELATION: DEPENDS_ON -> openai
|
||||
# @RELATION: DEPENDS_ON -> tenacity
|
||||
# @INVARIANT: Screenshots must be 1920px width and capture full page height.
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
import io
|
||||
from typing import List, Optional, Dict, Any
|
||||
from PIL import Image
|
||||
from playwright.async_api import async_playwright
|
||||
from openai import AsyncOpenAI, RateLimitError, AuthenticationError as OpenAIAuthenticationError
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception
|
||||
from openai import AsyncOpenAI, RateLimitError
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
|
||||
from .models import LLMProviderType, ValidationResult, ValidationStatus, DetectedIssue
|
||||
from ...core.logger import belief_scope, logger
|
||||
from ...core.config_models import Environment
|
||||
@@ -24,559 +19,142 @@ from ...core.config_models import Environment
|
||||
# [DEF:ScreenshotService:Class]
|
||||
# @PURPOSE: Handles capturing screenshots of Superset dashboards.
|
||||
class ScreenshotService:
|
||||
# [DEF:ScreenshotService.__init__:Function]
|
||||
# @PURPOSE: Initializes the ScreenshotService with environment configuration.
|
||||
# @PRE: env is a valid Environment object.
|
||||
def __init__(self, env: Environment):
|
||||
self.env = env
|
||||
# [/DEF:ScreenshotService.__init__:Function]
|
||||
|
||||
# [DEF:ScreenshotService.capture_dashboard:Function]
|
||||
# @PURPOSE: Captures a full-page screenshot of a dashboard using Playwright and CDP.
|
||||
# @PRE: dashboard_id is a valid string, output_path is a writable path.
|
||||
# @POST: Returns True if screenshot is saved successfully.
|
||||
# @SIDE_EFFECT: Launches a browser, performs UI login, switches tabs, and writes a PNG file.
|
||||
# @UX_STATE: [Navigating] -> Loading dashboard UI
|
||||
# @UX_STATE: [TabSwitching] -> Iterating through dashboard tabs to trigger lazy loading
|
||||
# @UX_STATE: [CalculatingHeight] -> Determining dashboard dimensions
|
||||
# @UX_STATE: [Capturing] -> Executing CDP screenshot
|
||||
# [DEF:capture_dashboard:Function]
|
||||
# @PURPOSE: Captures a screenshot of a dashboard using Playwright.
|
||||
# @PARAM: dashboard_id (str) - ID of the dashboard.
|
||||
# @PARAM: output_path (str) - Path to save the screenshot.
|
||||
# @RETURN: bool - True if successful.
|
||||
async def capture_dashboard(self, dashboard_id: str, output_path: str) -> bool:
|
||||
with belief_scope("capture_dashboard", f"dashboard_id={dashboard_id}"):
|
||||
logger.info(f"Capturing screenshot for dashboard {dashboard_id}")
|
||||
async with async_playwright() as p:
|
||||
browser = await p.chromium.launch(
|
||||
headless=True,
|
||||
args=[
|
||||
"--disable-blink-features=AutomationControlled",
|
||||
"--disable-infobars",
|
||||
"--no-sandbox"
|
||||
]
|
||||
)
|
||||
# Set a realistic user agent to avoid 403 Forbidden from OpenResty/WAF
|
||||
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
|
||||
# Construct base UI URL from environment (strip /api/v1 suffix)
|
||||
base_ui_url = self.env.url.rstrip("/")
|
||||
if base_ui_url.endswith("/api/v1"):
|
||||
base_ui_url = base_ui_url[:-len("/api/v1")]
|
||||
|
||||
# Create browser context with realistic headers
|
||||
context = await browser.new_context(
|
||||
viewport={'width': 1280, 'height': 720},
|
||||
user_agent=user_agent,
|
||||
extra_http_headers={
|
||||
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
|
||||
"Accept-Language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7",
|
||||
"Upgrade-Insecure-Requests": "1",
|
||||
"Sec-Fetch-Dest": "document",
|
||||
"Sec-Fetch-Mode": "navigate",
|
||||
"Sec-Fetch-Site": "none",
|
||||
"Sec-Fetch-User": "?1"
|
||||
}
|
||||
)
|
||||
logger.info("Browser context created successfully")
|
||||
|
||||
browser = await p.chromium.launch(headless=True)
|
||||
context = await browser.new_context(viewport={'width': 1280, 'height': 720})
|
||||
page = await context.new_page()
|
||||
# Bypass navigator.webdriver detection
|
||||
await page.add_init_script("delete Object.getPrototypeOf(navigator).webdriver")
|
||||
|
||||
# 1. Navigate to login page and authenticate
|
||||
login_url = f"{base_ui_url.rstrip('/')}/login/"
|
||||
logger.info(f"[DEBUG] Navigating to login page: {login_url}")
|
||||
|
||||
response = await page.goto(login_url, wait_until="networkidle", timeout=60000)
|
||||
if response:
|
||||
logger.info(f"[DEBUG] Login page response status: {response.status}")
|
||||
|
||||
# Wait for login form to be ready
|
||||
await page.wait_for_load_state("domcontentloaded")
|
||||
|
||||
# More exhaustive list of selectors for various Superset versions/themes
|
||||
selectors = {
|
||||
"username": ['input[name="username"]', 'input#username', 'input[placeholder*="Username"]', 'input[type="text"]'],
|
||||
"password": ['input[name="password"]', 'input#password', 'input[placeholder*="Password"]', 'input[type="password"]'],
|
||||
"submit": ['button[type="submit"]', 'button#submit', '.btn-primary', 'input[type="submit"]']
|
||||
}
|
||||
logger.info(f"[DEBUG] Attempting to find login form elements...")
|
||||
|
||||
# 1. Authenticate via API to get tokens
|
||||
from ...core.superset_client import SupersetClient
|
||||
client = SupersetClient(self.env)
|
||||
try:
|
||||
# Find and fill username
|
||||
u_selector = None
|
||||
for s in selectors["username"]:
|
||||
count = await page.locator(s).count()
|
||||
logger.info(f"[DEBUG] Selector '{s}': {count} elements found")
|
||||
if count > 0:
|
||||
u_selector = s
|
||||
break
|
||||
|
||||
if not u_selector:
|
||||
# Log all input fields on the page for debugging
|
||||
all_inputs = await page.locator('input').all()
|
||||
logger.info(f"[DEBUG] Found {len(all_inputs)} input fields on page")
|
||||
for i, inp in enumerate(all_inputs[:5]): # Log first 5
|
||||
inp_type = await inp.get_attribute('type')
|
||||
inp_name = await inp.get_attribute('name')
|
||||
inp_id = await inp.get_attribute('id')
|
||||
logger.info(f"[DEBUG] Input {i}: type={inp_type}, name={inp_name}, id={inp_id}")
|
||||
raise RuntimeError("Could not find username input field on login page")
|
||||
|
||||
logger.info(f"[DEBUG] Filling username field with selector: {u_selector}")
|
||||
await page.fill(u_selector, self.env.username)
|
||||
|
||||
# Find and fill password
|
||||
p_selector = None
|
||||
for s in selectors["password"]:
|
||||
if await page.locator(s).count() > 0:
|
||||
p_selector = s
|
||||
break
|
||||
|
||||
if not p_selector:
|
||||
raise RuntimeError("Could not find password input field on login page")
|
||||
|
||||
logger.info(f"[DEBUG] Filling password field with selector: {p_selector}")
|
||||
await page.fill(p_selector, self.env.password)
|
||||
|
||||
# Click submit
|
||||
s_selector = selectors["submit"][0]
|
||||
for s in selectors["submit"]:
|
||||
if await page.locator(s).count() > 0:
|
||||
s_selector = s
|
||||
break
|
||||
|
||||
logger.info(f"[DEBUG] Clicking submit button with selector: {s_selector}")
|
||||
await page.click(s_selector)
|
||||
|
||||
# Wait for navigation after login
|
||||
await page.wait_for_load_state("networkidle", timeout=30000)
|
||||
|
||||
# Check if login was successful
|
||||
if "/login" in page.url:
|
||||
# Check for error messages on page
|
||||
error_msg = await page.locator(".alert-danger, .error-message").text_content() if await page.locator(".alert-danger, .error-message").count() > 0 else "Unknown error"
|
||||
logger.error(f"[DEBUG] Login failed. Still on login page. Error: {error_msg}")
|
||||
debug_path = output_path.replace(".png", "_debug_failed_login.png")
|
||||
await page.screenshot(path=debug_path)
|
||||
raise RuntimeError(f"Login failed: {error_msg}. Debug screenshot saved to {debug_path}")
|
||||
|
||||
logger.info(f"[DEBUG] Login successful. Current URL: {page.url}")
|
||||
|
||||
# Check cookies after successful login
|
||||
page_cookies = await context.cookies()
|
||||
logger.info(f"[DEBUG] Cookies after login: {len(page_cookies)}")
|
||||
for c in page_cookies:
|
||||
logger.info(f"[DEBUG] Cookie: name={c['name']}, domain={c['domain']}, value={c.get('value', '')[:20]}...")
|
||||
tokens = client.authenticate()
|
||||
access_token = tokens.get("access_token")
|
||||
|
||||
# Set JWT in localStorage if possible, or use as cookie
|
||||
# Superset UI uses session cookies, but we can try to set the Authorization header
|
||||
# or inject the token into the session.
|
||||
# For now, we'll use the token to set a cookie if we can determine the name,
|
||||
# but the most reliable way for Playwright is often still the UI login
|
||||
# UNLESS we use the API to set a session cookie.
|
||||
logger.info("API Authentication successful")
|
||||
except Exception as e:
|
||||
page_title = await page.title()
|
||||
logger.error(f"UI Login failed. Page title: {page_title}, URL: {page.url}, Error: {str(e)}")
|
||||
debug_path = output_path.replace(".png", "_debug_failed_login.png")
|
||||
await page.screenshot(path=debug_path)
|
||||
raise RuntimeError(f"Login failed: {str(e)}. Debug screenshot saved to {debug_path}")
|
||||
logger.warning(f"API Authentication failed: {e}. Falling back to UI login.")
|
||||
|
||||
# 2. Navigate to dashboard
|
||||
# @UX_STATE: [Navigating] -> Loading dashboard UI
|
||||
dashboard_url = f"{base_ui_url.rstrip('/')}/superset/dashboard/{dashboard_id}/?standalone=true"
|
||||
dashboard_url = f"{self.env.url}/superset/dashboard/{dashboard_id}/"
|
||||
logger.info(f"Navigating to {dashboard_url}")
|
||||
|
||||
if base_ui_url.startswith("https://") and dashboard_url.startswith("http://"):
|
||||
dashboard_url = dashboard_url.replace("http://", "https://")
|
||||
# We still go to the URL first
|
||||
await page.goto(dashboard_url)
|
||||
await page.wait_for_load_state("networkidle")
|
||||
|
||||
logger.info(f"[DEBUG] Navigating to dashboard: {dashboard_url}")
|
||||
|
||||
# Use networkidle to ensure all initial assets are loaded
|
||||
response = await page.goto(dashboard_url, wait_until="networkidle", timeout=60000)
|
||||
|
||||
if response:
|
||||
logger.info(f"[DEBUG] Dashboard navigation response status: {response.status}, URL: {response.url}")
|
||||
|
||||
try:
|
||||
# Wait for the dashboard grid to be present
|
||||
await page.wait_for_selector('.dashboard-component, .dashboard-header, [data-test="dashboard-grid"]', timeout=30000)
|
||||
logger.info(f"[DEBUG] Dashboard container loaded")
|
||||
# 3. Check if we are redirected to login
|
||||
if "/login" in page.url:
|
||||
logger.info(f"Redirected to login: {page.url}. Filling credentials from Environment.")
|
||||
|
||||
# More exhaustive list of selectors for various Superset versions/themes
|
||||
selectors = {
|
||||
"username": ['input[name="username"]', 'input#username', 'input[placeholder*="Username"]'],
|
||||
"password": ['input[name="password"]', 'input#password', 'input[placeholder*="Password"]'],
|
||||
"submit": ['button[type="submit"]', 'button#submit', '.btn-primary']
|
||||
}
|
||||
|
||||
# Wait for charts to finish loading (Superset uses loading spinners/skeletons)
|
||||
# We wait until loading indicators disappear or a timeout occurs
|
||||
try:
|
||||
# Wait for loading indicators to disappear
|
||||
await page.wait_for_selector('.loading, .ant-skeleton, .spinner', state="hidden", timeout=60000)
|
||||
logger.info(f"[DEBUG] Loading indicators hidden")
|
||||
except:
|
||||
logger.warning(f"[DEBUG] Timeout waiting for loading indicators to hide")
|
||||
|
||||
# Wait for charts to actually render their content (e.g., ECharts, NVD3)
|
||||
# We look for common chart containers that should have content
|
||||
try:
|
||||
await page.wait_for_selector('.chart-container canvas, .slice_container svg, .superset-chart-canvas, .grid-content .chart-container', timeout=60000)
|
||||
logger.info(f"[DEBUG] Chart content detected")
|
||||
except:
|
||||
logger.warning(f"[DEBUG] Timeout waiting for chart content")
|
||||
|
||||
# Additional check: wait for all chart containers to have non-empty content
|
||||
logger.info(f"[DEBUG] Waiting for all charts to have rendered content...")
|
||||
await page.wait_for_function("""() => {
|
||||
const charts = document.querySelectorAll('.chart-container, .slice_container');
|
||||
if (charts.length === 0) return true; // No charts to wait for
|
||||
# Find and fill username
|
||||
u_selector = None
|
||||
for s in selectors["username"]:
|
||||
if await page.locator(s).count() > 0:
|
||||
u_selector = s
|
||||
break
|
||||
|
||||
// Check if all charts have rendered content (canvas, svg, or non-empty div)
|
||||
return Array.from(charts).every(chart => {
|
||||
const hasCanvas = chart.querySelector('canvas') !== null;
|
||||
const hasSvg = chart.querySelector('svg') !== null;
|
||||
const hasContent = chart.innerText.trim().length > 0 || chart.children.length > 0;
|
||||
return hasCanvas || hasSvg || hasContent;
|
||||
});
|
||||
}""", timeout=60000)
|
||||
logger.info(f"[DEBUG] All charts have rendered content")
|
||||
|
||||
# Scroll to bottom and back to top to trigger lazy loading of all charts
|
||||
logger.info(f"[DEBUG] Scrolling to trigger lazy loading...")
|
||||
await page.evaluate("""async () => {
|
||||
const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
|
||||
for (let i = 0; i < document.body.scrollHeight; i += 500) {
|
||||
window.scrollTo(0, i);
|
||||
await delay(100);
|
||||
}
|
||||
window.scrollTo(0, 0);
|
||||
await delay(500);
|
||||
}""")
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"[DEBUG] Dashboard content wait failed: {e}, proceeding anyway after delay")
|
||||
|
||||
# Final stabilization delay - increased for complex dashboards
|
||||
logger.info(f"[DEBUG] Final stabilization delay...")
|
||||
await asyncio.sleep(15)
|
||||
|
||||
# Logic to handle tabs and full-page capture
|
||||
try:
|
||||
# 1. Handle Tabs (Recursive switching)
|
||||
# @UX_STATE: [TabSwitching] -> Iterating through dashboard tabs to trigger lazy loading
|
||||
processed_tabs = set()
|
||||
|
||||
async def switch_tabs(depth=0):
|
||||
if depth > 3: return # Limit recursion depth
|
||||
|
||||
tab_selectors = [
|
||||
'.ant-tabs-nav-list .ant-tabs-tab',
|
||||
'.dashboard-component-tabs .ant-tabs-tab',
|
||||
'[data-test="dashboard-component-tabs"] .ant-tabs-tab'
|
||||
]
|
||||
|
||||
found_tabs = []
|
||||
for selector in tab_selectors:
|
||||
found_tabs = await page.locator(selector).all()
|
||||
if found_tabs: break
|
||||
if not u_selector:
|
||||
raise RuntimeError("Could not find username input field")
|
||||
|
||||
if found_tabs:
|
||||
logger.info(f"[DEBUG][TabSwitching] Found {len(found_tabs)} tabs at depth {depth}")
|
||||
for i, tab in enumerate(found_tabs):
|
||||
try:
|
||||
tab_text = (await tab.inner_text()).strip()
|
||||
tab_id = f"{depth}_{i}_{tab_text}"
|
||||
|
||||
if tab_id in processed_tabs:
|
||||
continue
|
||||
|
||||
if await tab.is_visible():
|
||||
logger.info(f"[DEBUG][TabSwitching] Switching to tab: {tab_text}")
|
||||
processed_tabs.add(tab_id)
|
||||
|
||||
is_active = "ant-tabs-tab-active" in (await tab.get_attribute("class") or "")
|
||||
if not is_active:
|
||||
await tab.click()
|
||||
await asyncio.sleep(2) # Wait for content to render
|
||||
|
||||
await switch_tabs(depth + 1)
|
||||
except Exception as tab_e:
|
||||
logger.warning(f"[DEBUG][TabSwitching] Failed to process tab {i}: {tab_e}")
|
||||
await page.fill(u_selector, self.env.username)
|
||||
|
||||
# Find and fill password
|
||||
p_selector = None
|
||||
for s in selectors["password"]:
|
||||
if await page.locator(s).count() > 0:
|
||||
p_selector = s
|
||||
break
|
||||
|
||||
if not p_selector:
|
||||
raise RuntimeError("Could not find password input field")
|
||||
|
||||
try:
|
||||
first_tab = found_tabs[0]
|
||||
if "ant-tabs-tab-active" not in (await first_tab.get_attribute("class") or ""):
|
||||
await first_tab.click()
|
||||
await asyncio.sleep(1)
|
||||
except: pass
|
||||
|
||||
await switch_tabs()
|
||||
|
||||
# 2. Calculate full height for screenshot
|
||||
# @UX_STATE: [CalculatingHeight] -> Determining dashboard dimensions
|
||||
full_height = await page.evaluate("""() => {
|
||||
const body = document.body;
|
||||
const html = document.documentElement;
|
||||
const dashboardContent = document.querySelector('.dashboard-content');
|
||||
await page.fill(p_selector, self.env.password)
|
||||
|
||||
return Math.max(
|
||||
body.scrollHeight, body.offsetHeight,
|
||||
html.clientHeight, html.scrollHeight, html.offsetHeight,
|
||||
dashboardContent ? dashboardContent.scrollHeight + 100 : 0
|
||||
);
|
||||
}""")
|
||||
logger.info(f"[DEBUG] Calculated full height: {full_height}")
|
||||
|
||||
# DIAGNOSTIC: Count chart elements before resize
|
||||
chart_count_before = await page.evaluate("""() => {
|
||||
return {
|
||||
chartContainers: document.querySelectorAll('.chart-container, .slice_container').length,
|
||||
canvasElements: document.querySelectorAll('canvas').length,
|
||||
svgElements: document.querySelectorAll('.chart-container svg, .slice_container svg').length,
|
||||
visibleCharts: document.querySelectorAll('.chart-container:visible, .slice_container:visible').length
|
||||
};
|
||||
}""")
|
||||
logger.info(f"[DIAGNOSTIC] Chart elements BEFORE viewport resize: {chart_count_before}")
|
||||
|
||||
# DIAGNOSTIC: Capture pre-resize screenshot for comparison
|
||||
pre_resize_path = output_path.replace(".png", "_preresize.png")
|
||||
try:
|
||||
await page.screenshot(path=pre_resize_path, full_page=False, timeout=10000)
|
||||
import os
|
||||
pre_resize_size = os.path.getsize(pre_resize_path) if os.path.exists(pre_resize_path) else 0
|
||||
logger.info(f"[DIAGNOSTIC] Pre-resize screenshot saved: {pre_resize_path} ({pre_resize_size} bytes)")
|
||||
except Exception as pre_e:
|
||||
logger.warning(f"[DIAGNOSTIC] Failed to capture pre-resize screenshot: {pre_e}")
|
||||
|
||||
logger.info(f"[DIAGNOSTIC] Resizing viewport from current to 1920x{int(full_height)}")
|
||||
await page.set_viewport_size({"width": 1920, "height": int(full_height)})
|
||||
|
||||
# DIAGNOSTIC: Increased wait time and log timing
|
||||
logger.info("[DIAGNOSTIC] Waiting 10 seconds after viewport resize for re-render...")
|
||||
await asyncio.sleep(10)
|
||||
logger.info("[DIAGNOSTIC] Wait completed")
|
||||
|
||||
# DIAGNOSTIC: Count chart elements after resize and wait
|
||||
chart_count_after = await page.evaluate("""() => {
|
||||
return {
|
||||
chartContainers: document.querySelectorAll('.chart-container, .slice_container').length,
|
||||
canvasElements: document.querySelectorAll('canvas').length,
|
||||
svgElements: document.querySelectorAll('.chart-container svg, .slice_container svg').length,
|
||||
visibleCharts: document.querySelectorAll('.chart-container:visible, .slice_container:visible').length
|
||||
};
|
||||
}""")
|
||||
logger.info(f"[DIAGNOSTIC] Chart elements AFTER viewport resize + wait: {chart_count_after}")
|
||||
|
||||
# DIAGNOSTIC: Check if any charts have error states
|
||||
chart_errors = await page.evaluate("""() => {
|
||||
const errors = [];
|
||||
document.querySelectorAll('.chart-container, .slice_container').forEach((chart, i) => {
|
||||
const errorEl = chart.querySelector('.error, .alert-danger, .ant-alert-error');
|
||||
if (errorEl) {
|
||||
errors.push({index: i, text: errorEl.innerText.substring(0, 100)});
|
||||
}
|
||||
});
|
||||
return errors;
|
||||
}""")
|
||||
if chart_errors:
|
||||
logger.warning(f"[DIAGNOSTIC] Charts with error states detected: {chart_errors}")
|
||||
else:
|
||||
logger.info("[DIAGNOSTIC] No chart error states detected")
|
||||
|
||||
# 3. Take screenshot using CDP to bypass Playwright's font loading wait
|
||||
# @UX_STATE: [Capturing] -> Executing CDP screenshot
|
||||
logger.info("[DEBUG] Attempting full-page screenshot via CDP...")
|
||||
cdp = await page.context.new_cdp_session(page)
|
||||
|
||||
screenshot_data = await cdp.send("Page.captureScreenshot", {
|
||||
"format": "png",
|
||||
"fromSurface": True,
|
||||
"captureBeyondViewport": True
|
||||
})
|
||||
|
||||
image_data = base64.b64decode(screenshot_data["data"])
|
||||
|
||||
with open(output_path, 'wb') as f:
|
||||
f.write(image_data)
|
||||
|
||||
# DIAGNOSTIC: Verify screenshot file
|
||||
import os
|
||||
final_size = os.path.getsize(output_path) if os.path.exists(output_path) else 0
|
||||
logger.info(f"[DIAGNOSTIC] Final screenshot saved: {output_path}")
|
||||
logger.info(f"[DIAGNOSTIC] Final screenshot size: {final_size} bytes ({final_size / 1024:.2f} KB)")
|
||||
|
||||
# DIAGNOSTIC: Get image dimensions
|
||||
try:
|
||||
with Image.open(output_path) as final_img:
|
||||
logger.info(f"[DIAGNOSTIC] Final screenshot dimensions: {final_img.width}x{final_img.height}")
|
||||
except Exception as img_err:
|
||||
logger.warning(f"[DIAGNOSTIC] Could not read final image dimensions: {img_err}")
|
||||
# Click submit
|
||||
s_selector = selectors["submit"][0]
|
||||
for s in selectors["submit"]:
|
||||
if await page.locator(s).count() > 0:
|
||||
s_selector = s
|
||||
break
|
||||
|
||||
logger.info(f"Full-page screenshot saved to {output_path} (via CDP)")
|
||||
except Exception as e:
|
||||
logger.error(f"[DEBUG] Full-page/Tab capture failed: {e}")
|
||||
try:
|
||||
await page.screenshot(path=output_path, full_page=True, timeout=10000)
|
||||
except Exception as e2:
|
||||
logger.error(f"[DEBUG] Fallback screenshot also failed: {e2}")
|
||||
await page.screenshot(path=output_path, timeout=5000)
|
||||
await page.click(s_selector)
|
||||
await page.wait_for_load_state("networkidle")
|
||||
|
||||
# Re-verify we are at the dashboard
|
||||
if "/login" in page.url:
|
||||
# Check for error messages on page
|
||||
error_msg = await page.locator(".alert-danger, .error-message").text_content() if await page.locator(".alert-danger, .error-message").count() > 0 else "Unknown error"
|
||||
raise RuntimeError(f"Login failed after submission: {error_msg}")
|
||||
|
||||
if "/superset/dashboard" not in page.url:
|
||||
logger.info(f"Redirecting back to dashboard after login: {dashboard_url}")
|
||||
await page.goto(dashboard_url)
|
||||
await page.wait_for_load_state("networkidle")
|
||||
|
||||
except Exception as e:
|
||||
page_title = await page.title()
|
||||
logger.error(f"UI Login failed. Page title: {page_title}, URL: {page.url}, Error: {str(e)}")
|
||||
debug_path = output_path.replace(".png", "_debug_failed_login.png")
|
||||
await page.screenshot(path=debug_path)
|
||||
raise RuntimeError(f"Login failed: {str(e)}. Debug screenshot saved to {debug_path}")
|
||||
# Wait a bit more for charts to render
|
||||
await asyncio.sleep(5)
|
||||
|
||||
await page.screenshot(path=output_path, full_page=True)
|
||||
await browser.close()
|
||||
logger.info(f"Screenshot saved to {output_path}")
|
||||
return True
|
||||
# [/DEF:ScreenshotService.capture_dashboard:Function]
|
||||
# [/DEF:ScreenshotService:Class]
|
||||
|
||||
# [DEF:LLMClient:Class]
|
||||
# @PURPOSE: Wrapper for LLM provider APIs.
|
||||
class LLMClient:
|
||||
# [DEF:LLMClient.__init__:Function]
|
||||
# @PURPOSE: Initializes the LLMClient with provider settings.
|
||||
# @PRE: api_key, base_url, and default_model are non-empty strings.
|
||||
def __init__(self, provider_type: LLMProviderType, api_key: str, base_url: str, default_model: str):
|
||||
self.provider_type = provider_type
|
||||
self.api_key = api_key
|
||||
self.base_url = base_url
|
||||
self.default_model = default_model
|
||||
|
||||
# DEBUG: Log initialization parameters (without exposing full API key)
|
||||
logger.info(f"[LLMClient.__init__] Initializing LLM client:")
|
||||
logger.info(f"[LLMClient.__init__] Provider Type: {provider_type}")
|
||||
logger.info(f"[LLMClient.__init__] Base URL: {base_url}")
|
||||
logger.info(f"[LLMClient.__init__] Default Model: {default_model}")
|
||||
logger.info(f"[LLMClient.__init__] API Key (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
|
||||
logger.info(f"[LLMClient.__init__] API Key Length: {len(api_key) if api_key else 0}")
|
||||
|
||||
self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
|
||||
# [/DEF:LLMClient.__init__:Function]
|
||||
|
||||
# [DEF:LLMClient.get_json_completion:Function]
|
||||
# @PURPOSE: Helper to handle LLM calls with JSON mode and fallback parsing.
|
||||
# @PRE: messages is a list of valid message dictionaries.
|
||||
# @POST: Returns a parsed JSON dictionary.
|
||||
# @SIDE_EFFECT: Calls external LLM API.
|
||||
def _should_retry(exception: Exception) -> bool:
|
||||
"""Custom retry predicate that excludes authentication errors."""
|
||||
# Don't retry on authentication errors
|
||||
if isinstance(exception, OpenAIAuthenticationError):
|
||||
return False
|
||||
# Retry on rate limit errors and other exceptions
|
||||
return isinstance(exception, (RateLimitError, Exception))
|
||||
|
||||
# [DEF:analyze_dashboard:Function]
|
||||
# @PURPOSE: Sends dashboard data to LLM for analysis.
|
||||
@retry(
|
||||
stop=stop_after_attempt(5),
|
||||
wait=wait_exponential(multiplier=2, min=5, max=60),
|
||||
retry=retry_if_exception(_should_retry),
|
||||
reraise=True
|
||||
retry=retry_if_exception_type((Exception, RateLimitError))
|
||||
)
|
||||
async def get_json_completion(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
with belief_scope("get_json_completion"):
|
||||
response = None
|
||||
try:
|
||||
try:
|
||||
logger.info(f"[get_json_completion] Attempting LLM call with JSON mode for model: {self.default_model}")
|
||||
logger.info(f"[get_json_completion] Base URL being used: {self.base_url}")
|
||||
logger.info(f"[get_json_completion] Number of messages: {len(messages)}")
|
||||
logger.info(f"[get_json_completion] API Key present: {bool(self.api_key and len(self.api_key) > 0)}")
|
||||
|
||||
response = await self.client.chat.completions.create(
|
||||
model=self.default_model,
|
||||
messages=messages,
|
||||
response_format={"type": "json_object"}
|
||||
)
|
||||
except Exception as e:
|
||||
if "JSON mode is not enabled" in str(e) or "400" in str(e):
|
||||
logger.warning(f"[get_json_completion] JSON mode failed or not supported: {str(e)}. Falling back to plain text response.")
|
||||
response = await self.client.chat.completions.create(
|
||||
model=self.default_model,
|
||||
messages=messages
|
||||
)
|
||||
else:
|
||||
raise e
|
||||
|
||||
logger.debug(f"[get_json_completion] LLM Response: {response}")
|
||||
except OpenAIAuthenticationError as e:
|
||||
logger.error(f"[get_json_completion] Authentication error: {str(e)}")
|
||||
# Do not retry on auth errors - re-raise to stop retry
|
||||
raise
|
||||
except RateLimitError as e:
|
||||
logger.warning(f"[get_json_completion] Rate limit hit: {str(e)}")
|
||||
|
||||
# Extract retry_delay from error metadata if available
|
||||
retry_delay = 5.0 # Default fallback
|
||||
try:
|
||||
# Based on logs, the raw response is in e.body or e.response.json()
|
||||
# The logs show 'metadata': {'raw': '...'} which suggests a proxy or specific client wrapper
|
||||
# Let's try to find the 'retryDelay' in the error message or response
|
||||
import re
|
||||
|
||||
# Try to find "retryDelay": "XXs" in the string representation of the error
|
||||
error_str = str(e)
|
||||
match = re.search(r'"retryDelay":\s*"(\d+)s"', error_str)
|
||||
if match:
|
||||
retry_delay = float(match.group(1))
|
||||
else:
|
||||
# Try to parse from response if it's a standard OpenAI-like error with body
|
||||
if hasattr(e, 'body') and isinstance(e.body, dict):
|
||||
# Some providers put it in details
|
||||
details = e.body.get('error', {}).get('details', [])
|
||||
for detail in details:
|
||||
if detail.get('@type') == 'type.googleapis.com/google.rpc.RetryInfo':
|
||||
delay_str = detail.get('retryDelay', '5s')
|
||||
retry_delay = float(delay_str.rstrip('s'))
|
||||
break
|
||||
except Exception as parse_e:
|
||||
logger.debug(f"[get_json_completion] Failed to parse retry delay: {parse_e}")
|
||||
|
||||
# Add a small safety margin (0.5s) as requested
|
||||
wait_time = retry_delay + 0.5
|
||||
logger.info(f"[get_json_completion] Waiting for {wait_time}s before retry...")
|
||||
await asyncio.sleep(wait_time)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[get_json_completion] LLM call failed: {str(e)}")
|
||||
raise
|
||||
|
||||
if not response or not hasattr(response, 'choices') or not response.choices:
|
||||
raise RuntimeError(f"Invalid LLM response: {response}")
|
||||
|
||||
content = response.choices[0].message.content
|
||||
logger.debug(f"[get_json_completion] Raw content to parse: {content}")
|
||||
|
||||
try:
|
||||
return json.loads(content)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("[get_json_completion] Failed to parse JSON directly, attempting to extract from code blocks")
|
||||
if "```json" in content:
|
||||
json_str = content.split("```json")[1].split("```")[0].strip()
|
||||
return json.loads(json_str)
|
||||
elif "```" in content:
|
||||
json_str = content.split("```")[1].split("```")[0].strip()
|
||||
return json.loads(json_str)
|
||||
else:
|
||||
raise
|
||||
# [/DEF:LLMClient.get_json_completion:Function]
|
||||
|
||||
# [DEF:LLMClient.analyze_dashboard:Function]
|
||||
# @PURPOSE: Sends dashboard data (screenshot + logs) to LLM for health analysis.
|
||||
# @PRE: screenshot_path exists, logs is a list of strings.
|
||||
# @POST: Returns a structured analysis dictionary (status, summary, issues).
|
||||
# @SIDE_EFFECT: Reads screenshot file and calls external LLM API.
|
||||
async def analyze_dashboard(self, screenshot_path: str, logs: List[str]) -> Dict[str, Any]:
|
||||
with belief_scope("analyze_dashboard"):
|
||||
# Optimize image to reduce token count (US1 / T023)
|
||||
# Gemini/Gemma models have limits on input tokens, and large images contribute significantly.
|
||||
try:
|
||||
with Image.open(screenshot_path) as img:
|
||||
# Convert to RGB if necessary
|
||||
if img.mode in ("RGBA", "P"):
|
||||
img = img.convert("RGB")
|
||||
|
||||
# Resize if too large (max 1024px width while maintaining aspect ratio)
|
||||
# We reduce width further to 1024px to stay within token limits for long dashboards
|
||||
max_width = 1024
|
||||
if img.width > max_width or img.height > 2048:
|
||||
# Calculate scaling factor to fit within 1024x2048
|
||||
scale = min(max_width / img.width, 2048 / img.height)
|
||||
if scale < 1.0:
|
||||
new_width = int(img.width * scale)
|
||||
new_height = int(img.height * scale)
|
||||
img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
|
||||
logger.info(f"[analyze_dashboard] Resized image from {img.width}x{img.height} to {new_width}x{new_height}")
|
||||
|
||||
# Compress and convert to base64
|
||||
buffer = io.BytesIO()
|
||||
# Lower quality to 60% to further reduce payload size
|
||||
img.save(buffer, format="JPEG", quality=60, optimize=True)
|
||||
base_64_image = base64.b64encode(buffer.getvalue()).decode('utf-8')
|
||||
logger.info(f"[analyze_dashboard] Optimized image size: {len(buffer.getvalue()) / 1024:.2f} KB")
|
||||
except Exception as img_e:
|
||||
logger.warning(f"[analyze_dashboard] Image optimization failed: {img_e}. Using raw image.")
|
||||
with open(screenshot_path, "rb") as image_file:
|
||||
base_64_image = base64.b64encode(image_file.read()).decode('utf-8')
|
||||
import base64
|
||||
with open(screenshot_path, "rb") as image_file:
|
||||
base64_image = base64.b64encode(image_file.read()).decode('utf-8')
|
||||
|
||||
log_text = "\n".join(logs)
|
||||
prompt = f"""
|
||||
@@ -599,31 +177,48 @@ class LLMClient:
|
||||
}}
|
||||
"""
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": prompt},
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:image/jpeg;base64,{base_64_image}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
logger.debug(f"[analyze_dashboard] Calling LLM with model: {self.default_model}")
|
||||
try:
|
||||
return await self.get_json_completion(messages)
|
||||
response = await self.client.chat.completions.create(
|
||||
model=self.default_model,
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": prompt},
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:image/jpeg;base64,{base64_image}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
response_format={"type": "json_object"}
|
||||
)
|
||||
logger.debug(f"[analyze_dashboard] LLM Response: {response}")
|
||||
except RateLimitError as e:
|
||||
logger.warning(f"[analyze_dashboard] Rate limit hit: {str(e)}")
|
||||
raise # tenacity will handle retry
|
||||
except Exception as e:
|
||||
logger.error(f"[analyze_dashboard] Failed to get analysis: {str(e)}")
|
||||
logger.error(f"[analyze_dashboard] LLM call failed: {str(e)}")
|
||||
raise
|
||||
|
||||
if not response or not hasattr(response, 'choices') or not response.choices:
|
||||
error_info = getattr(response, 'error', 'No choices in response')
|
||||
logger.error(f"[analyze_dashboard] Invalid LLM response. Error info: {error_info}")
|
||||
return {
|
||||
"status": "FAIL",
|
||||
"summary": f"Failed to get response from LLM: {str(e)}",
|
||||
"summary": f"Failed to get response from LLM: {error_info}",
|
||||
"issues": [{"severity": "FAIL", "message": "LLM provider returned empty or invalid response"}]
|
||||
}
|
||||
# [/DEF:LLMClient.analyze_dashboard:Function]
|
||||
|
||||
import json
|
||||
result = json.loads(response.choices[0].message.content)
|
||||
return result
|
||||
# [/DEF:analyze_dashboard:Function]
|
||||
|
||||
# [/DEF:LLMClient:Class]
|
||||
|
||||
# [/DEF:backend/src/plugins/llm_analysis/service.py:Module]
|
||||
# [/DEF:backend.src.plugins.llm_analysis.service:Module]
|
||||
@@ -3,7 +3,6 @@
|
||||
# @PURPOSE: Implements a plugin for mapping dataset columns using external database connections or Excel files.
|
||||
# @LAYER: Plugins
|
||||
# @RELATION: Inherits from PluginBase. Uses DatasetMapper from superset_tool.
|
||||
# @RELATION: USES -> TaskContext
|
||||
# @CONSTRAINT: Must use belief_scope for logging.
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
@@ -14,7 +13,6 @@ from ..core.logger import logger, belief_scope
|
||||
from ..core.database import SessionLocal
|
||||
from ..models.connection import ConnectionConfig
|
||||
from ..core.utils.dataset_mapper import DatasetMapper
|
||||
from ..core.task_manager.context import TaskContext
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:MapperPlugin:Class]
|
||||
@@ -130,27 +128,19 @@ class MapperPlugin(PluginBase):
|
||||
# [/DEF:get_schema:Function]
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Executes the dataset mapping logic with TaskContext support.
|
||||
# @PURPOSE: Executes the dataset mapping logic.
|
||||
# @PARAM: params (Dict[str, Any]) - Mapping parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: Params contain valid 'env', 'dataset_id', and 'source'. params must be a dictionary.
|
||||
# @POST: Updates the dataset in Superset.
|
||||
# @RETURN: Dict[str, Any] - Execution status.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
|
||||
async def execute(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
||||
with belief_scope("execute"):
|
||||
env_name = params.get("env")
|
||||
dataset_id = params.get("dataset_id")
|
||||
source = params.get("source")
|
||||
|
||||
# Use TaskContext logger if available, otherwise fall back to app logger
|
||||
log = context.logger if context else logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
db_log = log.with_source("postgres") if context else log
|
||||
|
||||
if not env_name or dataset_id is None or not source:
|
||||
log.error("Missing required parameters: env, dataset_id, source")
|
||||
logger.error("[MapperPlugin.execute][State] Missing required parameters.")
|
||||
raise ValueError("Missing required parameters: env, dataset_id, source")
|
||||
|
||||
# Get config and initialize client
|
||||
@@ -158,7 +148,7 @@ class MapperPlugin(PluginBase):
|
||||
config_manager = get_config_manager()
|
||||
env_config = config_manager.get_environment(env_name)
|
||||
if not env_config:
|
||||
log.error(f"Environment '{env_name}' not found in configuration.")
|
||||
logger.error(f"[MapperPlugin.execute][State] Environment '{env_name}' not found.")
|
||||
raise ValueError(f"Environment '{env_name}' not found in configuration.")
|
||||
|
||||
client = SupersetClient(env_config)
|
||||
@@ -168,7 +158,7 @@ class MapperPlugin(PluginBase):
|
||||
if source == "postgres":
|
||||
connection_id = params.get("connection_id")
|
||||
if not connection_id:
|
||||
log.error("connection_id is required for postgres source.")
|
||||
logger.error("[MapperPlugin.execute][State] connection_id is required for postgres source.")
|
||||
raise ValueError("connection_id is required for postgres source.")
|
||||
|
||||
# Load connection from DB
|
||||
@@ -176,7 +166,7 @@ class MapperPlugin(PluginBase):
|
||||
try:
|
||||
conn_config = db.query(ConnectionConfig).filter(ConnectionConfig.id == connection_id).first()
|
||||
if not conn_config:
|
||||
db_log.error(f"Connection {connection_id} not found.")
|
||||
logger.error(f"[MapperPlugin.execute][State] Connection {connection_id} not found.")
|
||||
raise ValueError(f"Connection {connection_id} not found.")
|
||||
|
||||
postgres_config = {
|
||||
@@ -186,11 +176,10 @@ class MapperPlugin(PluginBase):
|
||||
'host': conn_config.host,
|
||||
'port': str(conn_config.port) if conn_config.port else '5432'
|
||||
}
|
||||
db_log.debug(f"Loaded connection config for {conn_config.host}:{conn_config.port}/{conn_config.database}")
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
log.info(f"Starting mapping for dataset {dataset_id} in {env_name}")
|
||||
logger.info(f"[MapperPlugin.execute][Action] Starting mapping for dataset {dataset_id} in {env_name}")
|
||||
|
||||
mapper = DatasetMapper()
|
||||
|
||||
@@ -204,10 +193,10 @@ class MapperPlugin(PluginBase):
|
||||
table_name=params.get("table_name"),
|
||||
table_schema=params.get("table_schema") or "public"
|
||||
)
|
||||
superset_log.info(f"Mapping completed for dataset {dataset_id}")
|
||||
logger.info(f"[MapperPlugin.execute][Success] Mapping completed for dataset {dataset_id}")
|
||||
return {"status": "success", "dataset_id": dataset_id}
|
||||
except Exception as e:
|
||||
log.error(f"Mapping failed: {e}")
|
||||
logger.error(f"[MapperPlugin.execute][Failure] Mapping failed: {e}")
|
||||
raise
|
||||
# [/DEF:execute:Function]
|
||||
|
||||
|
||||
@@ -5,22 +5,20 @@
|
||||
# @RELATION: IMPLEMENTS -> PluginBase
|
||||
# @RELATION: DEPENDS_ON -> superset_tool.client
|
||||
# @RELATION: DEPENDS_ON -> superset_tool.utils
|
||||
# @RELATION: USES -> TaskContext
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from typing import Dict, Any, List
|
||||
from pathlib import Path
|
||||
import zipfile
|
||||
import re
|
||||
|
||||
from ..core.plugin_base import PluginBase
|
||||
from ..core.logger import belief_scope, logger as app_logger
|
||||
from ..core.logger import belief_scope
|
||||
from ..core.superset_client import SupersetClient
|
||||
from ..core.utils.fileio import create_temp_file, update_yamls, create_dashboard_export
|
||||
from ..dependencies import get_config_manager
|
||||
from ..core.migration_engine import MigrationEngine
|
||||
from ..core.database import SessionLocal
|
||||
from ..models.mapping import DatabaseMapping, Environment
|
||||
from ..core.task_manager.context import TaskContext
|
||||
|
||||
# [DEF:MigrationPlugin:Class]
|
||||
# @PURPOSE: Implementation of the migration plugin logic.
|
||||
@@ -134,12 +132,11 @@ class MigrationPlugin(PluginBase):
|
||||
# [/DEF:get_schema:Function]
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Executes the dashboard migration logic with TaskContext support.
|
||||
# @PURPOSE: Executes the dashboard migration logic.
|
||||
# @PARAM: params (Dict[str, Any]) - Migration parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: Source and target environments must be configured.
|
||||
# @POST: Selected dashboards are migrated.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
with belief_scope("MigrationPlugin.execute"):
|
||||
source_env_id = params.get("source_env_id")
|
||||
target_env_id = params.get("target_env_id")
|
||||
@@ -160,15 +157,74 @@ class MigrationPlugin(PluginBase):
|
||||
from ..dependencies import get_task_manager
|
||||
tm = get_task_manager()
|
||||
|
||||
# Use TaskContext logger if available, otherwise fall back to app_logger
|
||||
log = context.logger if context else app_logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
migration_log = log.with_source("migration") if context else log
|
||||
|
||||
log.info("Starting migration task.")
|
||||
log.debug(f"Params: {params}")
|
||||
class TaskLoggerProxy:
|
||||
# [DEF:__init__:Function]
|
||||
# @PURPOSE: Initializes the proxy logger.
|
||||
# @PRE: None.
|
||||
# @POST: Instance is initialized.
|
||||
def __init__(self):
|
||||
with belief_scope("__init__"):
|
||||
# Initialize parent with dummy values since we override methods
|
||||
pass
|
||||
# [/DEF:__init__:Function]
|
||||
|
||||
# [DEF:debug:Function]
|
||||
# @PURPOSE: Logs a debug message to the task manager.
|
||||
# @PRE: msg is a string.
|
||||
# @POST: Log is added to task manager if task_id exists.
|
||||
def debug(self, msg, *args, extra=None, **kwargs):
|
||||
with belief_scope("debug"):
|
||||
if task_id: tm._add_log(task_id, "DEBUG", msg, extra or {})
|
||||
# [/DEF:debug:Function]
|
||||
|
||||
# [DEF:info:Function]
|
||||
# @PURPOSE: Logs an info message to the task manager.
|
||||
# @PRE: msg is a string.
|
||||
# @POST: Log is added to task manager if task_id exists.
|
||||
def info(self, msg, *args, extra=None, **kwargs):
|
||||
with belief_scope("info"):
|
||||
if task_id: tm._add_log(task_id, "INFO", msg, extra or {})
|
||||
# [/DEF:info:Function]
|
||||
|
||||
# [DEF:warning:Function]
|
||||
# @PURPOSE: Logs a warning message to the task manager.
|
||||
# @PRE: msg is a string.
|
||||
# @POST: Log is added to task manager if task_id exists.
|
||||
def warning(self, msg, *args, extra=None, **kwargs):
|
||||
with belief_scope("warning"):
|
||||
if task_id: tm._add_log(task_id, "WARNING", msg, extra or {})
|
||||
# [/DEF:warning:Function]
|
||||
|
||||
# [DEF:error:Function]
|
||||
# @PURPOSE: Logs an error message to the task manager.
|
||||
# @PRE: msg is a string.
|
||||
# @POST: Log is added to task manager if task_id exists.
|
||||
def error(self, msg, *args, extra=None, **kwargs):
|
||||
with belief_scope("error"):
|
||||
if task_id: tm._add_log(task_id, "ERROR", msg, extra or {})
|
||||
# [/DEF:error:Function]
|
||||
|
||||
# [DEF:critical:Function]
|
||||
# @PURPOSE: Logs a critical message to the task manager.
|
||||
# @PRE: msg is a string.
|
||||
# @POST: Log is added to task manager if task_id exists.
|
||||
def critical(self, msg, *args, extra=None, **kwargs):
|
||||
with belief_scope("critical"):
|
||||
if task_id: tm._add_log(task_id, "ERROR", msg, extra or {})
|
||||
# [/DEF:critical:Function]
|
||||
|
||||
# [DEF:exception:Function]
|
||||
# @PURPOSE: Logs an exception message to the task manager.
|
||||
# @PRE: msg is a string.
|
||||
# @POST: Log is added to task manager if task_id exists.
|
||||
def exception(self, msg, *args, **kwargs):
|
||||
with belief_scope("exception"):
|
||||
if task_id: tm._add_log(task_id, "ERROR", msg, {"exception": True})
|
||||
# [/DEF:exception:Function]
|
||||
|
||||
logger = TaskLoggerProxy()
|
||||
logger.info(f"[MigrationPlugin][Entry] Starting migration task.")
|
||||
logger.info(f"[MigrationPlugin][Action] Params: {params}")
|
||||
|
||||
try:
|
||||
with belief_scope("execute"):
|
||||
@@ -195,7 +251,7 @@ class MigrationPlugin(PluginBase):
|
||||
from_env_name = src_env.name
|
||||
to_env_name = tgt_env.name
|
||||
|
||||
log.info(f"Resolved environments: {from_env_name} -> {to_env_name}")
|
||||
logger.info(f"[MigrationPlugin][State] Resolved environments: {from_env_name} -> {to_env_name}")
|
||||
|
||||
from_c = SupersetClient(src_env)
|
||||
to_c = SupersetClient(tgt_env)
|
||||
@@ -214,11 +270,11 @@ class MigrationPlugin(PluginBase):
|
||||
d for d in all_dashboards if re.search(regex_str, d["dashboard_title"], re.IGNORECASE)
|
||||
]
|
||||
else:
|
||||
log.warning("No selection criteria provided (selected_ids or dashboard_regex).")
|
||||
logger.warning("[MigrationPlugin][State] No selection criteria provided (selected_ids or dashboard_regex).")
|
||||
return
|
||||
|
||||
if not dashboards_to_migrate:
|
||||
log.warning("No dashboards found matching criteria.")
|
||||
logger.warning("[MigrationPlugin][State] No dashboards found matching criteria.")
|
||||
return
|
||||
|
||||
# Fetch mappings from database
|
||||
@@ -236,7 +292,7 @@ class MigrationPlugin(PluginBase):
|
||||
DatabaseMapping.target_env_id == tgt_env.id
|
||||
).all()
|
||||
db_mapping = {m.source_db_uuid: m.target_db_uuid for m in mappings}
|
||||
log.info(f"Loaded {len(db_mapping)} database mappings.")
|
||||
logger.info(f"[MigrationPlugin][State] Loaded {len(db_mapping)} database mappings.")
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
@@ -255,7 +311,7 @@ class MigrationPlugin(PluginBase):
|
||||
if not success and replace_db_config:
|
||||
# Signal missing mapping and wait (only if we care about mappings)
|
||||
if task_id:
|
||||
log.info(f"Pausing for missing mapping in task {task_id}")
|
||||
logger.info(f"[MigrationPlugin][Action] Pausing for missing mapping in task {task_id}")
|
||||
# In a real scenario, we'd pass the missing DB info to the frontend
|
||||
# For this task, we'll just simulate the wait
|
||||
await tm.wait_for_resolution(task_id)
|
||||
@@ -277,9 +333,9 @@ class MigrationPlugin(PluginBase):
|
||||
if success:
|
||||
to_c.import_dashboard(file_name=tmp_new_zip, dash_id=dash_id, dash_slug=dash_slug)
|
||||
else:
|
||||
migration_log.error(f"Failed to transform ZIP for dashboard {title}")
|
||||
logger.error(f"[MigrationPlugin][Failure] Failed to transform ZIP for dashboard {title}")
|
||||
|
||||
superset_log.info(f"Dashboard {title} imported.")
|
||||
logger.info(f"[MigrationPlugin][Success] Dashboard {title} imported.")
|
||||
except Exception as exc:
|
||||
# Check for password error
|
||||
error_msg = str(exc)
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
# @PURPOSE: Implements a plugin for searching text patterns across all datasets in a specific Superset environment.
|
||||
# @LAYER: Plugins
|
||||
# @RELATION: Inherits from PluginBase. Uses SupersetClient from core.
|
||||
# @RELATION: USES -> TaskContext
|
||||
# @CONSTRAINT: Must use belief_scope for logging.
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
@@ -12,7 +11,6 @@ from typing import Dict, Any, List, Optional
|
||||
from ..core.plugin_base import PluginBase
|
||||
from ..core.superset_client import SupersetClient
|
||||
from ..core.logger import logger, belief_scope
|
||||
from ..core.task_manager.context import TaskContext
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:SearchPlugin:Class]
|
||||
@@ -101,26 +99,18 @@ class SearchPlugin(PluginBase):
|
||||
# [/DEF:get_schema:Function]
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Executes the dataset search logic with TaskContext support.
|
||||
# @PURPOSE: Executes the dataset search logic.
|
||||
# @PARAM: params (Dict[str, Any]) - Search parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PRE: Params contain valid 'env' and 'query'.
|
||||
# @POST: Returns a dictionary with count and results list.
|
||||
# @RETURN: Dict[str, Any] - Search results.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
|
||||
async def execute(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
||||
with belief_scope("SearchPlugin.execute", f"params={params}"):
|
||||
env_name = params.get("env")
|
||||
search_query = params.get("query")
|
||||
|
||||
# Use TaskContext logger if available, otherwise fall back to app logger
|
||||
log = context.logger if context else logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
superset_log = log.with_source("superset_api") if context else log
|
||||
search_log = log.with_source("search") if context else log
|
||||
|
||||
if not env_name or not search_query:
|
||||
log.error("Missing required parameters: env, query")
|
||||
logger.error("[SearchPlugin.execute][State] Missing required parameters.")
|
||||
raise ValueError("Missing required parameters: env, query")
|
||||
|
||||
# Get config and initialize client
|
||||
@@ -128,20 +118,20 @@ class SearchPlugin(PluginBase):
|
||||
config_manager = get_config_manager()
|
||||
env_config = config_manager.get_environment(env_name)
|
||||
if not env_config:
|
||||
log.error(f"Environment '{env_name}' not found in configuration.")
|
||||
logger.error(f"[SearchPlugin.execute][State] Environment '{env_name}' not found.")
|
||||
raise ValueError(f"Environment '{env_name}' not found in configuration.")
|
||||
|
||||
client = SupersetClient(env_config)
|
||||
client.authenticate()
|
||||
|
||||
log.info(f"Searching for pattern: '{search_query}' in environment: {env_name}")
|
||||
logger.info(f"[SearchPlugin.execute][Action] Searching for pattern: '{search_query}' in environment: {env_name}")
|
||||
|
||||
try:
|
||||
# Ported logic from search_script.py
|
||||
_, datasets = client.get_datasets(query={"columns": ["id", "table_name", "sql", "database", "columns"]})
|
||||
|
||||
if not datasets:
|
||||
search_log.warning("No datasets found.")
|
||||
logger.warning("[SearchPlugin.execute][State] No datasets found.")
|
||||
return {"count": 0, "results": []}
|
||||
|
||||
pattern = re.compile(search_query, re.IGNORECASE)
|
||||
@@ -165,17 +155,17 @@ class SearchPlugin(PluginBase):
|
||||
"full_value": value_str
|
||||
})
|
||||
|
||||
search_log.info(f"Found matches in {len(results)} locations.")
|
||||
logger.info(f"[SearchPlugin.execute][Success] Found matches in {len(results)} locations.")
|
||||
return {
|
||||
"count": len(results),
|
||||
"results": results
|
||||
}
|
||||
|
||||
except re.error as e:
|
||||
search_log.error(f"Invalid regex pattern: {e}")
|
||||
logger.error(f"[SearchPlugin.execute][Failure] Invalid regex pattern: {e}")
|
||||
raise ValueError(f"Invalid regex pattern: {e}")
|
||||
except Exception as e:
|
||||
log.error(f"Error during search: {e}")
|
||||
logger.error(f"[SearchPlugin.execute][Failure] Error during search: {e}")
|
||||
raise
|
||||
# [/DEF:execute:Function]
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
# @LAYER: App
|
||||
# @RELATION: IMPLEMENTS -> PluginBase
|
||||
# @RELATION: DEPENDS_ON -> backend.src.models.storage
|
||||
# @RELATION: USES -> TaskContext
|
||||
#
|
||||
# @INVARIANT: All file operations must be restricted to the configured storage root.
|
||||
|
||||
@@ -21,7 +20,6 @@ from ...core.plugin_base import PluginBase
|
||||
from ...core.logger import belief_scope, logger
|
||||
from ...models.storage import StoredFile, FileCategory, StorageConfig
|
||||
from ...dependencies import get_config_manager
|
||||
from ...core.task_manager.context import TaskContext
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:StoragePlugin:Class]
|
||||
@@ -114,21 +112,12 @@ class StoragePlugin(PluginBase):
|
||||
# [/DEF:get_schema:Function]
|
||||
|
||||
# [DEF:execute:Function]
|
||||
# @PURPOSE: Executes storage-related tasks with TaskContext support.
|
||||
# @PARAM: params (Dict[str, Any]) - Storage parameters.
|
||||
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
|
||||
# @PURPOSE: Executes storage-related tasks (placeholder for PluginBase compliance).
|
||||
# @PRE: params must match the plugin schema.
|
||||
# @POST: Task is executed and logged.
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
with belief_scope("StoragePlugin:execute"):
|
||||
# Use TaskContext logger if available, otherwise fall back to app logger
|
||||
log = context.logger if context else logger
|
||||
|
||||
# Create sub-loggers for different components
|
||||
storage_log = log.with_source("storage") if context else log
|
||||
filesystem_log = log.with_source("filesystem") if context else log
|
||||
|
||||
storage_log.info(f"Executing with params: {params}")
|
||||
logger.info(f"[StoragePlugin][Action] Executing with params: {params}")
|
||||
# [/DEF:execute:Function]
|
||||
|
||||
# [DEF:get_storage_root:Function]
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# [DEF:backend.src.scripts.create_admin:Module]
|
||||
#
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: admin, setup, user, auth, cli
|
||||
# @PURPOSE: CLI tool for creating the initial admin user.
|
||||
# @LAYER: Scripts
|
||||
|
||||
@@ -15,74 +15,43 @@ from cryptography.fernet import Fernet
|
||||
import os
|
||||
|
||||
# [DEF:EncryptionManager:Class]
|
||||
# @TIER: CRITICAL
|
||||
# @PURPOSE: Handles encryption and decryption of sensitive data like API keys.
|
||||
# @INVARIANT: Uses a secret key from environment or a default one (fallback only for dev).
|
||||
class EncryptionManager:
|
||||
# [DEF:EncryptionManager.__init__:Function]
|
||||
# @PURPOSE: Initialize the encryption manager with a Fernet key.
|
||||
# @PRE: ENCRYPTION_KEY env var must be set or use default dev key.
|
||||
# @POST: Fernet instance ready for encryption/decryption.
|
||||
# @INVARIANT: Uses a secret key from environment or a default one (fallback only for dev).
|
||||
def __init__(self):
|
||||
self.key = os.getenv("ENCRYPTION_KEY", "REMOVED_HISTORICAL_SECRET_DO_NOT_USE").encode()
|
||||
self.key = os.getenv("ENCRYPTION_KEY", "7_u-l7-B-j9f5_V5z-5-5-5-5-5-5-5-5-5-5-5-5-5=").encode()
|
||||
self.fernet = Fernet(self.key)
|
||||
# [/DEF:EncryptionManager.__init__:Function]
|
||||
|
||||
# [DEF:EncryptionManager.encrypt:Function]
|
||||
# @PURPOSE: Encrypt a plaintext string.
|
||||
# @PRE: data must be a non-empty string.
|
||||
# @POST: Returns encrypted string.
|
||||
def encrypt(self, data: str) -> str:
|
||||
return self.fernet.encrypt(data.encode()).decode()
|
||||
# [/DEF:EncryptionManager.encrypt:Function]
|
||||
|
||||
# [DEF:EncryptionManager.decrypt:Function]
|
||||
# @PURPOSE: Decrypt an encrypted string.
|
||||
# @PRE: encrypted_data must be a valid Fernet-encrypted string.
|
||||
# @POST: Returns original plaintext string.
|
||||
def decrypt(self, encrypted_data: str) -> str:
|
||||
return self.fernet.decrypt(encrypted_data.encode()).decode()
|
||||
# [/DEF:EncryptionManager.decrypt:Function]
|
||||
# [/DEF:EncryptionManager:Class]
|
||||
|
||||
# [DEF:LLMProviderService:Class]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Service to manage LLM provider lifecycle.
|
||||
class LLMProviderService:
|
||||
# [DEF:LLMProviderService.__init__:Function]
|
||||
# @PURPOSE: Initialize the service with database session.
|
||||
# @PRE: db must be a valid SQLAlchemy Session.
|
||||
# @POST: Service ready for provider operations.
|
||||
def __init__(self, db: Session):
|
||||
self.db = db
|
||||
self.encryption = EncryptionManager()
|
||||
# [/DEF:LLMProviderService.__init__:Function]
|
||||
|
||||
# [DEF:get_all_providers:Function]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Returns all configured LLM providers.
|
||||
# @PRE: Database connection must be active.
|
||||
# @POST: Returns list of all LLMProvider records.
|
||||
def get_all_providers(self) -> List[LLMProvider]:
|
||||
with belief_scope("get_all_providers"):
|
||||
return self.db.query(LLMProvider).all()
|
||||
# [/DEF:get_all_providers:Function]
|
||||
|
||||
# [DEF:get_provider:Function]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Returns a single LLM provider by ID.
|
||||
# @PRE: provider_id must be a valid string.
|
||||
# @POST: Returns LLMProvider or None if not found.
|
||||
def get_provider(self, provider_id: str) -> Optional[LLMProvider]:
|
||||
with belief_scope("get_provider"):
|
||||
return self.db.query(LLMProvider).filter(LLMProvider.id == provider_id).first()
|
||||
# [/DEF:get_provider:Function]
|
||||
|
||||
# [DEF:create_provider:Function]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Creates a new LLM provider with encrypted API key.
|
||||
# @PRE: config must contain valid provider configuration.
|
||||
# @POST: New provider created and persisted to database.
|
||||
def create_provider(self, config: LLMProviderConfig) -> LLMProvider:
|
||||
with belief_scope("create_provider"):
|
||||
encrypted_key = self.encryption.encrypt(config.api_key)
|
||||
@@ -101,10 +70,7 @@ class LLMProviderService:
|
||||
# [/DEF:create_provider:Function]
|
||||
|
||||
# [DEF:update_provider:Function]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Updates an existing LLM provider.
|
||||
# @PRE: provider_id must exist, config must be valid.
|
||||
# @POST: Provider updated and persisted to database.
|
||||
def update_provider(self, provider_id: str, config: LLMProviderConfig) -> Optional[LLMProvider]:
|
||||
with belief_scope("update_provider"):
|
||||
db_provider = self.get_provider(provider_id)
|
||||
@@ -114,8 +80,7 @@ class LLMProviderService:
|
||||
db_provider.provider_type = config.provider_type.value
|
||||
db_provider.name = config.name
|
||||
db_provider.base_url = config.base_url
|
||||
# Only update API key if provided (not None and not empty)
|
||||
if config.api_key is not None and config.api_key != "":
|
||||
if config.api_key != "********":
|
||||
db_provider.api_key = self.encryption.encrypt(config.api_key)
|
||||
db_provider.default_model = config.default_model
|
||||
db_provider.is_active = config.is_active
|
||||
@@ -126,10 +91,7 @@ class LLMProviderService:
|
||||
# [/DEF:update_provider:Function]
|
||||
|
||||
# [DEF:delete_provider:Function]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Deletes an LLM provider.
|
||||
# @PRE: provider_id must exist.
|
||||
# @POST: Provider removed from database.
|
||||
def delete_provider(self, provider_id: str) -> bool:
|
||||
with belief_scope("delete_provider"):
|
||||
db_provider = self.get_provider(provider_id)
|
||||
@@ -141,27 +103,13 @@ class LLMProviderService:
|
||||
# [/DEF:delete_provider:Function]
|
||||
|
||||
# [DEF:get_decrypted_api_key:Function]
|
||||
# @TIER: STANDARD
|
||||
# @PURPOSE: Returns the decrypted API key for a provider.
|
||||
# @PRE: provider_id must exist with valid encrypted key.
|
||||
# @POST: Returns decrypted API key or None on failure.
|
||||
def get_decrypted_api_key(self, provider_id: str) -> Optional[str]:
|
||||
with belief_scope("get_decrypted_api_key"):
|
||||
db_provider = self.get_provider(provider_id)
|
||||
if not db_provider:
|
||||
logger.warning(f"[get_decrypted_api_key] Provider {provider_id} not found in database")
|
||||
return None
|
||||
|
||||
logger.info(f"[get_decrypted_api_key] Provider found: {db_provider.id}")
|
||||
logger.info(f"[get_decrypted_api_key] Encrypted API key length: {len(db_provider.api_key) if db_provider.api_key else 0}")
|
||||
|
||||
try:
|
||||
decrypted_key = self.encryption.decrypt(db_provider.api_key)
|
||||
logger.info(f"[get_decrypted_api_key] Decryption successful, key length: {len(decrypted_key) if decrypted_key else 0}")
|
||||
return decrypted_key
|
||||
except Exception as e:
|
||||
logger.error(f"[get_decrypted_api_key] Decryption failed: {str(e)}")
|
||||
return None
|
||||
return self.encryption.decrypt(db_provider.api_key)
|
||||
# [/DEF:get_decrypted_api_key:Function]
|
||||
|
||||
# [/DEF:LLMProviderService:Class]
|
||||
|
||||
BIN
backend/ss-tools-storage/screenshots/4_20260129_174429.png
Normal file
BIN
backend/ss-tools-storage/screenshots/4_20260129_174429.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 22 KiB |
BIN
backend/tasks.db
Normal file
BIN
backend/tasks.db
Normal file
Binary file not shown.
@@ -1,397 +0,0 @@
|
||||
# [DEF:test_log_persistence:Module]
|
||||
# @SEMANTICS: test, log, persistence, unit_test
|
||||
# @PURPOSE: Unit tests for TaskLogPersistenceService.
|
||||
# @LAYER: Test
|
||||
# @RELATION: TESTS -> TaskLogPersistenceService
|
||||
# @TIER: STANDARD
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
from src.core.task_manager.persistence import TaskLogPersistenceService
|
||||
from src.core.task_manager.models import LogEntry
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TestLogPersistence:Class]
|
||||
# @PURPOSE: Test suite for TaskLogPersistenceService.
|
||||
# @TIER: STANDARD
|
||||
class TestLogPersistence:
|
||||
|
||||
# [DEF:setup_class:Function]
|
||||
# @PURPOSE: Setup test database and service instance.
|
||||
# @PRE: None.
|
||||
# @POST: In-memory database and service instance created.
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
"""Create an in-memory database for testing."""
|
||||
cls.engine = create_engine("sqlite:///:memory:")
|
||||
cls.SessionLocal = sessionmaker(bind=cls.engine)
|
||||
cls.service = TaskLogPersistenceService(cls.engine)
|
||||
# [/DEF:setup_class:Function]
|
||||
|
||||
# [DEF:teardown_class:Function]
|
||||
# @PURPOSE: Clean up test database.
|
||||
# @PRE: None.
|
||||
# @POST: Database disposed.
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
"""Dispose of the database engine."""
|
||||
cls.engine.dispose()
|
||||
# [/DEF:teardown_class:Function]
|
||||
|
||||
# [DEF:setup_method:Function]
|
||||
# @PURPOSE: Setup for each test method.
|
||||
# @PRE: None.
|
||||
# @POST: Fresh database session created.
|
||||
def setup_method(self):
|
||||
"""Create a new session for each test."""
|
||||
self.session = self.SessionLocal()
|
||||
# [/DEF:setup_method:Function]
|
||||
|
||||
# [DEF:teardown_method:Function]
|
||||
# @PURPOSE: Cleanup after each test method.
|
||||
# @PRE: None.
|
||||
# @POST: Session closed and rolled back.
|
||||
def teardown_method(self):
|
||||
"""Close the session after each test."""
|
||||
self.session.close()
|
||||
# [/DEF:teardown_method:Function]
|
||||
|
||||
# [DEF:test_add_log_single:Function]
|
||||
# @PURPOSE: Test adding a single log entry.
|
||||
# @PRE: Service and session initialized.
|
||||
# @POST: Log entry persisted to database.
|
||||
def test_add_log_single(self):
|
||||
"""Test adding a single log entry."""
|
||||
entry = LogEntry(
|
||||
task_id="test-task-1",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="test_source",
|
||||
message="Test message"
|
||||
)
|
||||
|
||||
self.service.add_log(entry)
|
||||
|
||||
# Query the database
|
||||
result = self.session.query(LogEntry).filter_by(task_id="test-task-1").first()
|
||||
|
||||
assert result is not None
|
||||
assert result.level == "INFO"
|
||||
assert result.source == "test_source"
|
||||
assert result.message == "Test message"
|
||||
# [/DEF:test_add_log_single:Function]
|
||||
|
||||
# [DEF:test_add_log_batch:Function]
|
||||
# @PURPOSE: Test adding multiple log entries in batch.
|
||||
# @PRE: Service and session initialized.
|
||||
# @POST: All log entries persisted to database.
|
||||
def test_add_log_batch(self):
|
||||
"""Test adding multiple log entries in batch."""
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-2",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="source1",
|
||||
message="Message 1"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-2",
|
||||
timestamp=datetime.now(),
|
||||
level="WARNING",
|
||||
source="source2",
|
||||
message="Message 2"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-2",
|
||||
timestamp=datetime.now(),
|
||||
level="ERROR",
|
||||
source="source3",
|
||||
message="Message 3"
|
||||
),
|
||||
]
|
||||
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Query the database
|
||||
results = self.session.query(LogEntry).filter_by(task_id="test-task-2").all()
|
||||
|
||||
assert len(results) == 3
|
||||
assert results[0].level == "INFO"
|
||||
assert results[1].level == "WARNING"
|
||||
assert results[2].level == "ERROR"
|
||||
# [/DEF:test_add_log_batch:Function]
|
||||
|
||||
# [DEF:test_get_logs_by_task_id:Function]
|
||||
# @PURPOSE: Test retrieving logs by task ID.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Returns logs for the specified task.
|
||||
def test_get_logs_by_task_id(self):
|
||||
"""Test retrieving logs by task ID."""
|
||||
# Add test logs
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-3",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="source1",
|
||||
message=f"Message {i}"
|
||||
)
|
||||
for i in range(5)
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Retrieve logs
|
||||
logs = self.service.get_logs("test-task-3")
|
||||
|
||||
assert len(logs) == 5
|
||||
assert all(log.task_id == "test-task-3" for log in logs)
|
||||
# [/DEF:test_get_logs_by_task_id:Function]
|
||||
|
||||
# [DEF:test_get_logs_with_filters:Function]
|
||||
# @PURPOSE: Test retrieving logs with level and source filters.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Returns filtered logs.
|
||||
def test_get_logs_with_filters(self):
|
||||
"""Test retrieving logs with level and source filters."""
|
||||
# Add test logs with different levels and sources
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-4",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="api",
|
||||
message="Info message"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-4",
|
||||
timestamp=datetime.now(),
|
||||
level="WARNING",
|
||||
source="api",
|
||||
message="Warning message"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-4",
|
||||
timestamp=datetime.now(),
|
||||
level="ERROR",
|
||||
source="storage",
|
||||
message="Error message"
|
||||
),
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Test level filter
|
||||
warning_logs = self.service.get_logs("test-task-4", level="WARNING")
|
||||
assert len(warning_logs) == 1
|
||||
assert warning_logs[0].level == "WARNING"
|
||||
|
||||
# Test source filter
|
||||
api_logs = self.service.get_logs("test-task-4", source="api")
|
||||
assert len(api_logs) == 2
|
||||
assert all(log.source == "api" for log in api_logs)
|
||||
|
||||
# Test combined filters
|
||||
api_warning_logs = self.service.get_logs("test-task-4", level="WARNING", source="api")
|
||||
assert len(api_warning_logs) == 1
|
||||
# [/DEF:test_get_logs_with_filters:Function]
|
||||
|
||||
# [DEF:test_get_logs_with_pagination:Function]
|
||||
# @PURPOSE: Test retrieving logs with pagination.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Returns paginated logs.
|
||||
def test_get_logs_with_pagination(self):
|
||||
"""Test retrieving logs with pagination."""
|
||||
# Add 15 test logs
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-5",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="test",
|
||||
message=f"Message {i}"
|
||||
)
|
||||
for i in range(15)
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Test first page
|
||||
page1 = self.service.get_logs("test-task-5", limit=10, offset=0)
|
||||
assert len(page1) == 10
|
||||
|
||||
# Test second page
|
||||
page2 = self.service.get_logs("test-task-5", limit=10, offset=10)
|
||||
assert len(page2) == 5
|
||||
# [/DEF:test_get_logs_with_pagination:Function]
|
||||
|
||||
# [DEF:test_get_logs_with_search:Function]
|
||||
# @PURPOSE: Test retrieving logs with search query.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Returns logs matching search query.
|
||||
def test_get_logs_with_search(self):
|
||||
"""Test retrieving logs with search query."""
|
||||
# Add test logs
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-6",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="api",
|
||||
message="User authentication successful"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-6",
|
||||
timestamp=datetime.now(),
|
||||
level="ERROR",
|
||||
source="api",
|
||||
message="Failed to connect to database"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-6",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="storage",
|
||||
message="File saved successfully"
|
||||
),
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Test search for "authentication"
|
||||
auth_logs = self.service.get_logs("test-task-6", search="authentication")
|
||||
assert len(auth_logs) == 1
|
||||
assert "authentication" in auth_logs[0].message.lower()
|
||||
|
||||
# Test search for "failed"
|
||||
failed_logs = self.service.get_logs("test-task-6", search="failed")
|
||||
assert len(failed_logs) == 1
|
||||
assert "failed" in failed_logs[0].message.lower()
|
||||
# [/DEF:test_get_logs_with_search:Function]
|
||||
|
||||
# [DEF:test_get_log_stats:Function]
|
||||
# @PURPOSE: Test retrieving log statistics.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Returns statistics grouped by level and source.
|
||||
def test_get_log_stats(self):
|
||||
"""Test retrieving log statistics."""
|
||||
# Add test logs
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-7",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="api",
|
||||
message="Info 1"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-7",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="api",
|
||||
message="Info 2"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-7",
|
||||
timestamp=datetime.now(),
|
||||
level="WARNING",
|
||||
source="api",
|
||||
message="Warning 1"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-7",
|
||||
timestamp=datetime.now(),
|
||||
level="ERROR",
|
||||
source="storage",
|
||||
message="Error 1"
|
||||
),
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Get stats
|
||||
stats = self.service.get_log_stats("test-task-7")
|
||||
|
||||
assert stats is not None
|
||||
assert stats["by_level"]["INFO"] == 2
|
||||
assert stats["by_level"]["WARNING"] == 1
|
||||
assert stats["by_level"]["ERROR"] == 1
|
||||
assert stats["by_source"]["api"] == 3
|
||||
assert stats["by_source"]["storage"] == 1
|
||||
# [/DEF:test_get_log_stats:Function]
|
||||
|
||||
# [DEF:test_get_log_sources:Function]
|
||||
# @PURPOSE: Test retrieving unique log sources.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Returns list of unique sources.
|
||||
def test_get_log_sources(self):
|
||||
"""Test retrieving unique log sources."""
|
||||
# Add test logs
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-8",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="api",
|
||||
message="Message 1"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-8",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="storage",
|
||||
message="Message 2"
|
||||
),
|
||||
LogEntry(
|
||||
task_id="test-task-8",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="git",
|
||||
message="Message 3"
|
||||
),
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Get sources
|
||||
sources = self.service.get_log_sources("test-task-8")
|
||||
|
||||
assert len(sources) == 3
|
||||
assert "api" in sources
|
||||
assert "storage" in sources
|
||||
assert "git" in sources
|
||||
# [/DEF:test_get_log_sources:Function]
|
||||
|
||||
# [DEF:test_delete_logs_by_task_id:Function]
|
||||
# @PURPOSE: Test deleting logs by task ID.
|
||||
# @PRE: Service and session initialized, logs exist.
|
||||
# @POST: Logs for the task are deleted.
|
||||
def test_delete_logs_by_task_id(self):
|
||||
"""Test deleting logs by task ID."""
|
||||
# Add test logs
|
||||
entries = [
|
||||
LogEntry(
|
||||
task_id="test-task-9",
|
||||
timestamp=datetime.now(),
|
||||
level="INFO",
|
||||
source="test",
|
||||
message=f"Message {i}"
|
||||
)
|
||||
for i in range(3)
|
||||
]
|
||||
self.service.add_logs(entries)
|
||||
|
||||
# Verify logs exist
|
||||
logs_before = self.service.get_logs("test-task-9")
|
||||
assert len(logs_before) == 3
|
||||
|
||||
# Delete logs
|
||||
self.service.delete_logs("test-task-9")
|
||||
|
||||
# Verify logs are deleted
|
||||
logs_after = self.service.get_logs("test-task-9")
|
||||
assert len(logs_after) == 0
|
||||
# [/DEF:test_delete_logs_by_task_id:Function]
|
||||
|
||||
# [/DEF:TestLogPersistence:Class]
|
||||
# [/DEF:test_log_persistence:Module]
|
||||
@@ -1,30 +1,14 @@
|
||||
import pytest
|
||||
import logging
|
||||
from src.core.logger import (
|
||||
belief_scope,
|
||||
logger,
|
||||
configure_logger,
|
||||
get_task_log_level,
|
||||
should_log_task_level
|
||||
)
|
||||
from src.core.config_models import LoggingConfig
|
||||
from src.core.logger import belief_scope, logger
|
||||
|
||||
|
||||
# [DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function]
|
||||
# @PURPOSE: Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level.
|
||||
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
|
||||
# @POST: Logs are verified to contain Entry, Action, and Exit tags at DEBUG level.
|
||||
def test_belief_scope_logs_entry_action_exit_at_debug(caplog):
|
||||
"""Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level."""
|
||||
# Configure logger to DEBUG level
|
||||
config = LoggingConfig(
|
||||
level="DEBUG",
|
||||
task_log_level="DEBUG",
|
||||
enable_belief_state=True
|
||||
)
|
||||
configure_logger(config)
|
||||
|
||||
caplog.set_level("DEBUG")
|
||||
# [DEF:test_belief_scope_logs_entry_action_exit:Function]
|
||||
# @PURPOSE: Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs.
|
||||
# @PRE: belief_scope is available. caplog fixture is used.
|
||||
# @POST: Logs are verified to contain Entry, Action, and Exit tags.
|
||||
def test_belief_scope_logs_entry_action_exit(caplog):
|
||||
"""Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs."""
|
||||
caplog.set_level("INFO")
|
||||
|
||||
with belief_scope("TestFunction"):
|
||||
logger.info("Doing something important")
|
||||
@@ -35,28 +19,16 @@ def test_belief_scope_logs_entry_action_exit_at_debug(caplog):
|
||||
assert any("[TestFunction][Entry]" in msg for msg in log_messages), "Entry log not found"
|
||||
assert any("[TestFunction][Action] Doing something important" in msg for msg in log_messages), "Action log not found"
|
||||
assert any("[TestFunction][Exit]" in msg for msg in log_messages), "Exit log not found"
|
||||
|
||||
# Reset to INFO
|
||||
config = LoggingConfig(level="INFO", task_log_level="INFO", enable_belief_state=True)
|
||||
configure_logger(config)
|
||||
# [/DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function]
|
||||
# [/DEF:test_belief_scope_logs_entry_action_exit:Function]
|
||||
|
||||
|
||||
# [DEF:test_belief_scope_error_handling:Function]
|
||||
# @PURPOSE: Test that belief_scope logs Coherence:Failed on exception.
|
||||
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
|
||||
# @PRE: belief_scope is available. caplog fixture is used.
|
||||
# @POST: Logs are verified to contain Coherence:Failed tag.
|
||||
def test_belief_scope_error_handling(caplog):
|
||||
"""Test that belief_scope logs Coherence:Failed on exception."""
|
||||
# Configure logger to DEBUG level
|
||||
config = LoggingConfig(
|
||||
level="DEBUG",
|
||||
task_log_level="DEBUG",
|
||||
enable_belief_state=True
|
||||
)
|
||||
configure_logger(config)
|
||||
|
||||
caplog.set_level("DEBUG")
|
||||
caplog.set_level("INFO")
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
with belief_scope("FailingFunction"):
|
||||
@@ -67,28 +39,16 @@ def test_belief_scope_error_handling(caplog):
|
||||
assert any("[FailingFunction][Entry]" in msg for msg in log_messages), "Entry log not found"
|
||||
assert any("[FailingFunction][Coherence:Failed]" in msg for msg in log_messages), "Failed coherence log not found"
|
||||
# Exit should not be logged on failure
|
||||
|
||||
# Reset to INFO
|
||||
config = LoggingConfig(level="INFO", task_log_level="INFO", enable_belief_state=True)
|
||||
configure_logger(config)
|
||||
# [/DEF:test_belief_scope_error_handling:Function]
|
||||
|
||||
|
||||
# [DEF:test_belief_scope_success_coherence:Function]
|
||||
# @PURPOSE: Test that belief_scope logs Coherence:OK on success.
|
||||
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
|
||||
# @PRE: belief_scope is available. caplog fixture is used.
|
||||
# @POST: Logs are verified to contain Coherence:OK tag.
|
||||
def test_belief_scope_success_coherence(caplog):
|
||||
"""Test that belief_scope logs Coherence:OK on success."""
|
||||
# Configure logger to DEBUG level
|
||||
config = LoggingConfig(
|
||||
level="DEBUG",
|
||||
task_log_level="DEBUG",
|
||||
enable_belief_state=True
|
||||
)
|
||||
configure_logger(config)
|
||||
|
||||
caplog.set_level("DEBUG")
|
||||
caplog.set_level("INFO")
|
||||
|
||||
with belief_scope("SuccessFunction"):
|
||||
pass
|
||||
@@ -96,119 +56,4 @@ def test_belief_scope_success_coherence(caplog):
|
||||
log_messages = [record.message for record in caplog.records]
|
||||
|
||||
assert any("[SuccessFunction][Coherence:OK]" in msg for msg in log_messages), "Success coherence log not found"
|
||||
|
||||
# Reset to INFO
|
||||
config = LoggingConfig(level="INFO", task_log_level="INFO", enable_belief_state=True)
|
||||
configure_logger(config)
|
||||
# [/DEF:test_belief_scope_success_coherence:Function]
|
||||
|
||||
|
||||
# [DEF:test_belief_scope_not_visible_at_info:Function]
|
||||
# @PURPOSE: Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level.
|
||||
# @PRE: belief_scope is available. caplog fixture is used.
|
||||
# @POST: Entry/Exit/Coherence logs are not captured at INFO level.
|
||||
def test_belief_scope_not_visible_at_info(caplog):
|
||||
"""Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level."""
|
||||
caplog.set_level("INFO")
|
||||
|
||||
with belief_scope("InfoLevelFunction"):
|
||||
logger.info("Doing something important")
|
||||
|
||||
log_messages = [record.message for record in caplog.records]
|
||||
|
||||
# Action log should be visible
|
||||
assert any("[InfoLevelFunction][Action] Doing something important" in msg for msg in log_messages), "Action log not found"
|
||||
# Entry/Exit/Coherence should NOT be visible at INFO level
|
||||
assert not any("[InfoLevelFunction][Entry]" in msg for msg in log_messages), "Entry log should not be visible at INFO"
|
||||
assert not any("[InfoLevelFunction][Exit]" in msg for msg in log_messages), "Exit log should not be visible at INFO"
|
||||
assert not any("[InfoLevelFunction][Coherence:OK]" in msg for msg in log_messages), "Coherence log should not be visible at INFO"
|
||||
# [/DEF:test_belief_scope_not_visible_at_info:Function]
|
||||
|
||||
|
||||
# [DEF:test_task_log_level_default:Function]
|
||||
# @PURPOSE: Test that default task log level is INFO.
|
||||
# @PRE: None.
|
||||
# @POST: Default level is INFO.
|
||||
def test_task_log_level_default():
|
||||
"""Test that default task log level is INFO."""
|
||||
level = get_task_log_level()
|
||||
assert level == "INFO"
|
||||
# [/DEF:test_task_log_level_default:Function]
|
||||
|
||||
|
||||
# [DEF:test_should_log_task_level:Function]
|
||||
# @PURPOSE: Test that should_log_task_level correctly filters log levels.
|
||||
# @PRE: None.
|
||||
# @POST: Filtering works correctly for all level combinations.
|
||||
def test_should_log_task_level():
|
||||
"""Test that should_log_task_level correctly filters log levels."""
|
||||
# Default level is INFO
|
||||
assert should_log_task_level("ERROR") is True, "ERROR should be logged at INFO threshold"
|
||||
assert should_log_task_level("WARNING") is True, "WARNING should be logged at INFO threshold"
|
||||
assert should_log_task_level("INFO") is True, "INFO should be logged at INFO threshold"
|
||||
assert should_log_task_level("DEBUG") is False, "DEBUG should NOT be logged at INFO threshold"
|
||||
# [/DEF:test_should_log_task_level:Function]
|
||||
|
||||
|
||||
# [DEF:test_configure_logger_task_log_level:Function]
|
||||
# @PURPOSE: Test that configure_logger updates task_log_level.
|
||||
# @PRE: LoggingConfig is available.
|
||||
# @POST: task_log_level is updated correctly.
|
||||
def test_configure_logger_task_log_level():
|
||||
"""Test that configure_logger updates task_log_level."""
|
||||
config = LoggingConfig(
|
||||
level="DEBUG",
|
||||
task_log_level="DEBUG",
|
||||
enable_belief_state=True
|
||||
)
|
||||
configure_logger(config)
|
||||
|
||||
assert get_task_log_level() == "DEBUG", "task_log_level should be DEBUG"
|
||||
assert should_log_task_level("DEBUG") is True, "DEBUG should be logged at DEBUG threshold"
|
||||
|
||||
# Reset to INFO
|
||||
config = LoggingConfig(
|
||||
level="INFO",
|
||||
task_log_level="INFO",
|
||||
enable_belief_state=True
|
||||
)
|
||||
configure_logger(config)
|
||||
assert get_task_log_level() == "INFO", "task_log_level should be reset to INFO"
|
||||
# [/DEF:test_configure_logger_task_log_level:Function]
|
||||
|
||||
|
||||
# [DEF:test_enable_belief_state_flag:Function]
|
||||
# @PURPOSE: Test that enable_belief_state flag controls belief_scope logging.
|
||||
# @PRE: LoggingConfig is available. caplog fixture is used.
|
||||
# @POST: belief_scope logs are controlled by the flag.
|
||||
def test_enable_belief_state_flag(caplog):
|
||||
"""Test that enable_belief_state flag controls belief_scope logging."""
|
||||
# Disable belief state
|
||||
config = LoggingConfig(
|
||||
level="DEBUG",
|
||||
task_log_level="DEBUG",
|
||||
enable_belief_state=False
|
||||
)
|
||||
configure_logger(config)
|
||||
|
||||
caplog.set_level("DEBUG")
|
||||
|
||||
with belief_scope("DisabledFunction"):
|
||||
logger.info("Doing something")
|
||||
|
||||
log_messages = [record.message for record in caplog.records]
|
||||
|
||||
# Entry and Exit should NOT be logged when disabled
|
||||
assert not any("[DisabledFunction][Entry]" in msg for msg in log_messages), "Entry should not be logged when disabled"
|
||||
assert not any("[DisabledFunction][Exit]" in msg for msg in log_messages), "Exit should not be logged when disabled"
|
||||
# Coherence:OK should still be logged (internal tracking)
|
||||
assert any("[DisabledFunction][Coherence:OK]" in msg for msg in log_messages), "Coherence should still be logged"
|
||||
|
||||
# Re-enable for other tests
|
||||
config = LoggingConfig(
|
||||
level="DEBUG",
|
||||
task_log_level="DEBUG",
|
||||
enable_belief_state=True
|
||||
)
|
||||
configure_logger(config)
|
||||
# [/DEF:test_enable_belief_state_flag:Function]
|
||||
# [/DEF:test_belief_scope_success_coherence:Function]
|
||||
@@ -1,377 +0,0 @@
|
||||
# [DEF:test_task_logger:Module]
|
||||
# @SEMANTICS: test, task_logger, task_context, unit_test
|
||||
# @PURPOSE: Unit tests for TaskLogger and TaskContext.
|
||||
# @LAYER: Test
|
||||
# @RELATION: TESTS -> TaskLogger, TaskContext
|
||||
# @TIER: STANDARD
|
||||
|
||||
# [SECTION: IMPORTS]
|
||||
import pytest
|
||||
from unittest.mock import Mock, MagicMock
|
||||
from datetime import datetime
|
||||
|
||||
from src.core.task_manager.task_logger import TaskLogger
|
||||
from src.core.task_manager.context import TaskContext
|
||||
from src.core.task_manager.models import LogEntry
|
||||
# [/SECTION]
|
||||
|
||||
# [DEF:TestTaskLogger:Class]
|
||||
# @PURPOSE: Test suite for TaskLogger.
|
||||
# @TIER: STANDARD
|
||||
class TestTaskLogger:
|
||||
|
||||
# [DEF:setup_method:Function]
|
||||
# @PURPOSE: Setup for each test method.
|
||||
# @PRE: None.
|
||||
# @POST: Mock add_log_fn created.
|
||||
def setup_method(self):
|
||||
"""Create a mock add_log function for testing."""
|
||||
self.mock_add_log = Mock()
|
||||
self.logger = TaskLogger(
|
||||
task_id="test-task-1",
|
||||
add_log_fn=self.mock_add_log,
|
||||
source="test_source"
|
||||
)
|
||||
# [/DEF:setup_method:Function]
|
||||
|
||||
# [DEF:test_init:Function]
|
||||
# @PURPOSE: Test TaskLogger initialization.
|
||||
# @PRE: None.
|
||||
# @POST: Logger instance created with correct attributes.
|
||||
def test_init(self):
|
||||
"""Test TaskLogger initialization."""
|
||||
assert self.logger._task_id == "test-task-1"
|
||||
assert self.logger._default_source == "test_source"
|
||||
assert self.logger._add_log == self.mock_add_log
|
||||
# [/DEF:test_init:Function]
|
||||
|
||||
# [DEF:test_with_source:Function]
|
||||
# @PURPOSE: Test creating a sub-logger with different source.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: New logger created with different source but same task_id.
|
||||
def test_with_source(self):
|
||||
"""Test creating a sub-logger with different source."""
|
||||
sub_logger = self.logger.with_source("new_source")
|
||||
|
||||
assert sub_logger._task_id == "test-task-1"
|
||||
assert sub_logger._default_source == "new_source"
|
||||
assert sub_logger._add_log == self.mock_add_log
|
||||
# [/DEF:test_with_source:Function]
|
||||
|
||||
# [DEF:test_debug:Function]
|
||||
# @PURPOSE: Test debug log level.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with DEBUG level.
|
||||
def test_debug(self):
|
||||
"""Test debug logging."""
|
||||
self.logger.debug("Debug message")
|
||||
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="DEBUG",
|
||||
message="Debug message",
|
||||
source="test_source",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_debug:Function]
|
||||
|
||||
# [DEF:test_info:Function]
|
||||
# @PURPOSE: Test info log level.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with INFO level.
|
||||
def test_info(self):
|
||||
"""Test info logging."""
|
||||
self.logger.info("Info message")
|
||||
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="INFO",
|
||||
message="Info message",
|
||||
source="test_source",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_info:Function]
|
||||
|
||||
# [DEF:test_warning:Function]
|
||||
# @PURPOSE: Test warning log level.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with WARNING level.
|
||||
def test_warning(self):
|
||||
"""Test warning logging."""
|
||||
self.logger.warning("Warning message")
|
||||
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="WARNING",
|
||||
message="Warning message",
|
||||
source="test_source",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_warning:Function]
|
||||
|
||||
# [DEF:test_error:Function]
|
||||
# @PURPOSE: Test error log level.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with ERROR level.
|
||||
def test_error(self):
|
||||
"""Test error logging."""
|
||||
self.logger.error("Error message")
|
||||
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="ERROR",
|
||||
message="Error message",
|
||||
source="test_source",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_error:Function]
|
||||
|
||||
# [DEF:test_error_with_metadata:Function]
|
||||
# @PURPOSE: Test error logging with metadata.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with ERROR level and metadata.
|
||||
def test_error_with_metadata(self):
|
||||
"""Test error logging with metadata."""
|
||||
metadata = {"error_code": 500, "details": "Connection failed"}
|
||||
self.logger.error("Error message", metadata=metadata)
|
||||
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="ERROR",
|
||||
message="Error message",
|
||||
source="test_source",
|
||||
metadata=metadata
|
||||
)
|
||||
# [/DEF:test_error_with_metadata:Function]
|
||||
|
||||
# [DEF:test_progress:Function]
|
||||
# @PURPOSE: Test progress logging.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with INFO level and progress metadata.
|
||||
def test_progress(self):
|
||||
"""Test progress logging."""
|
||||
self.logger.progress("Processing items", percent=50)
|
||||
|
||||
expected_metadata = {"progress": 50}
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="INFO",
|
||||
message="Processing items",
|
||||
source="test_source",
|
||||
metadata=expected_metadata
|
||||
)
|
||||
# [/DEF:test_progress:Function]
|
||||
|
||||
# [DEF:test_progress_clamping:Function]
|
||||
# @PURPOSE: Test progress value clamping (0-100).
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: Progress values clamped to 0-100 range.
|
||||
def test_progress_clamping(self):
|
||||
"""Test progress value clamping."""
|
||||
# Test below 0
|
||||
self.logger.progress("Below 0", percent=-10)
|
||||
call1 = self.mock_add_log.call_args_list[0]
|
||||
assert call1.kwargs["metadata"]["progress"] == 0
|
||||
|
||||
self.mock_add_log.reset_mock()
|
||||
|
||||
# Test above 100
|
||||
self.logger.progress("Above 100", percent=150)
|
||||
call2 = self.mock_add_log.call_args_list[0]
|
||||
assert call2.kwargs["metadata"]["progress"] == 100
|
||||
# [/DEF:test_progress_clamping:Function]
|
||||
|
||||
# [DEF:test_source_override:Function]
|
||||
# @PURPOSE: Test overriding the default source.
|
||||
# @PRE: Logger initialized.
|
||||
# @POST: add_log_fn called with overridden source.
|
||||
def test_source_override(self):
|
||||
"""Test overriding the default source."""
|
||||
self.logger.info("Message", source="override_source")
|
||||
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-1",
|
||||
level="INFO",
|
||||
message="Message",
|
||||
source="override_source",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_source_override:Function]
|
||||
|
||||
# [DEF:test_sub_logger_source_independence:Function]
|
||||
# @PURPOSE: Test sub-logger independence from parent.
|
||||
# @PRE: Logger and sub-logger initialized.
|
||||
# @POST: Sub-logger has different source, parent unchanged.
|
||||
def test_sub_logger_source_independence(self):
|
||||
"""Test sub-logger source independence from parent."""
|
||||
sub_logger = self.logger.with_source("sub_source")
|
||||
|
||||
# Log with parent
|
||||
self.logger.info("Parent message")
|
||||
|
||||
# Log with sub-logger
|
||||
sub_logger.info("Sub message")
|
||||
|
||||
# Verify both calls were made with correct sources
|
||||
calls = self.mock_add_log.call_args_list
|
||||
assert len(calls) == 2
|
||||
assert calls[0].kwargs["source"] == "test_source"
|
||||
assert calls[1].kwargs["source"] == "sub_source"
|
||||
# [/DEF:test_sub_logger_source_independence:Function]
|
||||
|
||||
# [/DEF:TestTaskLogger:Class]
|
||||
|
||||
# [DEF:TestTaskContext:Class]
|
||||
# @PURPOSE: Test suite for TaskContext.
|
||||
# @TIER: STANDARD
|
||||
class TestTaskContext:
|
||||
|
||||
# [DEF:setup_method:Function]
|
||||
# @PURPOSE: Setup for each test method.
|
||||
# @PRE: None.
|
||||
# @POST: Mock add_log_fn created.
|
||||
def setup_method(self):
|
||||
"""Create a mock add_log function for testing."""
|
||||
self.mock_add_log = Mock()
|
||||
self.params = {"param1": "value1", "param2": "value2"}
|
||||
self.context = TaskContext(
|
||||
task_id="test-task-2",
|
||||
add_log_fn=self.mock_add_log,
|
||||
params=self.params,
|
||||
default_source="plugin"
|
||||
)
|
||||
# [/DEF:setup_method:Function]
|
||||
|
||||
# [DEF:test_init:Function]
|
||||
# @PURPOSE: Test TaskContext initialization.
|
||||
# @PRE: None.
|
||||
# @POST: Context instance created with correct attributes.
|
||||
def test_init(self):
|
||||
"""Test TaskContext initialization."""
|
||||
assert self.context._task_id == "test-task-2"
|
||||
assert self.context._params == self.params
|
||||
assert isinstance(self.context._logger, TaskLogger)
|
||||
assert self.context._logger._default_source == "plugin"
|
||||
# [/DEF:test_init:Function]
|
||||
|
||||
# [DEF:test_task_id_property:Function]
|
||||
# @PURPOSE: Test task_id property.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: Returns correct task_id.
|
||||
def test_task_id_property(self):
|
||||
"""Test task_id property."""
|
||||
assert self.context.task_id == "test-task-2"
|
||||
# [/DEF:test_task_id_property:Function]
|
||||
|
||||
# [DEF:test_logger_property:Function]
|
||||
# @PURPOSE: Test logger property.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: Returns TaskLogger instance.
|
||||
def test_logger_property(self):
|
||||
"""Test logger property."""
|
||||
logger = self.context.logger
|
||||
assert isinstance(logger, TaskLogger)
|
||||
assert logger._task_id == "test-task-2"
|
||||
assert logger._default_source == "plugin"
|
||||
# [/DEF:test_logger_property:Function]
|
||||
|
||||
# [DEF:test_params_property:Function]
|
||||
# @PURPOSE: Test params property.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: Returns correct params dict.
|
||||
def test_params_property(self):
|
||||
"""Test params property."""
|
||||
assert self.context.params == self.params
|
||||
# [/DEF:test_params_property:Function]
|
||||
|
||||
# [DEF:test_get_param:Function]
|
||||
# @PURPOSE: Test getting a specific parameter.
|
||||
# @PRE: Context initialized with params.
|
||||
# @POST: Returns parameter value or default.
|
||||
def test_get_param(self):
|
||||
"""Test getting a specific parameter."""
|
||||
assert self.context.get_param("param1") == "value1"
|
||||
assert self.context.get_param("param2") == "value2"
|
||||
assert self.context.get_param("nonexistent") is None
|
||||
assert self.context.get_param("nonexistent", "default") == "default"
|
||||
# [/DEF:test_get_param:Function]
|
||||
|
||||
# [DEF:test_create_sub_context:Function]
|
||||
# @PURPOSE: Test creating a sub-context with different source.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: New context created with different logger source.
|
||||
def test_create_sub_context(self):
|
||||
"""Test creating a sub-context with different source."""
|
||||
sub_context = self.context.create_sub_context("new_source")
|
||||
|
||||
assert sub_context._task_id == "test-task-2"
|
||||
assert sub_context._params == self.params
|
||||
assert sub_context._logger._default_source == "new_source"
|
||||
assert sub_context._logger._task_id == "test-task-2"
|
||||
# [/DEF:test_create_sub_context:Function]
|
||||
|
||||
# [DEF:test_context_logger_delegates_to_task_logger:Function]
|
||||
# @PURPOSE: Test context logger delegates to TaskLogger.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: Logger calls are delegated to TaskLogger.
|
||||
def test_context_logger_delegates_to_task_logger(self):
|
||||
"""Test context logger delegates to TaskLogger."""
|
||||
# Call through context
|
||||
self.context.logger.info("Test message")
|
||||
|
||||
# Verify the mock was called
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-2",
|
||||
level="INFO",
|
||||
message="Test message",
|
||||
source="plugin",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_context_logger_delegates_to_task_logger:Function]
|
||||
|
||||
# [DEF:test_sub_context_with_source:Function]
|
||||
# @PURPOSE: Test sub-context logger uses new source.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: Sub-context logger uses new source.
|
||||
def test_sub_context_with_source(self):
|
||||
"""Test sub-context logger uses new source."""
|
||||
sub_context = self.context.create_sub_context("api_source")
|
||||
|
||||
# Log through sub-context
|
||||
sub_context.logger.info("API message")
|
||||
|
||||
# Verify the mock was called with new source
|
||||
self.mock_add_log.assert_called_once_with(
|
||||
task_id="test-task-2",
|
||||
level="INFO",
|
||||
message="API message",
|
||||
source="api_source",
|
||||
metadata=None
|
||||
)
|
||||
# [/DEF:test_sub_context_with_source:Function]
|
||||
|
||||
# [DEF:test_multiple_sub_contexts:Function]
|
||||
# @PURPOSE: Test creating multiple sub-contexts.
|
||||
# @PRE: Context initialized.
|
||||
# @POST: Each sub-context has independent logger source.
|
||||
def test_multiple_sub_contexts(self):
|
||||
"""Test creating multiple sub-contexts."""
|
||||
sub1 = self.context.create_sub_context("source1")
|
||||
sub2 = self.context.create_sub_context("source2")
|
||||
sub3 = self.context.create_sub_context("source3")
|
||||
|
||||
assert sub1._logger._default_source == "source1"
|
||||
assert sub2._logger._default_source == "source2"
|
||||
assert sub3._logger._default_source == "source3"
|
||||
|
||||
# All should have same task_id and params
|
||||
assert sub1._task_id == "test-task-2"
|
||||
assert sub2._task_id == "test-task-2"
|
||||
assert sub3._task_id == "test-task-2"
|
||||
assert sub1._params == self.params
|
||||
assert sub2._params == self.params
|
||||
assert sub3._params == self.params
|
||||
# [/DEF:test_multiple_sub_contexts:Function]
|
||||
|
||||
# [/DEF:TestTaskContext:Class]
|
||||
# [/DEF:test_task_logger:Module]
|
||||
@@ -1,38 +0,0 @@
|
||||
# Архитектурное решение: Standalone сервис vs Интеграция в Superset
|
||||
|
||||
## Рекомендация: Оставить отдельным сервисом
|
||||
|
||||
Хотя интеграция непосредственно в Superset может показаться привлекательной для создания "единого интерфейса", **настоятельно рекомендуется оставить `ss-tools` отдельным сервисом** по следующим архитектурным и техническим причинам.
|
||||
|
||||
### 1. Архитектурный паттерн: Оркестратор против Инстанса
|
||||
* **Проблема:** `ss-tools` действует как **"Менеджер менеджеров"**. Его основная ценность заключается в подключении и перемещении данных *между* различными окружениями Superset (Dev → Stage → Prod).
|
||||
* **Почему разделение лучше:** Если встроить эту логику в Superset, придется сделать один конкретный инстанс (например, "Dev") "главным контроллером" для остальных. Если этот инстанс упадет, вы потеряете возможности управления. Отдельный инструмент находится *над* окружениями, рассматривая их все как равные endpoints.
|
||||
|
||||
### 2. Несовместимость технологического стека
|
||||
* **Superset:** Бэкенд на **Flask** (Python) с фронтендом на **React**.
|
||||
* **ss-tools:** Бэкенд на **FastAPI** (Python) с фронтендом на **SvelteKit**.
|
||||
* **Цена миграции:** Перенос в Superset потребует **полного переписывания** всего фронтенда с Svelte на React, чтобы он мог работать как плагин или расширение Superset. Также потребуется рефакторинг внедрения зависимостей (Dependency Injection) из FastAPI, чтобы соответствовать структуре Flask-AppBuilder в Superset.
|
||||
|
||||
### 3. Цикл релизов и гибкость
|
||||
* **Superset:** Имеет сложный и медленный цикл релизов. Внедрение кастомных фич в основной репозиторий (upstream) затруднительно, а поддержка собственного "форка" Superset — это кошмар сопровождения (конфликты слияния при каждом обновлении).
|
||||
* **ss-tools:** Как отдельный микросервис, вы можете развертывать новые функции (например, новые LLM плагины или Git-процессы) мгновенно, не беспокоясь о том, что сломаете основную BI-платформу.
|
||||
|
||||
### 4. Безопасность и изоляция
|
||||
* **Права доступа:** `ss-tools` имеет свои собственные страницы администрирования (`AdminRolesPage` и `AdminUsersPage`), управляющие правами специально для задач *деплоя* и *резервного копирования*. Это задачи DevOps, а не бизнес-аналитики (BI). Смешивание привилегий развертывания с привилегиями просмотра данных в одной системе увеличивает "радиус поражения" в случае проблем с безопасностью.
|
||||
|
||||
### Техническая реализуемость (если все же решиться)
|
||||
Это технически возможно, но процесс будет выглядеть так:
|
||||
1. **Frontend:** Придется переписать все компоненты Svelte (`DashboardGrid`, `MappingTable` и т.д.) как компоненты React, чтобы они могли жить внутри `superset-frontend`.
|
||||
2. **Backend:** Вероятно, придется реализовать это как серверное **расширение Superset** (используя Flask Blueprints), создав новые API endpoints, которые будет вызывать фронтенд на React.
|
||||
3. **Database:** Придется добавить таблицы проекта (`task_records`, `connection_configs`) в базу данных метаданных Superset через миграции Alembic.
|
||||
|
||||
### Итоговая таблица
|
||||
|
||||
| Характеристика | Отдельный сервис (Текущий) | Интеграция в Superset |
|
||||
| :--- | :--- | :--- |
|
||||
| **Скорость разработки** | 🚀 Высокая (Независимая) | 🐢 Низкая (Связана с монолитом) |
|
||||
| **Технологический стек** | Современный (FastAPI + SvelteKit) | Legacy/Строгий (Flask + React) |
|
||||
| **Управление мульти-окружением** | ✅ Нативно (Спроектирован для этого) | ⚠️ Неудобно (Один инстанс управляет всеми) |
|
||||
| **Поддержка (Maintenance)** | ✅ Низкая (Обновление в любое время) | ❌ Высокая (Rebase при каждом обновлении Superset) |
|
||||
|
||||
**Вывод:** Проект уже хорошо спроектирован как специализированный инструмент DevOps (слой `DevOps/Tooling` в вашей семантической карте). Слияние его с Superset, скорее всего, приведет к техническому долгу и регрессии функциональности.
|
||||
@@ -64,156 +64,24 @@ class HelloWorldPlugin(PluginBase):
|
||||
"required": ["name"],
|
||||
}
|
||||
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
name = params["name"]
|
||||
if context:
|
||||
context.logger.info(f"Hello, {name}!")
|
||||
else:
|
||||
print(f"Hello, {name}!")
|
||||
print(f"Hello, {name}!")
|
||||
```
|
||||
|
||||
## 4. Logging with TaskContext
|
||||
## 4. Logging
|
||||
|
||||
Plugins now support TaskContext for structured logging with source attribution. The `context` parameter provides access to a logger that automatically tags logs with the task ID and a source identifier.
|
||||
|
||||
### 4.1. Basic Logging
|
||||
|
||||
Use `context.logger` to log messages with automatic source attribution:
|
||||
You can use the global logger instance to log messages from your plugin. The logger is available in the `superset_tool.utils.logger` module.
|
||||
|
||||
```python
|
||||
from typing import Dict, Any, Optional
|
||||
from ..core.plugin_base import PluginBase
|
||||
from ..core.task_manager.context import TaskContext
|
||||
from superset_tool.utils.logger import SupersetLogger
|
||||
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
if context:
|
||||
# Use TaskContext logger for structured logging
|
||||
context.logger.info("My plugin is running!")
|
||||
else:
|
||||
# Fallback to global logger for backward compatibility
|
||||
from ..core.logger import logger
|
||||
logger.info("My plugin is running!")
|
||||
logger = SupersetLogger()
|
||||
|
||||
async def execute(self, params: Dict[str, Any]):
|
||||
logger.info("My plugin is running!")
|
||||
```
|
||||
|
||||
### 4.2. Source Attribution
|
||||
|
||||
For better log organization, create sub-loggers for different components:
|
||||
|
||||
```python
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
if context:
|
||||
# Create sub-loggers for different components
|
||||
api_log = context.logger.with_source("api")
|
||||
storage_log = context.logger.with_source("storage")
|
||||
|
||||
api_log.info("Connecting to API...")
|
||||
storage_log.info("Saving file...")
|
||||
else:
|
||||
# Fallback to global logger
|
||||
from ..core.logger import logger
|
||||
logger.info("My plugin is running!")
|
||||
```
|
||||
|
||||
### 4.3. Log Levels
|
||||
|
||||
The logger supports standard log levels. Use them appropriately:
|
||||
|
||||
| Level | Usage |
|
||||
|-------|-------|
|
||||
| `DEBUG` | Detailed diagnostic information (API responses, internal state). Only visible when log level is set to DEBUG. |
|
||||
| `INFO` | General operational messages (start/complete notifications, progress updates). |
|
||||
| `WARNING` | Non-critical issues that don't stop execution (deprecated APIs, retry attempts). |
|
||||
| `ERROR` | Failures that prevent an operation from completing (API errors, validation failures). |
|
||||
|
||||
```python
|
||||
# Good: Use DEBUG for verbose diagnostic info
|
||||
api_log.debug(f"API response: {response.json()}")
|
||||
|
||||
# Good: Use INFO for operational milestones
|
||||
log.info(f"Starting backup for environment: {env}")
|
||||
|
||||
# Good: Use WARNING for recoverable issues
|
||||
log.warning(f"Rate limit hit, retrying in {delay}s")
|
||||
|
||||
# Good: Use ERROR for failures
|
||||
log.error(f"Failed to connect to database: {e}")
|
||||
```
|
||||
|
||||
### 4.4. Progress Logging
|
||||
|
||||
For operations that report progress, use the `progress` method:
|
||||
|
||||
```python
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
if context:
|
||||
total_items = 100
|
||||
for i, item in enumerate(items):
|
||||
# Report progress with percentage
|
||||
percent = (i + 1) / total_items * 100
|
||||
context.logger.progress(f"Processing {item}", percent=percent)
|
||||
else:
|
||||
# Fallback
|
||||
from ..core.logger import logger
|
||||
logger.info("My plugin is running!")
|
||||
```
|
||||
|
||||
### 4.5. Logging with Metadata
|
||||
|
||||
You can include structured metadata with log entries:
|
||||
|
||||
```python
|
||||
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
|
||||
if context:
|
||||
context.logger.error(
|
||||
"Operation failed",
|
||||
metadata={"error_code": 500, "details": "Connection timeout"}
|
||||
)
|
||||
else:
|
||||
from ..core.logger import logger
|
||||
logger.error("Operation failed")
|
||||
```
|
||||
|
||||
### 4.6. Common Source Names
|
||||
|
||||
For consistency across plugins, use these standard source names:
|
||||
|
||||
| Source | Usage |
|
||||
|--------|-------|
|
||||
| `superset_api` | Superset REST API calls |
|
||||
| `postgres` | PostgreSQL database operations |
|
||||
| `storage` | File system operations |
|
||||
| `git` | Git operations |
|
||||
| `llm` | LLM API calls |
|
||||
| `screenshot` | Screenshot capture operations |
|
||||
| `migration` | Migration-specific logic |
|
||||
| `backup` | Backup operations |
|
||||
| `debug` | Debug/diagnostic operations |
|
||||
| `search` | Search operations |
|
||||
|
||||
### 4.7. Best Practices
|
||||
|
||||
1. **Always check for context**: Support backward compatibility by checking if `context` is available:
|
||||
```python
|
||||
log = context.logger if context else logger
|
||||
```
|
||||
|
||||
2. **Use source attribution**: Create sub-loggers for different components to make filtering easier in the UI.
|
||||
|
||||
3. **Use appropriate log levels**:
|
||||
- `DEBUG`: Verbose diagnostic info (API responses, internal state)
|
||||
- `INFO`: Operational milestones (start, complete, progress)
|
||||
- `WARNING`: Recoverable issues (rate limits, deprecated APIs)
|
||||
- `ERROR`: Failures that stop an operation
|
||||
|
||||
4. **Log progress for long operations**: Use `progress()` for operations that take time:
|
||||
```python
|
||||
for i, item in enumerate(items):
|
||||
percent = (i + 1) / len(items) * 100
|
||||
log.progress(f"Processing {item}", percent=percent)
|
||||
```
|
||||
|
||||
5. **Keep DEBUG logs verbose, INFO logs concise**: DEBUG logs can include full API responses, while INFO logs should be one-line summaries.
|
||||
|
||||
## 5. Testing
|
||||
|
||||
To test your plugin, simply run the application and navigate to the web UI. Your plugin should appear in the list of available tools.
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:DashboardGrid:Component] -->
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: dashboard, grid, selection, pagination
|
||||
@PURPOSE: Displays a grid of dashboards with selection and pagination.
|
||||
@LAYER: Component
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:Footer:Component] -->
|
||||
<!--
|
||||
@TIER: TRIVIAL
|
||||
@SEMANTICS: footer, layout, copyright
|
||||
@PURPOSE: Displays the application footer with copyright information.
|
||||
@LAYER: UI
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:Navbar:Component] -->
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: navbar, navigation, header, layout
|
||||
@PURPOSE: Main navigation bar for the application.
|
||||
@LAYER: UI
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
<!-- [DEF:TaskLogViewer:Component] -->
|
||||
<!--
|
||||
@SEMANTICS: task, log, viewer, modal, inline
|
||||
@PURPOSE: Displays detailed logs for a specific task in a modal or inline using TaskLogPanel.
|
||||
@PURPOSE: Displays detailed logs for a specific task in a modal or inline.
|
||||
@LAYER: UI
|
||||
@RELATION: USES -> frontend/src/services/taskService.js, frontend/src/components/tasks/TaskLogPanel.svelte
|
||||
@RELATION: USES -> frontend/src/services/taskService.js
|
||||
-->
|
||||
<script>
|
||||
import { createEventDispatcher, onMount, onDestroy } from 'svelte';
|
||||
import { getTaskLogs } from '../services/taskService.js';
|
||||
import { t } from '../lib/i18n';
|
||||
import { Button } from '../lib/ui';
|
||||
import TaskLogPanel from './tasks/TaskLogPanel.svelte';
|
||||
|
||||
export let show = false;
|
||||
export let inline = false;
|
||||
@@ -24,8 +23,7 @@
|
||||
let error = "";
|
||||
let interval;
|
||||
let autoScroll = true;
|
||||
let selectedSource = 'all';
|
||||
let selectedLevel = 'all';
|
||||
let logContainer;
|
||||
|
||||
$: shouldShow = inline || show;
|
||||
|
||||
@@ -38,11 +36,12 @@
|
||||
*/
|
||||
async function fetchLogs() {
|
||||
if (!taskId) return;
|
||||
console.log(`[fetchLogs][Action] Fetching logs for task context={{'taskId': '${taskId}', 'source': '${selectedSource}', 'level': '${selectedLevel}'}}`);
|
||||
console.log(`[fetchLogs][Action] Fetching logs for task context={{'taskId': '${taskId}'}}`);
|
||||
try {
|
||||
// Note: getTaskLogs currently doesn't support filters, but we can filter client-side for now
|
||||
// or update taskService later. For US1, the WebSocket handles real-time filtering.
|
||||
logs = await getTaskLogs(taskId);
|
||||
if (autoScroll) {
|
||||
scrollToBottom();
|
||||
}
|
||||
console.log(`[fetchLogs][Coherence:OK] Logs fetched context={{'count': ${logs.length}}}`);
|
||||
} catch (e) {
|
||||
error = e.message;
|
||||
@@ -53,14 +52,35 @@
|
||||
}
|
||||
// [/DEF:fetchLogs:Function]
|
||||
|
||||
function handleFilterChange(event) {
|
||||
const { source, level } = event.detail;
|
||||
selectedSource = source;
|
||||
selectedLevel = level;
|
||||
// Re-fetch or re-filter if needed.
|
||||
// For now, we just log it as the WebSocket will handle real-time updates with filters.
|
||||
console.log(`[TaskLogViewer] Filter changed: source=${source}, level=${level}`);
|
||||
// [DEF:scrollToBottom:Function]
|
||||
/**
|
||||
* @purpose Scrolls the log container to the bottom.
|
||||
* @pre logContainer element must be bound.
|
||||
* @post logContainer scrollTop is set to scrollHeight.
|
||||
*/
|
||||
function scrollToBottom() {
|
||||
if (logContainer) {
|
||||
setTimeout(() => {
|
||||
logContainer.scrollTop = logContainer.scrollHeight;
|
||||
}, 0);
|
||||
}
|
||||
}
|
||||
// [/DEF:scrollToBottom:Function]
|
||||
|
||||
// [DEF:handleScroll:Function]
|
||||
/**
|
||||
* @purpose Updates auto-scroll preference based on scroll position.
|
||||
* @pre logContainer scroll event fired.
|
||||
* @post autoScroll boolean is updated.
|
||||
*/
|
||||
function handleScroll() {
|
||||
if (!logContainer) return;
|
||||
// If user scrolls up, disable auto-scroll
|
||||
const { scrollTop, scrollHeight, clientHeight } = logContainer;
|
||||
const atBottom = scrollHeight - scrollTop - clientHeight < 50;
|
||||
autoScroll = atBottom;
|
||||
}
|
||||
// [/DEF:handleScroll:Function]
|
||||
|
||||
// [DEF:close:Function]
|
||||
/**
|
||||
@@ -74,6 +94,23 @@
|
||||
}
|
||||
// [/DEF:close:Function]
|
||||
|
||||
// [DEF:getLogLevelColor:Function]
|
||||
/**
|
||||
* @purpose Returns the CSS color class for a given log level.
|
||||
* @pre level string is provided.
|
||||
* @post Returns tailwind color class string.
|
||||
*/
|
||||
function getLogLevelColor(level) {
|
||||
switch (level) {
|
||||
case 'INFO': return 'text-blue-600';
|
||||
case 'WARNING': return 'text-yellow-600';
|
||||
case 'ERROR': return 'text-red-600';
|
||||
case 'DEBUG': return 'text-gray-500';
|
||||
default: return 'text-gray-800';
|
||||
}
|
||||
}
|
||||
// [/DEF:getLogLevelColor:Function]
|
||||
|
||||
// React to changes in show/taskId/taskStatus
|
||||
$: if (shouldShow && taskId) {
|
||||
if (interval) clearInterval(interval);
|
||||
@@ -83,7 +120,7 @@
|
||||
error = "";
|
||||
fetchLogs();
|
||||
|
||||
// Poll if task is running (Fallback for when WS is not used)
|
||||
// Poll if task is running
|
||||
if (taskStatus === 'RUNNING' || taskStatus === 'AWAITING_INPUT' || taskStatus === 'AWAITING_MAPPING') {
|
||||
interval = setInterval(fetchLogs, 3000);
|
||||
}
|
||||
@@ -113,18 +150,34 @@
|
||||
<Button variant="ghost" size="sm" on:click={fetchLogs} class="text-blue-600">{$t.tasks?.refresh}</Button>
|
||||
</div>
|
||||
|
||||
<div class="flex-1 min-h-[400px]">
|
||||
<div class="flex-1 border rounded-md bg-gray-50 p-4 overflow-y-auto font-mono text-sm"
|
||||
bind:this={logContainer}
|
||||
on:scroll={handleScroll}>
|
||||
{#if loading && logs.length === 0}
|
||||
<p class="text-gray-500 text-center">{$t.tasks?.loading}</p>
|
||||
{:else if error}
|
||||
<p class="text-red-500 text-center">{error}</p>
|
||||
{:else if logs.length === 0}
|
||||
<p class="text-gray-500 text-center">{$t.tasks?.no_logs}</p>
|
||||
{:else}
|
||||
<TaskLogPanel
|
||||
{taskId}
|
||||
{logs}
|
||||
{autoScroll}
|
||||
on:filterChange={handleFilterChange}
|
||||
/>
|
||||
{#each logs as log}
|
||||
<div class="mb-1 hover:bg-gray-100 p-1 rounded">
|
||||
<span class="text-gray-400 text-xs mr-2">
|
||||
{new Date(log.timestamp).toLocaleTimeString()}
|
||||
</span>
|
||||
<span class="font-bold text-xs mr-2 w-16 inline-block {getLogLevelColor(log.level)}">
|
||||
[{log.level}]
|
||||
</span>
|
||||
<span class="text-gray-800 break-words">
|
||||
{log.message}
|
||||
</span>
|
||||
{#if log.context}
|
||||
<div class="ml-24 text-xs text-gray-500 mt-1 bg-gray-100 p-1 rounded overflow-x-auto">
|
||||
<pre>{JSON.stringify(log.context, null, 2)}</pre>
|
||||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
{/each}
|
||||
{/if}
|
||||
</div>
|
||||
</div>
|
||||
@@ -140,23 +193,39 @@
|
||||
<div class="bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4">
|
||||
<div class="sm:flex sm:items-start">
|
||||
<div class="mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left w-full">
|
||||
<h3 class="text-lg leading-6 font-medium text-gray-900 flex justify-between items-center mb-4" id="modal-title">
|
||||
<h3 class="text-lg leading-6 font-medium text-gray-900 flex justify-between items-center" id="modal-title">
|
||||
<span>{$t.tasks.logs_title} <span class="text-sm text-gray-500 font-normal">({taskId})</span></span>
|
||||
<Button variant="ghost" size="sm" on:click={fetchLogs} class="text-blue-600">{$t.tasks.refresh}</Button>
|
||||
</h3>
|
||||
|
||||
<div class="h-[500px]">
|
||||
<div class="mt-4 border rounded-md bg-gray-50 p-4 h-96 overflow-y-auto font-mono text-sm"
|
||||
bind:this={logContainer}
|
||||
on:scroll={handleScroll}>
|
||||
{#if loading && logs.length === 0}
|
||||
<p class="text-gray-500 text-center">{$t.tasks.loading}</p>
|
||||
{:else if error}
|
||||
<p class="text-red-500 text-center">{error}</p>
|
||||
{:else if logs.length === 0}
|
||||
<p class="text-gray-500 text-center">{$t.tasks.no_logs}</p>
|
||||
{:else}
|
||||
<TaskLogPanel
|
||||
{taskId}
|
||||
{logs}
|
||||
{autoScroll}
|
||||
on:filterChange={handleFilterChange}
|
||||
/>
|
||||
{#each logs as log}
|
||||
<div class="mb-1 hover:bg-gray-100 p-1 rounded">
|
||||
<span class="text-gray-400 text-xs mr-2">
|
||||
{new Date(log.timestamp).toLocaleTimeString()}
|
||||
</span>
|
||||
<span class="font-bold text-xs mr-2 w-16 inline-block {getLogLevelColor(log.level)}">
|
||||
[{log.level}]
|
||||
</span>
|
||||
<span class="text-gray-800 break-words">
|
||||
{log.message}
|
||||
</span>
|
||||
{#if log.context}
|
||||
<div class="ml-24 text-xs text-gray-500 mt-1 bg-gray-100 p-1 rounded overflow-x-auto">
|
||||
<pre>{JSON.stringify(log.context, null, 2)}</pre>
|
||||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
{/each}
|
||||
{/if}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
<!-- [DEF:TaskRunner:Component] -->
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: task, runner, logs, websocket
|
||||
@PURPOSE: Connects to a WebSocket to display real-time logs for a running task with filtering support.
|
||||
@PURPOSE: Connects to a WebSocket to display real-time logs for a running task.
|
||||
@LAYER: UI
|
||||
@RELATION: DEPENDS_ON -> frontend/src/lib/stores.js, frontend/src/components/tasks/TaskLogPanel.svelte
|
||||
@RELATION: DEPENDS_ON -> frontend/src/lib/stores.js
|
||||
|
||||
@PROPS: None
|
||||
@EVENTS: None
|
||||
-->
|
||||
<script>
|
||||
// [SECTION: IMPORTS]
|
||||
@@ -15,7 +17,6 @@
|
||||
import { addToast } from '../lib/toasts.js';
|
||||
import MissingMappingModal from './MissingMappingModal.svelte';
|
||||
import PasswordPrompt from './PasswordPrompt.svelte';
|
||||
import TaskLogPanel from './tasks/TaskLogPanel.svelte';
|
||||
// [/SECTION]
|
||||
|
||||
let ws;
|
||||
@@ -34,12 +35,9 @@
|
||||
let showPasswordPrompt = false;
|
||||
let passwordPromptData = { databases: [], errorMessage: '' };
|
||||
|
||||
let selectedSource = 'all';
|
||||
let selectedLevel = 'all';
|
||||
|
||||
// [DEF:connect:Function]
|
||||
/**
|
||||
* @purpose Establishes WebSocket connection with exponential backoff and filter parameters.
|
||||
* @purpose Establishes WebSocket connection with exponential backoff.
|
||||
* @pre selectedTask must be set in the store.
|
||||
* @post WebSocket instance created and listeners attached.
|
||||
*/
|
||||
@@ -47,21 +45,10 @@
|
||||
const task = get(selectedTask);
|
||||
if (!task || connectionStatus === 'completed') return;
|
||||
|
||||
console.log(`[TaskRunner][Entry] Connecting to logs for task: ${task.id} (Attempt ${reconnectAttempts + 1}) filters: source=${selectedSource}, level=${selectedLevel}`);
|
||||
console.log(`[TaskRunner][Entry] Connecting to logs for task: ${task.id} (Attempt ${reconnectAttempts + 1})`);
|
||||
connectionStatus = 'connecting';
|
||||
|
||||
let wsUrl = getWsUrl(task.id);
|
||||
|
||||
// Append filter parameters to WebSocket URL
|
||||
const params = new URLSearchParams();
|
||||
if (selectedSource !== 'all') params.append('source', selectedSource);
|
||||
if (selectedLevel !== 'all') params.append('level', selectedLevel);
|
||||
|
||||
const queryString = params.toString();
|
||||
if (queryString) {
|
||||
wsUrl += (wsUrl.includes('?') ? '&' : '?') + queryString;
|
||||
}
|
||||
|
||||
const wsUrl = getWsUrl(task.id);
|
||||
ws = new WebSocket(wsUrl);
|
||||
|
||||
ws.onopen = () => {
|
||||
@@ -94,6 +81,7 @@
|
||||
}
|
||||
|
||||
// Check for password request via log context or message
|
||||
// Note: The backend logs "Task paused for user input" with context
|
||||
if (logEntry.message && logEntry.message.includes('Task paused for user input') && logEntry.context && logEntry.context.input_request) {
|
||||
const request = logEntry.context.input_request;
|
||||
if (request.type === 'database_password') {
|
||||
@@ -107,6 +95,8 @@
|
||||
}
|
||||
};
|
||||
|
||||
// Check if task is already awaiting input (e.g. when re-selecting task)
|
||||
// We use the 'task' variable from the outer scope (connect function)
|
||||
if (task && task.status === 'AWAITING_INPUT' && task.input_request && task.input_request.type === 'database_password') {
|
||||
connectionStatus = 'awaiting_input';
|
||||
passwordPromptData = {
|
||||
@@ -141,43 +131,16 @@
|
||||
}
|
||||
// [/DEF:connect:Function]
|
||||
|
||||
// [DEF:handleFilterChange:Function]
|
||||
/**
|
||||
* @purpose Handles filter changes and reconnects WebSocket with new parameters.
|
||||
* @pre event.detail contains source and level filter values.
|
||||
* @post WebSocket reconnected with new filter parameters, logs cleared.
|
||||
*/
|
||||
function handleFilterChange(event) {
|
||||
const { source, level } = event.detail;
|
||||
if (selectedSource === source && selectedLevel === level) return;
|
||||
|
||||
selectedSource = source;
|
||||
selectedLevel = level;
|
||||
|
||||
console.log(`[TaskRunner] Filter changed, reconnecting WebSocket: source=${source}, level=${level}`);
|
||||
|
||||
// Clear current logs when filter changes to avoid confusion
|
||||
taskLogs.set([]);
|
||||
|
||||
if (ws) {
|
||||
ws.close(); // This will trigger reconnection via onclose if not completed
|
||||
} else {
|
||||
connect();
|
||||
}
|
||||
}
|
||||
// [/DEF:handleFilterChange:Function]
|
||||
|
||||
// [DEF:fetchTargetDatabases:Function]
|
||||
/**
|
||||
* @purpose Fetches available databases from target environment for mapping.
|
||||
* @pre selectedTask must have to_env parameter set.
|
||||
* @post targetDatabases array populated with available databases.
|
||||
*/
|
||||
// @PURPOSE: Fetches the list of databases in the target environment.
|
||||
// @PRE: task must be selected and have a target environment parameter.
|
||||
// @POST: targetDatabases array is populated with database objects.
|
||||
async function fetchTargetDatabases() {
|
||||
const task = get(selectedTask);
|
||||
if (!task || !task.params.to_env) return;
|
||||
|
||||
try {
|
||||
// We need to find the environment ID by name first
|
||||
const envs = await api.fetchApi('/environments');
|
||||
const targetEnv = envs.find(e => e.name === task.params.to_env);
|
||||
|
||||
@@ -191,16 +154,15 @@
|
||||
// [/DEF:fetchTargetDatabases:Function]
|
||||
|
||||
// [DEF:handleMappingResolve:Function]
|
||||
/**
|
||||
* @purpose Resolves missing database mapping and continues migration.
|
||||
* @pre event.detail contains sourceDbUuid, targetDbUuid, targetDbName.
|
||||
* @post Mapping created in backend, task resumed with resolution params.
|
||||
*/
|
||||
// @PURPOSE: Handles the resolution of a missing database mapping.
|
||||
// @PRE: event.detail contains sourceDbUuid, targetDbUuid, and targetDbName.
|
||||
// @POST: Mapping is saved and task is resumed.
|
||||
async function handleMappingResolve(event) {
|
||||
const task = get(selectedTask);
|
||||
const { sourceDbUuid, targetDbUuid, targetDbName } = event.detail;
|
||||
|
||||
try {
|
||||
// 1. Save mapping to backend
|
||||
const envs = await api.fetchApi('/environments');
|
||||
const srcEnv = envs.find(e => e.name === task.params.from_env);
|
||||
const tgtEnv = envs.find(e => e.name === task.params.to_env);
|
||||
@@ -214,6 +176,7 @@
|
||||
target_db_name: targetDbName
|
||||
});
|
||||
|
||||
// 2. Resolve task
|
||||
await api.postApi(`/tasks/${task.id}/resolve`, {
|
||||
resolution_params: { resolved_mapping: { [sourceDbUuid]: targetDbUuid } }
|
||||
});
|
||||
@@ -227,11 +190,9 @@
|
||||
// [/DEF:handleMappingResolve:Function]
|
||||
|
||||
// [DEF:handlePasswordResume:Function]
|
||||
/**
|
||||
* @purpose Submits passwords and resumes paused migration task.
|
||||
* @pre event.detail contains passwords object.
|
||||
* @post Task resumed with passwords, connection status restored to connected.
|
||||
*/
|
||||
// @PURPOSE: Handles the submission of database passwords to resume a task.
|
||||
// @PRE: event.detail contains passwords dictionary.
|
||||
// @POST: Task resume endpoint is called with passwords.
|
||||
async function handlePasswordResume(event) {
|
||||
const task = get(selectedTask);
|
||||
const { passwords } = event.detail;
|
||||
@@ -249,11 +210,9 @@
|
||||
// [/DEF:handlePasswordResume:Function]
|
||||
|
||||
// [DEF:startDataTimeout:Function]
|
||||
/**
|
||||
* @purpose Starts timeout timer to detect idle connection.
|
||||
* @pre connectionStatus is 'connected'.
|
||||
* @post waitingForData set to true after 5 seconds if no data received.
|
||||
*/
|
||||
// @PURPOSE: Starts a timeout to detect when the log stream has stalled.
|
||||
// @PRE: None.
|
||||
// @POST: dataTimeout is set to check connection status after 5s.
|
||||
function startDataTimeout() {
|
||||
waitingForData = false;
|
||||
dataTimeout = setTimeout(() => {
|
||||
@@ -265,11 +224,9 @@
|
||||
// [/DEF:startDataTimeout:Function]
|
||||
|
||||
// [DEF:resetDataTimeout:Function]
|
||||
/**
|
||||
* @purpose Resets data timeout timer when new data arrives.
|
||||
* @pre dataTimeout must be set.
|
||||
* @post waitingForData reset to false, new timeout started.
|
||||
*/
|
||||
// @PURPOSE: Resets the data stall timeout.
|
||||
// @PRE: dataTimeout must be active.
|
||||
// @POST: dataTimeout is cleared and restarted.
|
||||
function resetDataTimeout() {
|
||||
clearTimeout(dataTimeout);
|
||||
waitingForData = false;
|
||||
@@ -278,12 +235,11 @@
|
||||
// [/DEF:resetDataTimeout:Function]
|
||||
|
||||
// [DEF:onMount:Function]
|
||||
/**
|
||||
* @purpose Initializes WebSocket connection when component mounts.
|
||||
* @pre Component must be mounted in DOM.
|
||||
* @post WebSocket connection established, subscription to selectedTask active.
|
||||
*/
|
||||
// @PURPOSE: Initializes the component and subscribes to task selection changes.
|
||||
// @PRE: Svelte component is mounting.
|
||||
// @POST: Store subscription is created and returned for cleanup.
|
||||
onMount(() => {
|
||||
// Subscribe to selectedTask changes
|
||||
const unsubscribe = selectedTask.subscribe(task => {
|
||||
if (task) {
|
||||
console.log(`[TaskRunner][Action] Task selected: ${task.id}. Initializing connection.`);
|
||||
@@ -292,6 +248,7 @@
|
||||
reconnectAttempts = 0;
|
||||
connectionStatus = 'disconnected';
|
||||
|
||||
// Initialize logs from the task object if available
|
||||
if (task.logs && Array.isArray(task.logs)) {
|
||||
console.log(`[TaskRunner] Loaded ${task.logs.length} existing logs.`);
|
||||
taskLogs.set(task.logs);
|
||||
@@ -307,6 +264,11 @@
|
||||
// [/DEF:onMount:Function]
|
||||
|
||||
// [DEF:onDestroy:Function]
|
||||
/**
|
||||
* @purpose Close WebSocket connection when the component is destroyed.
|
||||
* @pre Component is being destroyed.
|
||||
* @post WebSocket is closed and timeouts are cleared.
|
||||
*/
|
||||
onDestroy(() => {
|
||||
clearTimeout(reconnectTimeout);
|
||||
clearTimeout(dataTimeout);
|
||||
@@ -368,16 +330,26 @@
|
||||
</details>
|
||||
</div>
|
||||
|
||||
<div class="h-[500px]">
|
||||
<TaskLogPanel
|
||||
taskId={$selectedTask.id}
|
||||
logs={$taskLogs}
|
||||
autoScroll={true}
|
||||
on:filterChange={handleFilterChange}
|
||||
/>
|
||||
<div class="bg-gray-900 text-white font-mono text-sm p-4 rounded-md h-96 overflow-y-auto relative shadow-inner">
|
||||
{#if $taskLogs.length === 0}
|
||||
<div class="text-gray-500 italic text-center mt-10">No logs available for this task.</div>
|
||||
{/if}
|
||||
{#each $taskLogs as log}
|
||||
<div class="hover:bg-gray-800 px-1 rounded">
|
||||
<span class="text-gray-500 select-none text-xs w-20 inline-block">{new Date(log.timestamp).toLocaleTimeString()}</span>
|
||||
<span class="{log.level === 'ERROR' ? 'text-red-500 font-bold' : log.level === 'WARNING' ? 'text-yellow-400' : 'text-green-400'} w-16 inline-block">[{log.level}]</span>
|
||||
<span>{log.message}</span>
|
||||
{#if log.context}
|
||||
<details class="ml-24">
|
||||
<summary class="text-xs text-gray-500 cursor-pointer hover:text-gray-300">Context</summary>
|
||||
<pre class="text-xs text-gray-400 pl-2 border-l border-gray-700 mt-1">{JSON.stringify(log.context, null, 2)}</pre>
|
||||
</details>
|
||||
{/if}
|
||||
</div>
|
||||
{/each}
|
||||
|
||||
{#if waitingForData && connectionStatus === 'connected'}
|
||||
<div class="text-gray-500 italic mt-2 animate-pulse text-xs">
|
||||
<div class="text-gray-500 italic mt-2 animate-pulse border-t border-gray-800 pt-2">
|
||||
Waiting for new logs...
|
||||
</div>
|
||||
{/if}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:Toast:Component] -->
|
||||
<!--
|
||||
@TIER: TRIVIAL
|
||||
@SEMANTICS: toast, notification, feedback, ui
|
||||
@PURPOSE: Displays transient notifications (toasts) in the bottom-right corner.
|
||||
@LAYER: UI
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:ProtectedRoute:Component] -->
|
||||
<!--
|
||||
@TIER: TRIVIAL
|
||||
@SEMANTICS: auth, guard, route, protection
|
||||
@PURPOSE: Wraps content to ensure only authenticated users can access it.
|
||||
@LAYER: Component
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:CommitModal:Component] -->
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: git, commit, modal, version_control, diff
|
||||
@PURPOSE: Модальное окно для создания коммита с просмотром изменений (diff).
|
||||
@LAYER: Component
|
||||
|
||||
@@ -75,15 +75,8 @@
|
||||
const method = editingProvider ? 'PUT' : 'POST';
|
||||
const endpoint = editingProvider ? `/llm/providers/${editingProvider.id}` : '/llm/providers';
|
||||
|
||||
// When editing, only include api_key if user entered a new one
|
||||
const submitData = { ...formData };
|
||||
if (editingProvider && !submitData.api_key) {
|
||||
// If editing and api_key is empty, don't send it (backend will keep existing)
|
||||
delete submitData.api_key;
|
||||
}
|
||||
|
||||
try {
|
||||
await requestApi(endpoint, method, submitData);
|
||||
await requestApi(endpoint, method, formData);
|
||||
showForm = false;
|
||||
resetForm();
|
||||
onSave();
|
||||
|
||||
@@ -1,196 +0,0 @@
|
||||
<!-- [DEF:LogEntryRow:Component] -->
|
||||
<!-- @SEMANTICS: log, entry, row, ui, svelte -->
|
||||
<!-- @PURPOSE: Optimized row rendering for a single log entry with color coding and progress bar support. -->
|
||||
<!-- @TIER: STANDARD -->
|
||||
<!-- @LAYER: UI -->
|
||||
<!-- @UX_STATE: Idle -> (displays log entry) -->
|
||||
|
||||
<script>
|
||||
/** @type {Object} log - The log entry object */
|
||||
export let log;
|
||||
/** @type {boolean} showSource - Whether to show the source tag */
|
||||
export let showSource = true;
|
||||
|
||||
// Format timestamp for display
|
||||
$: formattedTime = formatTime(log.timestamp);
|
||||
|
||||
function formatTime(timestamp) {
|
||||
if (!timestamp) return '';
|
||||
const date = new Date(timestamp);
|
||||
return date.toLocaleTimeString('en-US', {
|
||||
hour12: false,
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
second: '2-digit'
|
||||
});
|
||||
}
|
||||
|
||||
// Get level class for styling
|
||||
$: levelClass = getLevelClass(log.level);
|
||||
|
||||
function getLevelClass(level) {
|
||||
switch (level?.toUpperCase()) {
|
||||
case 'DEBUG': return 'level-debug';
|
||||
case 'INFO': return 'level-info';
|
||||
case 'WARNING': return 'level-warning';
|
||||
case 'ERROR': return 'level-error';
|
||||
default: return 'level-info';
|
||||
}
|
||||
}
|
||||
|
||||
// Get source class for styling
|
||||
$: sourceClass = getSourceClass(log.source);
|
||||
|
||||
function getSourceClass(source) {
|
||||
if (!source) return 'source-default';
|
||||
return `source-${source.toLowerCase().replace(/[^a-z0-9]/g, '-')}`;
|
||||
}
|
||||
|
||||
// Check if log has progress metadata
|
||||
$: hasProgress = log.metadata?.progress !== undefined;
|
||||
$: progressPercent = log.metadata?.progress || 0;
|
||||
</script>
|
||||
|
||||
<div class="log-entry-row {levelClass}" class:has-progress={hasProgress}>
|
||||
<span class="log-time">{formattedTime}</span>
|
||||
<span class="log-level {levelClass}">{log.level || 'INFO'}</span>
|
||||
{#if showSource}
|
||||
<span class="log-source {sourceClass}">{log.source || 'system'}</span>
|
||||
{/if}
|
||||
<span class="log-message">
|
||||
{log.message}
|
||||
{#if hasProgress}
|
||||
<div class="progress-bar-container">
|
||||
<div class="progress-bar" style="width: {progressPercent}%"></div>
|
||||
<span class="progress-text">{progressPercent.toFixed(0)}%</span>
|
||||
</div>
|
||||
{/if}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<style>
|
||||
.log-entry-row {
|
||||
display: grid;
|
||||
grid-template-columns: 80px 70px auto 1fr;
|
||||
gap: 0.75rem;
|
||||
padding: 0.375rem 0.75rem;
|
||||
font-family: 'JetBrains Mono', 'Fira Code', monospace;
|
||||
font-size: 0.8125rem;
|
||||
border-bottom: 1px solid #1e293b;
|
||||
align-items: start;
|
||||
}
|
||||
|
||||
.log-entry-row.has-progress {
|
||||
grid-template-columns: 80px 70px auto 1fr;
|
||||
}
|
||||
|
||||
.log-entry-row:hover {
|
||||
background-color: rgba(30, 41, 59, 0.5);
|
||||
}
|
||||
|
||||
/* Alternating row backgrounds handled by parent */
|
||||
|
||||
.log-time {
|
||||
color: #64748b;
|
||||
font-size: 0.75rem;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.log-level {
|
||||
font-weight: 600;
|
||||
text-transform: uppercase;
|
||||
font-size: 0.6875rem;
|
||||
padding: 0.125rem 0.375rem;
|
||||
border-radius: 0.25rem;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.level-debug {
|
||||
color: #64748b;
|
||||
background-color: rgba(100, 116, 139, 0.2);
|
||||
}
|
||||
|
||||
.level-info {
|
||||
color: #3b82f6;
|
||||
background-color: rgba(59, 130, 246, 0.15);
|
||||
}
|
||||
|
||||
.level-warning {
|
||||
color: #f59e0b;
|
||||
background-color: rgba(245, 158, 11, 0.15);
|
||||
}
|
||||
|
||||
.level-error {
|
||||
color: #ef4444;
|
||||
background-color: rgba(239, 68, 68, 0.15);
|
||||
}
|
||||
|
||||
.log-source {
|
||||
font-size: 0.6875rem;
|
||||
padding: 0.125rem 0.375rem;
|
||||
border-radius: 0.25rem;
|
||||
background-color: rgba(100, 116, 139, 0.2);
|
||||
color: #94a3b8;
|
||||
white-space: nowrap;
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
max-width: 120px;
|
||||
}
|
||||
|
||||
.source-plugin {
|
||||
background-color: rgba(34, 197, 94, 0.15);
|
||||
color: #22c55e;
|
||||
}
|
||||
|
||||
.source-superset-api, .source-superset_api {
|
||||
background-color: rgba(168, 85, 247, 0.15);
|
||||
color: #a855f7;
|
||||
}
|
||||
|
||||
.source-git {
|
||||
background-color: rgba(249, 115, 22, 0.15);
|
||||
color: #f97316;
|
||||
}
|
||||
|
||||
.source-system {
|
||||
background-color: rgba(59, 130, 246, 0.15);
|
||||
color: #3b82f6;
|
||||
}
|
||||
|
||||
.log-message {
|
||||
color: #e2e8f0;
|
||||
word-break: break-word;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
|
||||
.progress-bar-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
margin-top: 0.25rem;
|
||||
background-color: #1e293b;
|
||||
border-radius: 0.25rem;
|
||||
overflow: hidden;
|
||||
height: 1rem;
|
||||
}
|
||||
|
||||
.progress-bar {
|
||||
background: linear-gradient(90deg, #3b82f6, #8b5cf6);
|
||||
height: 100%;
|
||||
transition: width 0.3s ease;
|
||||
}
|
||||
|
||||
.progress-text {
|
||||
font-size: 0.625rem;
|
||||
color: #94a3b8;
|
||||
padding: 0 0.25rem;
|
||||
position: absolute;
|
||||
right: 0.25rem;
|
||||
}
|
||||
|
||||
.progress-bar-container {
|
||||
position: relative;
|
||||
}
|
||||
</style>
|
||||
|
||||
<!-- [/DEF:LogEntryRow:Component] -->
|
||||
@@ -1,161 +0,0 @@
|
||||
<!-- [DEF:LogFilterBar:Component] -->
|
||||
<!-- @SEMANTICS: log, filter, ui, svelte -->
|
||||
<!-- @PURPOSE: UI component for filtering logs by level, source, and text search. -->
|
||||
<!-- @TIER: STANDARD -->
|
||||
<!-- @LAYER: UI -->
|
||||
<!-- @UX_STATE: Idle -> FilterChanged -> (parent applies filter) -->
|
||||
|
||||
<script>
|
||||
import { createEventDispatcher } from 'svelte';
|
||||
|
||||
// Props
|
||||
/** @type {string[]} availableSources - List of available source options */
|
||||
export let availableSources = [];
|
||||
/** @type {string} selectedLevel - Currently selected log level filter */
|
||||
export let selectedLevel = '';
|
||||
/** @type {string} selectedSource - Currently selected source filter */
|
||||
export let selectedSource = '';
|
||||
/** @type {string} searchText - Current search text */
|
||||
export let searchText = '';
|
||||
|
||||
const dispatch = createEventDispatcher();
|
||||
|
||||
// Log level options
|
||||
const levelOptions = [
|
||||
{ value: '', label: 'All Levels' },
|
||||
{ value: 'DEBUG', label: 'Debug' },
|
||||
{ value: 'INFO', label: 'Info' },
|
||||
{ value: 'WARNING', label: 'Warning' },
|
||||
{ value: 'ERROR', label: 'Error' }
|
||||
];
|
||||
|
||||
// Handle filter changes
|
||||
function handleLevelChange(event) {
|
||||
selectedLevel = event.target.value;
|
||||
dispatch('filter-change', { level: selectedLevel, source: selectedSource, search: searchText });
|
||||
}
|
||||
|
||||
function handleSourceChange(event) {
|
||||
selectedSource = event.target.value;
|
||||
dispatch('filter-change', { level: selectedLevel, source: selectedSource, search: searchText });
|
||||
}
|
||||
|
||||
function handleSearchChange(event) {
|
||||
searchText = event.target.value;
|
||||
dispatch('filter-change', { level: selectedLevel, source: selectedSource, search: searchText });
|
||||
}
|
||||
|
||||
function clearFilters() {
|
||||
selectedLevel = '';
|
||||
selectedSource = '';
|
||||
searchText = '';
|
||||
dispatch('filter-change', { level: '', source: '', search: '' });
|
||||
}
|
||||
</script>
|
||||
|
||||
<div class="log-filter-bar">
|
||||
<div class="filter-group">
|
||||
<label for="level-filter" class="filter-label">Level:</label>
|
||||
<select id="level-filter" class="filter-select" value={selectedLevel} on:change={handleLevelChange}>
|
||||
{#each levelOptions as option}
|
||||
<option value={option.value}>{option.label}</option>
|
||||
{/each}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="filter-group">
|
||||
<label for="source-filter" class="filter-label">Source:</label>
|
||||
<select id="source-filter" class="filter-select" value={selectedSource} on:change={handleSourceChange}>
|
||||
<option value="">All Sources</option>
|
||||
{#each availableSources as source}
|
||||
<option value={source}>{source}</option>
|
||||
{/each}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="filter-group search-group">
|
||||
<label for="search-filter" class="filter-label">Search:</label>
|
||||
<input
|
||||
id="search-filter"
|
||||
type="text"
|
||||
class="filter-input"
|
||||
placeholder="Search logs..."
|
||||
value={searchText}
|
||||
on:input={handleSearchChange}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{#if selectedLevel || selectedSource || searchText}
|
||||
<button class="clear-btn" on:click={clearFilters}>
|
||||
Clear Filters
|
||||
</button>
|
||||
{/if}
|
||||
</div>
|
||||
|
||||
<style>
|
||||
.log-filter-bar {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 1rem;
|
||||
align-items: center;
|
||||
padding: 0.75rem;
|
||||
background-color: #1e293b;
|
||||
border-radius: 0.5rem;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.filter-group {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
}
|
||||
|
||||
.filter-label {
|
||||
font-size: 0.875rem;
|
||||
color: #94a3b8;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.filter-select, .filter-input {
|
||||
background-color: #334155;
|
||||
color: #e2e8f0;
|
||||
border: 1px solid #475569;
|
||||
border-radius: 0.375rem;
|
||||
padding: 0.5rem 0.75rem;
|
||||
font-size: 0.875rem;
|
||||
min-width: 120px;
|
||||
}
|
||||
|
||||
.filter-select:focus, .filter-input:focus {
|
||||
outline: none;
|
||||
border-color: #3b82f6;
|
||||
box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.2);
|
||||
}
|
||||
|
||||
.search-group {
|
||||
flex: 1;
|
||||
min-width: 200px;
|
||||
}
|
||||
|
||||
.filter-input {
|
||||
width: 100%;
|
||||
max-width: 300px;
|
||||
}
|
||||
|
||||
.clear-btn {
|
||||
background-color: #475569;
|
||||
color: #e2e8f0;
|
||||
border: none;
|
||||
border-radius: 0.375rem;
|
||||
padding: 0.5rem 1rem;
|
||||
font-size: 0.875rem;
|
||||
cursor: pointer;
|
||||
transition: background-color 0.2s;
|
||||
}
|
||||
|
||||
.clear-btn:hover {
|
||||
background-color: #64748b;
|
||||
}
|
||||
</style>
|
||||
|
||||
<!-- [/DEF:LogFilterBar:Component] -->
|
||||
@@ -1,119 +0,0 @@
|
||||
<!-- [DEF:TaskLogPanel:Component] -->
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: task, log, panel, filter, list
|
||||
@PURPOSE: Combines log filtering and display into a single cohesive panel.
|
||||
@LAYER: UI
|
||||
@RELATION: USES -> frontend/src/components/tasks/LogFilterBar.svelte
|
||||
@RELATION: USES -> frontend/src/components/tasks/LogEntryRow.svelte
|
||||
@INVARIANT: Must always display logs in chronological order and respect auto-scroll preference.
|
||||
-->
|
||||
<script>
|
||||
import { createEventDispatcher, onMount, afterUpdate } from 'svelte';
|
||||
import LogFilterBar from './LogFilterBar.svelte';
|
||||
import LogEntryRow from './LogEntryRow.svelte';
|
||||
|
||||
/**
|
||||
* @PURPOSE: Component properties and state.
|
||||
* @PRE: taskId is a valid string, logs is an array of LogEntry objects.
|
||||
* @UX_STATE: [Empty] -> Displays "No logs available" message.
|
||||
* @UX_STATE: [Populated] -> Displays list of LogEntryRow components.
|
||||
* @UX_STATE: [AutoScroll] -> Automatically scrolls to bottom on new logs.
|
||||
*/
|
||||
export let taskId = '';
|
||||
export let logs = [];
|
||||
export let autoScroll = true;
|
||||
|
||||
const dispatch = createEventDispatcher();
|
||||
let scrollContainer;
|
||||
let selectedSource = 'all';
|
||||
let selectedLevel = 'all';
|
||||
|
||||
/**
|
||||
* @PURPOSE: Handles filter changes from LogFilterBar.
|
||||
* @PRE: event.detail contains source and level.
|
||||
* @POST: Dispatches filterChange event to parent.
|
||||
* @SIDE_EFFECT: Updates local filter state.
|
||||
*/
|
||||
function handleFilterChange(event) {
|
||||
const { source, level } = event.detail;
|
||||
selectedSource = source;
|
||||
selectedLevel = level;
|
||||
console.log(`[TaskLogPanel][STATE] Filter changed: source=${source}, level=${level}`);
|
||||
dispatch('filterChange', { source, level });
|
||||
}
|
||||
|
||||
/**
|
||||
* @PURPOSE: Scrolls the log container to the bottom.
|
||||
* @PRE: autoScroll is true and scrollContainer is bound.
|
||||
* @POST: scrollContainer.scrollTop is set to scrollHeight.
|
||||
*/
|
||||
function scrollToBottom() {
|
||||
if (autoScroll && scrollContainer) {
|
||||
scrollContainer.scrollTop = scrollContainer.scrollHeight;
|
||||
}
|
||||
}
|
||||
|
||||
afterUpdate(() => {
|
||||
scrollToBottom();
|
||||
});
|
||||
|
||||
onMount(() => {
|
||||
scrollToBottom();
|
||||
});
|
||||
</script>
|
||||
|
||||
<div class="flex flex-col h-full bg-gray-900 text-gray-100 rounded-lg overflow-hidden border border-gray-700">
|
||||
<!-- Header / Filter Bar -->
|
||||
<div class="p-2 bg-gray-800 border-b border-gray-700">
|
||||
<LogFilterBar
|
||||
{taskId}
|
||||
on:filter={handleFilterChange}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<!-- Log List -->
|
||||
<div
|
||||
bind:this={scrollContainer}
|
||||
class="flex-1 overflow-y-auto p-2 font-mono text-sm space-y-0.5"
|
||||
>
|
||||
{#if logs.length === 0}
|
||||
<div class="text-gray-500 italic text-center py-4">
|
||||
No logs available for this task.
|
||||
</div>
|
||||
{:else}
|
||||
{#each logs as log}
|
||||
<LogEntryRow {log} />
|
||||
{/each}
|
||||
{/if}
|
||||
</div>
|
||||
|
||||
<!-- Footer / Stats -->
|
||||
<div class="px-3 py-1 bg-gray-800 border-t border-gray-700 text-xs text-gray-400 flex justify-between items-center">
|
||||
<span>Total: {logs.length} entries</span>
|
||||
{#if autoScroll}
|
||||
<span class="text-green-500 flex items-center gap-1">
|
||||
<span class="w-2 h-2 bg-green-500 rounded-full animate-pulse"></span>
|
||||
Auto-scroll active
|
||||
</span>
|
||||
{/if}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<style>
|
||||
/* Custom scrollbar for the log container */
|
||||
div::-webkit-scrollbar {
|
||||
width: 8px;
|
||||
}
|
||||
div::-webkit-scrollbar-track {
|
||||
background: #1f2937;
|
||||
}
|
||||
div::-webkit-scrollbar-thumb {
|
||||
background: #4b5563;
|
||||
border-radius: 4px;
|
||||
}
|
||||
div::-webkit-scrollbar-thumb:hover {
|
||||
background: #6b7280;
|
||||
}
|
||||
</style>
|
||||
<!-- [/DEF:TaskLogPanel:Component] -->
|
||||
@@ -1,5 +1,4 @@
|
||||
// [DEF:api_module:Module]
|
||||
// @TIER: STANDARD
|
||||
// @SEMANTICS: api, client, fetch, rest
|
||||
// @PURPOSE: Handles all communication with the backend API.
|
||||
// @LAYER: Infra-API
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
// [DEF:stores_module:Module]
|
||||
// @TIER: STANDARD
|
||||
// @SEMANTICS: state, stores, svelte, plugins, tasks
|
||||
// @PURPOSE: Global state management using Svelte stores.
|
||||
// @LAYER: UI-State
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
<!-- [DEF:AdminSettingsPage:Component] -->
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: admin, adfs, mappings, configuration, logging
|
||||
@PURPOSE: UI for configuring Active Directory Group to local Role mappings for ADFS SSO and logging settings.
|
||||
@SEMANTICS: admin, adfs, mappings, configuration
|
||||
@PURPOSE: UI for configuring Active Directory Group to local Role mappings for ADFS SSO.
|
||||
@LAYER: Feature
|
||||
@RELATION: DEPENDS_ON -> frontend.src.services.adminService
|
||||
@RELATION: DEPENDS_ON -> frontend.src.components.auth.ProtectedRoute
|
||||
@@ -29,19 +28,6 @@
|
||||
role_id: ''
|
||||
};
|
||||
|
||||
// [SECTION: LOGGING_CONFIG]
|
||||
let loggingConfig = {
|
||||
level: 'INFO',
|
||||
task_log_level: 'INFO',
|
||||
enable_belief_state: true
|
||||
};
|
||||
let loggingConfigLoading = false;
|
||||
let loggingConfigSaving = false;
|
||||
let loggingConfigSaved = false;
|
||||
// [/SECTION]
|
||||
|
||||
const LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR'];
|
||||
|
||||
// [DEF:loadData:Function]
|
||||
/**
|
||||
* @purpose Fetches AD mappings and roles from the backend to populate the UI.
|
||||
@@ -107,64 +93,7 @@
|
||||
}
|
||||
// [/DEF:handleCreateMapping:Function]
|
||||
|
||||
// [DEF:loadLoggingConfig:Function]
|
||||
/**
|
||||
* @purpose Fetches current logging configuration from the backend.
|
||||
* @pre Component is mounted and user has active session.
|
||||
* @post loggingConfig variable is updated with backend data.
|
||||
* @returns {Promise<void>}
|
||||
* @relation CALLS -> adminService.getLoggingConfig
|
||||
*/
|
||||
async function loadLoggingConfig() {
|
||||
console.log('[AdminSettingsPage][loadLoggingConfig][Entry]');
|
||||
loggingConfigLoading = true;
|
||||
try {
|
||||
const config = await adminService.getLoggingConfig();
|
||||
loggingConfig = {
|
||||
level: config.level || 'INFO',
|
||||
task_log_level: config.task_log_level || 'INFO',
|
||||
enable_belief_state: config.enable_belief_state ?? true
|
||||
};
|
||||
console.log('[AdminSettingsPage][loadLoggingConfig][Coherence:OK]');
|
||||
} catch (e) {
|
||||
console.error('[AdminSettingsPage][loadLoggingConfig][Coherence:Failed]', e);
|
||||
} finally {
|
||||
loggingConfigLoading = false;
|
||||
}
|
||||
}
|
||||
// [/DEF:loadLoggingConfig:Function]
|
||||
|
||||
// [DEF:saveLoggingConfig:Function]
|
||||
/**
|
||||
* @purpose Saves logging configuration to the backend.
|
||||
* @pre loggingConfig contains valid values.
|
||||
* @post Configuration is saved and feedback is shown.
|
||||
* @returns {Promise<void>}
|
||||
* @relation CALLS -> adminService.updateLoggingConfig
|
||||
*/
|
||||
async function saveLoggingConfig() {
|
||||
console.log('[AdminSettingsPage][saveLoggingConfig][Entry]');
|
||||
loggingConfigSaving = true;
|
||||
loggingConfigSaved = false;
|
||||
try {
|
||||
await adminService.updateLoggingConfig(loggingConfig);
|
||||
loggingConfigSaved = true;
|
||||
console.log('[AdminSettingsPage][saveLoggingConfig][Coherence:OK]');
|
||||
// Reset saved indicator after 2 seconds
|
||||
setTimeout(() => { loggingConfigSaved = false; }, 2000);
|
||||
} catch (e) {
|
||||
alert("Failed to save logging configuration: " + (e.message || "Unknown error"));
|
||||
console.error('[AdminSettingsPage][saveLoggingConfig][Coherence:Failed]', e);
|
||||
} finally {
|
||||
loggingConfigSaving = false;
|
||||
}
|
||||
}
|
||||
// [/DEF:saveLoggingConfig:Function]
|
||||
|
||||
onMount(() => {
|
||||
loadData();
|
||||
loadLoggingConfig();
|
||||
});
|
||||
onMount(loadData);
|
||||
</script>
|
||||
|
||||
<ProtectedRoute requiredPermission="admin:settings">
|
||||
@@ -226,74 +155,6 @@
|
||||
</div>
|
||||
{/if}
|
||||
|
||||
<!-- [SECTION: LOGGING_CONFIG_UI] -->
|
||||
<div class="mt-8 bg-white shadow rounded-lg border border-gray-200 p-6">
|
||||
<h2 class="text-xl font-bold mb-4 text-gray-800">Logging Configuration</h2>
|
||||
|
||||
{#if loggingConfigLoading}
|
||||
<div class="flex justify-center py-4">
|
||||
<p class="text-gray-500 animate-pulse">Loading logging configuration...</p>
|
||||
</div>
|
||||
{:else}
|
||||
<div class="space-y-4">
|
||||
<div class="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-gray-700 mb-1">Application Log Level</label>
|
||||
<select
|
||||
bind:value={loggingConfig.level}
|
||||
class="w-full border border-gray-300 p-2 rounded focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
|
||||
>
|
||||
{#each LOG_LEVELS as level}
|
||||
<option value={level}>{level}</option>
|
||||
{/each}
|
||||
</select>
|
||||
<p class="text-xs text-gray-500 mt-1">Controls the verbosity of application logs.</p>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-gray-700 mb-1">Task Log Level</label>
|
||||
<select
|
||||
bind:value={loggingConfig.task_log_level}
|
||||
class="w-full border border-gray-300 p-2 rounded focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
|
||||
>
|
||||
{#each LOG_LEVELS as level}
|
||||
<option value={level}>{level}</option>
|
||||
{/each}
|
||||
</select>
|
||||
<p class="text-xs text-gray-500 mt-1">Minimum level for logs stored in task history. DEBUG shows all logs.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex items-center">
|
||||
<input
|
||||
type="checkbox"
|
||||
id="enable_belief_state"
|
||||
bind:checked={loggingConfig.enable_belief_state}
|
||||
class="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded"
|
||||
/>
|
||||
<label for="enable_belief_state" class="ml-2 block text-sm text-gray-700">
|
||||
Enable Belief State Logging (Entry/Exit/Coherence logs)
|
||||
</label>
|
||||
</div>
|
||||
<p class="text-xs text-gray-500 -mt-2">When disabled, belief scope logs are hidden. Requires DEBUG level to see in task logs.</p>
|
||||
|
||||
<div class="flex items-center gap-3 pt-2">
|
||||
<button
|
||||
on:click={saveLoggingConfig}
|
||||
disabled={loggingConfigSaving}
|
||||
class="px-4 py-2 bg-blue-600 text-white rounded font-medium hover:bg-blue-700 disabled:bg-gray-400 disabled:cursor-not-allowed transition-colors"
|
||||
>
|
||||
{loggingConfigSaving ? 'Saving...' : 'Save Configuration'}
|
||||
</button>
|
||||
{#if loggingConfigSaved}
|
||||
<span class="text-green-600 text-sm font-medium">✓ Saved</span>
|
||||
{/if}
|
||||
</div>
|
||||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
<!-- [/SECTION: LOGGING_CONFIG_UI] -->
|
||||
|
||||
{#if showCreateModal}
|
||||
<div class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center p-4 z-50">
|
||||
<div class="bg-white rounded-lg shadow-xl p-6 max-w-md w-full">
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
<!-- [DEF:LLMSettingsPage:Component] -->
|
||||
<!--
|
||||
<!--
|
||||
@TIER: STANDARD
|
||||
@SEMANTICS: admin, llm, settings, provider, configuration
|
||||
@PURPOSE: Admin settings page for LLM provider configuration.
|
||||
@LAYER: UI
|
||||
@RELATION: CALLS -> frontend/src/components/llm/ProviderConfig.svelte
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:DebugPage:Component] -->
|
||||
<!--
|
||||
@TIER: TRIVIAL
|
||||
@SEMANTICS: debug, page, tool
|
||||
@PURPOSE: Page for system diagnostics and debugging.
|
||||
@LAYER: UI
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
<!-- [DEF:MapperPage:Component] -->
|
||||
<!--
|
||||
@TIER: TRIVIAL
|
||||
@SEMANTICS: mapper, page, tool
|
||||
@PURPOSE: Page for the dataset column mapper tool.
|
||||
@LAYER: UI
|
||||
|
||||
@@ -25,7 +25,6 @@
|
||||
// [DEF:loadFiles:Function]
|
||||
/**
|
||||
* @purpose Fetches the list of files from the server.
|
||||
* @pre The activeTab is set to a valid category.
|
||||
* @post Updates the `files` array with the latest data.
|
||||
*/
|
||||
let files = [];
|
||||
@@ -34,7 +33,6 @@
|
||||
let currentPath = 'backups'; // Relative to storage root
|
||||
|
||||
async function loadFiles() {
|
||||
console.log('[STORAGE-PAGE][LOAD_START] category=%s path=%s', activeTab, currentPath);
|
||||
isLoading = true;
|
||||
try {
|
||||
const category = activeTab;
|
||||
@@ -53,9 +51,7 @@
|
||||
: effectivePath;
|
||||
|
||||
files = await listFiles(category, subpath);
|
||||
console.log('[STORAGE-PAGE][LOAD_OK] count=%d', files.length);
|
||||
} catch (error) {
|
||||
console.log('[STORAGE-PAGE][LOAD_ERR] error=%s', error.message);
|
||||
addToast($t.storage.messages.load_failed.replace('{error}', error.message), 'error');
|
||||
} finally {
|
||||
isLoading = false;
|
||||
@@ -66,22 +62,17 @@
|
||||
// [DEF:handleDelete:Function]
|
||||
/**
|
||||
* @purpose Handles the file deletion process.
|
||||
* @pre The event contains valid category and path.
|
||||
* @post File is deleted and file list is refreshed.
|
||||
* @param {CustomEvent} event - The delete event containing category and path.
|
||||
*/
|
||||
async function handleDelete(event) {
|
||||
const { category, path, name } = event.detail;
|
||||
console.log('[STORAGE-PAGE][DELETE_START] category=%s path=%s', category, path);
|
||||
if (!confirm($t.storage.messages.delete_confirm.replace('{name}', name))) return;
|
||||
|
||||
try {
|
||||
await deleteFile(category, path);
|
||||
console.log('[STORAGE-PAGE][DELETE_OK] name=%s', name);
|
||||
addToast($t.storage.messages.delete_success.replace('{name}', name), 'success');
|
||||
await loadFiles();
|
||||
} catch (error) {
|
||||
console.log('[STORAGE-PAGE][DELETE_ERR] error=%s', error.message);
|
||||
addToast($t.storage.messages.delete_failed.replace('{error}', error.message), 'error');
|
||||
}
|
||||
}
|
||||
@@ -90,12 +81,9 @@
|
||||
// [DEF:handleNavigate:Function]
|
||||
/**
|
||||
* @purpose Updates the current path and reloads files when navigating into a directory.
|
||||
* @pre The event contains a valid path string.
|
||||
* @post currentPath is updated and files are reloaded.
|
||||
* @param {CustomEvent} event - The navigation event containing the new path.
|
||||
*/
|
||||
function handleNavigate(event) {
|
||||
console.log('[STORAGE-PAGE][NAVIGATE] path=%s', event.detail);
|
||||
currentPath = event.detail;
|
||||
loadFiles();
|
||||
}
|
||||
|
||||
@@ -58,8 +58,6 @@ async function createUser(userData) {
|
||||
// [DEF:getRoles:Function]
|
||||
/**
|
||||
* @purpose Fetches all available system roles.
|
||||
* @pre User must be authenticated with Admin privileges.
|
||||
* @post Returns an array of role objects.
|
||||
* @returns {Promise<Array>}
|
||||
* @relation CALLS -> backend.src.api.routes.admin.list_roles
|
||||
*/
|
||||
@@ -79,8 +77,6 @@ async function getRoles() {
|
||||
// [DEF:getADGroupMappings:Function]
|
||||
/**
|
||||
* @purpose Fetches mappings between AD groups and local roles.
|
||||
* @pre User must be authenticated with Admin privileges.
|
||||
* @post Returns an array of AD group mapping objects.
|
||||
* @returns {Promise<Array>}
|
||||
*/
|
||||
async function getADGroupMappings() {
|
||||
@@ -99,8 +95,6 @@ async function getADGroupMappings() {
|
||||
// [DEF:createADGroupMapping:Function]
|
||||
/**
|
||||
* @purpose Creates or updates an AD group to Role mapping.
|
||||
* @pre User must be authenticated with Admin privileges.
|
||||
* @post New or updated mapping created in auth.db.
|
||||
* @param {Object} mappingData - Mapping details (ad_group, role_id).
|
||||
* @returns {Promise<Object>}
|
||||
*/
|
||||
@@ -120,8 +114,6 @@ async function createADGroupMapping(mappingData) {
|
||||
// [DEF:updateUser:Function]
|
||||
/**
|
||||
* @purpose Updates an existing user.
|
||||
* @pre User must be authenticated with Admin privileges.
|
||||
* @post User record updated in auth.db.
|
||||
* @param {string} userId - Target user ID.
|
||||
* @param {Object} userData - Updated user data.
|
||||
* @returns {Promise<Object>}
|
||||
@@ -142,8 +134,6 @@ async function updateUser(userId, userData) {
|
||||
// [DEF:deleteUser:Function]
|
||||
/**
|
||||
* @purpose Deletes a user.
|
||||
* @pre User must be authenticated with Admin privileges.
|
||||
* @post User record removed from auth.db.
|
||||
* @param {string} userId - Target user ID.
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
@@ -162,8 +152,6 @@ async function deleteUser(userId) {
|
||||
// [DEF:createRole:Function]
|
||||
/**
|
||||
* @purpose Creates a new role.
|
||||
* @pre User must be authenticated with Admin privileges.
|
||||
* @post New role created in auth.db.
|
||||
* @param {Object} roleData - Role details (name, description, permissions).
|
||||
* @returns {Promise<Object>}
|
||||
*/
|
||||
@@ -236,45 +224,6 @@ async function getPermissions() {
|
||||
}
|
||||
// [/DEF:getPermissions:Function]
|
||||
|
||||
// [DEF:getLoggingConfig:Function]
|
||||
/**
|
||||
* @purpose Fetches current logging configuration.
|
||||
* @returns {Promise<Object>} - Logging config with level, task_log_level, enable_belief_state.
|
||||
* @relation CALLS -> backend.src.api.routes.settings.get_logging_config
|
||||
*/
|
||||
async function getLoggingConfig() {
|
||||
console.log('[getLoggingConfig][Entry]');
|
||||
try {
|
||||
const config = await api.requestApi('/settings/logging', 'GET');
|
||||
console.log('[getLoggingConfig][Coherence:OK]');
|
||||
return config;
|
||||
} catch (e) {
|
||||
console.error('[getLoggingConfig][Coherence:Failed]', e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
// [/DEF:getLoggingConfig:Function]
|
||||
|
||||
// [DEF:updateLoggingConfig:Function]
|
||||
/**
|
||||
* @purpose Updates logging configuration.
|
||||
* @param {Object} configData - Logging config (level, task_log_level, enable_belief_state).
|
||||
* @returns {Promise<Object>}
|
||||
* @relation CALLS -> backend.src.api.routes.settings.update_logging_config
|
||||
*/
|
||||
async function updateLoggingConfig(configData) {
|
||||
console.log('[updateLoggingConfig][Entry]');
|
||||
try {
|
||||
const config = await api.requestApi('/settings/logging', 'PATCH', configData);
|
||||
console.log('[updateLoggingConfig][Coherence:OK]');
|
||||
return config;
|
||||
} catch (e) {
|
||||
console.error('[updateLoggingConfig][Coherence:Failed]', e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
// [/DEF:updateLoggingConfig:Function]
|
||||
|
||||
export const adminService = {
|
||||
getUsers,
|
||||
createUser,
|
||||
@@ -286,9 +235,7 @@ export const adminService = {
|
||||
deleteRole,
|
||||
getPermissions,
|
||||
getADGroupMappings,
|
||||
createADGroupMapping,
|
||||
getLoggingConfig,
|
||||
updateLoggingConfig
|
||||
createADGroupMapping
|
||||
};
|
||||
|
||||
// [/DEF:adminService:Module]
|
||||
@@ -1,6 +1,5 @@
|
||||
// [DEF:GitServiceClient:Module]
|
||||
/**
|
||||
* @TIER: STANDARD
|
||||
* @SEMANTICS: git, service, api, client
|
||||
* @PURPOSE: API client for Git operations, managing the communication between frontend and backend.
|
||||
* @LAYER: Service
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// [DEF:storageService:Module]
|
||||
/**
|
||||
* @TIER: STANDARD
|
||||
* @purpose Frontend API client for file storage management.
|
||||
* @layer Service
|
||||
* @relation DEPENDS_ON -> backend.api.storage
|
||||
@@ -9,25 +8,6 @@
|
||||
|
||||
const API_BASE = '/api/storage';
|
||||
|
||||
// [DEF:getStorageAuthHeaders:Function]
|
||||
/**
|
||||
* @purpose Returns headers with Authorization for storage API calls.
|
||||
* @returns {Object} Headers object with Authorization if token exists.
|
||||
* @NOTE Unlike api.js getAuthHeaders, this doesn't set Content-Type
|
||||
* to allow FormData to set its own multipart boundary.
|
||||
*/
|
||||
function getStorageAuthHeaders() {
|
||||
const headers = {};
|
||||
if (typeof window !== 'undefined') {
|
||||
const token = localStorage.getItem('auth_token');
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
// [/DEF:getStorageAuthHeaders:Function]
|
||||
|
||||
// [DEF:listFiles:Function]
|
||||
/**
|
||||
* @purpose Fetches the list of files for a given category and subpath.
|
||||
@@ -45,9 +25,7 @@ export async function listFiles(category, path) {
|
||||
if (path) {
|
||||
params.append('path', path);
|
||||
}
|
||||
const response = await fetch(`${API_BASE}/files?${params.toString()}`, {
|
||||
headers: getStorageAuthHeaders()
|
||||
});
|
||||
const response = await fetch(`${API_BASE}/files?${params.toString()}`);
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to fetch files: ${response.statusText}`);
|
||||
}
|
||||
@@ -75,7 +53,6 @@ export async function uploadFile(file, category, path) {
|
||||
|
||||
const response = await fetch(`${API_BASE}/upload`, {
|
||||
method: 'POST',
|
||||
headers: getStorageAuthHeaders(),
|
||||
body: formData
|
||||
});
|
||||
|
||||
@@ -98,8 +75,7 @@ export async function uploadFile(file, category, path) {
|
||||
*/
|
||||
export async function deleteFile(category, path) {
|
||||
const response = await fetch(`${API_BASE}/files/${category}/${path}`, {
|
||||
method: 'DELETE',
|
||||
headers: getStorageAuthHeaders()
|
||||
method: 'DELETE'
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
@@ -117,8 +93,6 @@ export async function deleteFile(category, path) {
|
||||
* @returns {string}
|
||||
* @PRE category and path must identify an existing file.
|
||||
* @POST Returns a valid API URL for file download.
|
||||
* @NOTE Downloads use browser navigation, so auth is handled via cookies
|
||||
* or the backend must allow unauthenticated downloads for valid paths.
|
||||
*/
|
||||
export function downloadFileUrl(category, path) {
|
||||
return `${API_BASE}/download/${category}/${path}`;
|
||||
|
||||
@@ -66,14 +66,11 @@
|
||||
3. **TRIVIAL** (DTO/**Atoms**):
|
||||
- Требование: Только Якоря [DEF] и @PURPOSE.
|
||||
|
||||
#### VI. ЛОГИРОВАНИЕ (BELIEF STATE & TASK LOGS)
|
||||
Цель: Трассировка для самокоррекции и пользовательский мониторинг.
|
||||
Python:
|
||||
- Системные логи: Context Manager `with belief_scope("ID"):`.
|
||||
- Логи задач: `context.logger.info("msg", source="component")`.
|
||||
#### VI. ЛОГИРОВАНИЕ (BELIEF STATE)
|
||||
Цель: Трассировка для самокоррекции.
|
||||
Python: Context Manager `with belief_scope("ID"):`.
|
||||
Svelte: `console.log("[ID][STATE] Msg")`.
|
||||
Состояния: Entry -> Action -> Coherence:OK / Failed -> Exit.
|
||||
Инвариант: Каждый лог задачи должен иметь атрибут `source` для фильтрации.
|
||||
|
||||
#### VII. АЛГОРИТМ ГЕНЕРАЦИИ
|
||||
1. АНАЛИЗ. Оцени TIER, слой и UX-требования.
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -27,7 +27,6 @@ Stores the outcome of a dashboard validation task.
|
||||
| timestamp | DateTime | Yes | When the validation ran |
|
||||
| status | Enum | Yes | `PASS`, `WARN`, `FAIL` |
|
||||
| screenshot_path | String | No | Path to the captured screenshot (if stored) |
|
||||
| screenshot_metadata | JSON | No | `{width: 1920, height: dynamic, tabs_processed: []}` |
|
||||
| issues | JSON | Yes | List of detected issues `[{severity, message, location}]` |
|
||||
| raw_response | Text | No | Full LLM response for debugging |
|
||||
|
||||
|
||||
@@ -23,25 +23,6 @@ This feature implements two new plugins for the `ss-tools` platform: `DashboardV
|
||||
**Constraints**: Must integrate with existing `PluginBase` and `TaskManager`. Secure storage for API keys.
|
||||
**Scale/Scope**: Support for configurable LLM providers, scheduled tasks, and notification integration.
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Screenshot Capture Implementation
|
||||
The screenshot service uses Playwright with Chrome DevTools Protocol (CDP) to avoid font loading timeouts in headless mode. Key implementation details:
|
||||
- **Full Page Capture**: Uses CDP `Page.captureScreenshot` with `captureBeyondViewport: true` and `fromSurface: true`
|
||||
- **Tab Switching**: Implements recursive tab switching to trigger lazy-loaded chart rendering on multi-tab dashboards
|
||||
- **Authentication**: Uses direct UI login flow (navigating to `/login/` and filling credentials) instead of API cookie injection for better reliability
|
||||
- **Resolution**: 1920px width with dynamic full page height calculation
|
||||
|
||||
### Authentication Flow
|
||||
The service authenticates via Playwright UI login rather than API authentication:
|
||||
1. Navigate to `/login/` page
|
||||
2. Fill username/password fields (supports multiple Superset versions)
|
||||
3. Click login button
|
||||
4. Verify successful redirect to `/superset/welcome/`
|
||||
5. Navigate to dashboard with valid session
|
||||
|
||||
This approach is more reliable than API-to-UI cookie injection which was causing 403 Forbidden errors.
|
||||
|
||||
## Constitution Check
|
||||
|
||||
*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*
|
||||
@@ -79,8 +60,7 @@ backend/
|
||||
│ │ ├── llm_analysis/ # New Plugin Directory
|
||||
│ │ │ ├── __init__.py
|
||||
│ │ │ ├── plugin.py # Implements DashboardValidationPlugin & DocumentationPlugin
|
||||
│ │ │ ├── service.py # LLM interaction logic + ScreenshotService (CDP, tab switching)
|
||||
│ │ │ ├── scheduler.py # Scheduled validation task handler
|
||||
│ │ │ ├── service.py # LLM interaction logic
|
||||
│ │ │ └── models.py # Pydantic models for LLM config/results
|
||||
│ │ └── git/ # Existing Git Plugin
|
||||
│ │ └── ... # Update for commit message generation
|
||||
|
||||
@@ -82,8 +82,6 @@ As a Developer, I want the system to suggest commit messages based on changes di
|
||||
- What happens if the dashboard screenshot cannot be generated? (System should proceed with logs only or fail depending on configuration).
|
||||
- What happens if the context (logs/metadata) exceeds the LLM's token limit? (System should truncate or summarize input).
|
||||
- How does the system handle missing API keys? (Task should fail immediately with a configuration error).
|
||||
- What happens if the dashboard has multiple tabs with lazy-loaded charts? (System must switch through all tabs recursively to trigger chart rendering before capture).
|
||||
- What happens if Playwright encounters font loading timeouts in headless mode? (System must use CDP `Page.captureScreenshot` to bypass Playwright's internal timeout mechanism).
|
||||
|
||||
## Requirements *(mandatory)*
|
||||
|
||||
@@ -94,11 +92,9 @@ As a Developer, I want the system to suggest commit messages based on changes di
|
||||
- **FR-028**: The system MUST mask all API keys in the UI and logs, displaying only the last 4 characters (e.g., `sk-...1234`). [Security]
|
||||
- **FR-003**: System MUST implement a `DashboardValidationPlugin` that integrates with the existing `PluginBase` architecture.
|
||||
- **FR-004**: `DashboardValidationPlugin` MUST accept a dashboard identifier as input.
|
||||
- **FR-005**: `DashboardValidationPlugin` MUST be capable of retrieving a visual representation (screenshot) of the dashboard. The visual representation MUST be a PNG image with a resolution of 1920px width and full page height to ensure all dashboard content is captured. [Clarity]
|
||||
- **FR-005**: `DashboardValidationPlugin` MUST be capable of retrieving a visual representation (screenshot) of the dashboard. The visual representation MUST be a PNG or JPEG image with a minimum resolution of 1280x720px to ensure legibility for the LLM. [Clarity]
|
||||
- **FR-016**: System MUST support configurable screenshot strategies: 'Headless Browser' (default, high accuracy) and 'API Thumbnail' (fallback/fast).
|
||||
- **FR-030**: The screenshot capture MUST use Playwright with Chrome DevTools Protocol (CDP) to avoid font loading timeouts in headless mode.
|
||||
- **FR-031**: The screenshot capture MUST implement recursive tab switching to trigger lazy-loaded chart rendering on multi-tab dashboards before capturing.
|
||||
- **FR-006**: `DashboardValidationPlugin` MUST retrieve recent execution logs associated with the dashboard from the Environment API (e.g., `/api/v1/log/`), limited to the last 100 lines or 24 hours (whichever is smaller) to prevent token overflow. [Reliability]
|
||||
- **FR-006**: `DashboardValidationPlugin` MUST retrieve recent execution logs associated with the dashboard, limited to the last 100 lines or 24 hours (whichever is smaller) to prevent token overflow. [Reliability]
|
||||
- **FR-007**: `DashboardValidationPlugin` MUST combine visual and text data to prompt a Multimodal LLM for analysis. The analysis output MUST be structured as a JSON object containing `status` (Pass/Fail), `issues` (list of strings), and `summary` (text) to enable structured UI presentation. [Clarity]
|
||||
- **FR-008**: System MUST implement a `DocumentationPlugin` (or similar) for documenting datasets and dashboards.
|
||||
- **FR-009**: `DocumentationPlugin` MUST retrieve schema and metadata for the target asset.
|
||||
@@ -134,6 +130,4 @@ As a Developer, I want the system to suggest commit messages based on changes di
|
||||
- **SC-001**: Users can successfully configure and validate a connection to at least one LLM provider.
|
||||
- **SC-002**: A dashboard validation task completes within 90 seconds (assuming standard LLM latency).
|
||||
- **SC-003**: The system successfully processes a multimodal prompt (image + text) and returns a structured analysis.
|
||||
- **SC-004**: Generated documentation for a standard dataset contains descriptions for at least 80% of the columns (based on LLM capability, but pipeline must support it).
|
||||
- **SC-005**: Screenshots capture full dashboard content including all tabs (1920px width, full height) without font loading timeouts.
|
||||
- **SC-006**: Analysis results are displayed in task logs with clear `[ANALYSIS_SUMMARY]` and `[ANALYSIS_ISSUE]` markers for easy parsing.
|
||||
- **SC-004**: Generated documentation for a standard dataset contains descriptions for at least 80% of the columns (based on LLM capability, but pipeline must support it).
|
||||
@@ -30,7 +30,7 @@
|
||||
**Goal**: Implement core services and shared infrastructure.
|
||||
|
||||
- [x] T008 Implement `LLMProviderService` in `backend/src/services/llm_provider.py` (CRUD for providers, AES-256 encryption)
|
||||
- [x] T009 Implement `ScreenshotService` in `backend/src/plugins/llm_analysis/service.py` (Playwright + API strategies, fallback logic, 1920px width, full page height, CDP screenshot, recursive tab switching)
|
||||
- [x] T009 Implement `ScreenshotService` in `backend/src/plugins/llm_analysis/service.py` (Playwright + API strategies, fallback logic, min 1280x720px resolution)
|
||||
- [x] T010 Implement `LLMClient` in `backend/src/plugins/llm_analysis/service.py` (OpenAI SDK wrapper, retry logic with tenacity, rate limit handling)
|
||||
- [x] T011 Create `backend/src/plugins/llm_analysis/plugin.py` with `PluginBase` implementation stubs
|
||||
- [x] T012 Define database schema updates for `LLMProviderConfig` in `backend/src/models/llm.py` (or appropriate location)
|
||||
@@ -43,7 +43,7 @@
|
||||
- [x] T014 [US1] Implement `validate_dashboard` task logic in `backend/src/plugins/llm_analysis/plugin.py`
|
||||
- [x] T015 [US1] Implement log retrieval logic (fetch recent logs, limit 100 lines/24h) in `backend/src/plugins/llm_analysis/plugin.py`
|
||||
- [x] T016 [US1] Construct multimodal prompt (image + text) in `backend/src/plugins/llm_analysis/service.py` (implement PII/credential filtering)
|
||||
- [x] T017 [US1] Implement result parsing and persistence (ValidationResult) in `backend/src/plugins/llm_analysis/plugin.py` (ensure JSON structure: status, issues, summary, log results to task output with `[ANALYSIS_SUMMARY]` and `[ANALYSIS_ISSUE]` markers)
|
||||
- [x] T017 [US1] Implement result parsing and persistence (ValidationResult) in `backend/src/plugins/llm_analysis/plugin.py` (ensure JSON structure: status, issues, summary)
|
||||
- [x] T018 [US1] Add `validate` endpoint trigger in `backend/src/api/routes/tasks.py` (or reuse existing dispatch)
|
||||
- [x] T019 [US1] Implement notification dispatch (Email/Pulse) on failure in `backend/src/plugins/llm_analysis/plugin.py` (Summary + Link format)
|
||||
- [x] T020 [US1] Create `frontend/src/components/llm/ValidationReport.svelte` for viewing results
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
# Specification Quality Checklist: Task Logging System (Separated Per-Task Logs)
|
||||
|
||||
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||
**Created**: 2026-02-07
|
||||
**Feature**: [specs/018-task-logging-v2/spec.md](specs/018-task-logging-v2/spec.md)
|
||||
|
||||
## Content Quality
|
||||
|
||||
- [x] No implementation details (languages, frameworks, APIs)
|
||||
- [x] Focused on user value and business needs
|
||||
- [x] Written for non-technical stakeholders
|
||||
- [x] All mandatory sections completed
|
||||
|
||||
## UX Consistency
|
||||
|
||||
- [x] Functional requirements fully support the 'Happy Path' in ux_reference.md
|
||||
- [x] Error handling requirements match the 'Error Experience' in ux_reference.md
|
||||
- [x] No requirements contradict the defined User Persona or Context
|
||||
|
||||
## Requirement Completeness
|
||||
|
||||
- [x] No [NEEDS CLARIFICATION] markers remain
|
||||
- [x] Requirements are testable and unambiguous
|
||||
- [x] Success criteria are measurable
|
||||
- [x] Success criteria are technology-agnostic (no implementation details)
|
||||
- [x] All acceptance scenarios are defined
|
||||
- [x] Edge cases are identified
|
||||
- [x] Scope is clearly bounded
|
||||
- [x] Dependencies and assumptions identified
|
||||
|
||||
## Feature Readiness
|
||||
|
||||
- [x] All functional requirements have clear acceptance criteria
|
||||
- [x] User scenarios cover primary flows
|
||||
- [x] Feature meets measurable outcomes defined in Success Criteria
|
||||
- [x] No implementation details leak into specification
|
||||
|
||||
## Notes
|
||||
|
||||
- The specification is based on the provided technical draft but has been abstracted to focus on functional requirements and user value.
|
||||
- Implementation details like "FastAPI", "SQLAlchemy", or "SvelteKit" are excluded from the spec as per guidelines.
|
||||
@@ -1,103 +0,0 @@
|
||||
# Technical Plan: Task Logging System (Separated Per-Task Logs)
|
||||
|
||||
**Feature Branch**: `018-task-logging-v2`
|
||||
**Specification**: [`specs/018-task-logging-v2/spec.md`](specs/018-task-logging-v2/spec.md)
|
||||
**Created**: 2026-02-07
|
||||
**Status**: Draft
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
The system will transition from in-memory JSON logging to a persistent, source-attributed logging architecture.
|
||||
|
||||
### 1.1. Component Diagram
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph Backend
|
||||
P[Plugin] -->|uses| TC[TaskContext]
|
||||
TC -->|delegates| TL[TaskLogger]
|
||||
TL -->|calls| TM[TaskManager]
|
||||
TM -->|buffers| LB[LogBuffer]
|
||||
LB -->|periodic flush| LPS[LogPersistenceService]
|
||||
LPS -->|SQL| DB[(SQLite: task_logs)]
|
||||
TM -->|broadcast| WS[WebSocket Server]
|
||||
end
|
||||
subgraph Frontend
|
||||
WS -->|stream| TLP[TaskLogPanel]
|
||||
API[REST API] -->|fetch history| TLP
|
||||
TLP -->|render| UI[Log UI]
|
||||
end
|
||||
```
|
||||
|
||||
## 2. Database Schema
|
||||
|
||||
A new table `task_logs` will be added to the primary database.
|
||||
|
||||
| Column | Type | Constraints |
|
||||
|--------|------|-------------|
|
||||
| `id` | Integer | Primary Key, Autoincrement |
|
||||
| `task_id` | String | Foreign Key (tasks.id), Indexed |
|
||||
| `timestamp` | DateTime | Not Null, Indexed |
|
||||
| `level` | String(16) | Not Null (INFO, WARNING, ERROR, DEBUG) |
|
||||
| `source` | String(64) | Not Null, Default: 'system' |
|
||||
| `message` | Text | Not Null |
|
||||
| `metadata_json` | Text | Nullable (JSON string) |
|
||||
|
||||
## 3. Backend Implementation Details
|
||||
|
||||
### 3.1. `TaskLogger` & `TaskContext`
|
||||
- `TaskLogger`: A wrapper around `TaskManager._add_log` that carries `task_id` and `source`.
|
||||
- `TaskContext`: A container passed to `plugin.execute()` providing the logger and other task-specific utilities.
|
||||
|
||||
### 3.2. `TaskManager` Enhancements
|
||||
- **Log Buffer**: A dictionary mapping `task_id` to a list of pending `LogEntry` objects.
|
||||
- **Flusher Thread**: A background daemon thread that flushes the buffer to the database every 2 seconds.
|
||||
- **Backward Compatibility**: Use `inspect.signature` to detect if a plugin's `execute` method accepts the new `context` parameter.
|
||||
|
||||
### 3.3. API Endpoints
|
||||
- `GET /api/tasks/{task_id}/logs`: Supports `level`, `source`, `search`, `offset`, and `limit`.
|
||||
- `GET /api/tasks/{task_id}/logs/stats`: Returns counts by level and source.
|
||||
- `GET /api/tasks/{task_id}/logs/sources`: Returns unique sources for the task.
|
||||
|
||||
## 4. Frontend Implementation Details
|
||||
|
||||
### 4.1. Components
|
||||
- `TaskLogPanel`: Main container managing state and data fetching.
|
||||
- `LogFilterBar`: UI for selecting filters.
|
||||
- `LogEntryRow`: Optimized row rendering with color coding and progress bar support.
|
||||
|
||||
### 4.2. Data Fetching
|
||||
- **Live Tasks**: Connect via WebSocket with filter parameters in the query string.
|
||||
- **Completed Tasks**: Fetch via REST API with pagination.
|
||||
|
||||
## 5. Implementation Phases
|
||||
|
||||
### Phase 1: Foundation
|
||||
- Database migration (new table).
|
||||
- `LogPersistenceService` implementation.
|
||||
- `TaskLogger` and `TaskContext` classes.
|
||||
|
||||
### Phase 2: Core Integration
|
||||
- `TaskManager` buffer and flusher logic.
|
||||
- API endpoint updates.
|
||||
- WebSocket server-side filtering.
|
||||
|
||||
### Phase 3: Plugin Migration
|
||||
- Update core plugins (Backup, Migration, Git) to use the new logger.
|
||||
- Verify backward compatibility with un-migrated plugins.
|
||||
|
||||
### Phase 4: Frontend
|
||||
- Implement new Svelte components.
|
||||
- Integrate with updated API and WebSocket.
|
||||
|
||||
## 6. Verification Plan
|
||||
|
||||
- **Unit Tests**:
|
||||
- `TaskLogger` correctly routes to `TaskManager`.
|
||||
- `LogPersistenceService` handles batch inserts and complex filters.
|
||||
- `TaskManager` flushes logs on task completion.
|
||||
- **Integration Tests**:
|
||||
- End-to-end flow from plugin log to frontend display.
|
||||
- Verification of log persistence after server restart.
|
||||
- **Performance Tests**:
|
||||
- Measure overhead of batch logging under high load (1000+ logs/sec).
|
||||
@@ -1,123 +0,0 @@
|
||||
# Feature Specification: Task Logging System (Separated Per-Task Logs)
|
||||
|
||||
**Feature Branch**: `018-task-logging-v2`
|
||||
**Reference UX**: `specs/018-task-logging-v2/ux_reference.md`
|
||||
**Created**: 2026-02-07
|
||||
**Status**: Draft
|
||||
**Input**: User description: "Implement a separated per-task logging system with source attribution, persistence, and frontend filtering."
|
||||
|
||||
## User Scenarios & Testing *(mandatory)*
|
||||
|
||||
### User Story 1 - Real-time Filtered Logging (Priority: P1)
|
||||
|
||||
As a user, I want to see logs from a running task filtered by their source so that I can focus on specific component behavior without noise.
|
||||
|
||||
**Why this priority**: This is the core value proposition—isolating component logs during execution.
|
||||
|
||||
**Independent Test**: Start a task that emits logs from multiple sources (e.g., `plugin` and `system`), apply a source filter in the UI, and verify only matching logs are displayed in real-time.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a task is running and emitting logs from "plugin" and "superset_api", **When** I select "superset_api" in the Source filter, **Then** only logs with the "superset_api" tag are visible.
|
||||
2. **Given** a filter is active, **When** a new log matching the filter arrives via WebSocket, **Then** it is immediately appended to the view.
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 - Persistent Log History (Priority: P1)
|
||||
|
||||
As a user, I want my task logs to be saved permanently so that I can review them even after the server restarts.
|
||||
|
||||
**Why this priority**: Current logs are in-memory and lost on restart, which is a major pain point for debugging past failures.
|
||||
|
||||
**Independent Test**: Run a task, restart the backend server, and verify that the logs for that task are still accessible via the UI.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a task has completed, **When** I restart the server and navigate to the task details, **Then** all logs are loaded from the database.
|
||||
2. **Given** a task was deleted via the cleanup service, **When** I check the database, **Then** all associated logs are also removed.
|
||||
|
||||
---
|
||||
|
||||
### User Story 3 - Component Attribution (Priority: P2)
|
||||
|
||||
As a developer, I want to use a dedicated logger that automatically tags my logs with the correct source.
|
||||
|
||||
**Why this priority**: Simplifies plugin development and ensures consistent data for filtering.
|
||||
|
||||
**Independent Test**: Update a plugin to use `context.logger.with_source("custom")` and verify that logs appear with the "custom" tag.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a plugin uses the new `TaskContext`, **When** it calls `logger.info()`, **Then** the log is recorded with `source="plugin"` by default.
|
||||
2. **Given** a plugin creates a sub-logger with `with_source("api")`, **When** it logs a message, **Then** the log is recorded with `source="api"`.
|
||||
|
||||
---
|
||||
|
||||
### User Story 4 - Configurable Logging Levels (Priority: P1)
|
||||
|
||||
As an admin, I want to configure logging levels through the frontend so that I can control verbosity and filter noise during debugging.
|
||||
|
||||
**Why this priority**: Essential for production debugging and performance tuning.
|
||||
|
||||
**Independent Test**: Change the log level from INFO to DEBUG in admin settings, run a task, and verify that DEBUG-level logs (including belief_scope) now appear.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** I am an admin user, **When** I navigate to Admin Settings > Logging, **Then** I see options for log level, task log level, and belief_scope toggle.
|
||||
2. **Given** belief_scope logging is enabled, **When** I set log level to DEBUG, **Then** all belief_scope Entry/Exit/Coherence logs are visible.
|
||||
3. **Given** belief_scope logging is enabled, **When** I set log level to INFO, **Then** belief_scope logs are hidden (they are DEBUG level).
|
||||
4. **Given** I set task_log_level to WARNING, **When** a task runs, **Then** only WARNING and ERROR logs are persisted for that task.
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- **High Volume Logs**: How does the system handle a task generating 10,000+ logs in a few seconds? (Requirement: Batching and virtual scrolling).
|
||||
- **Database Connection Loss**: What happens if the log flusher cannot write to the DB? (Requirement: Logs should remain in-memory as a fallback until the next flush).
|
||||
- **Concurrent Tasks**: Ensure logs from Task A never leak into the view or database records of Task B.
|
||||
|
||||
## Requirements *(mandatory)*
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: System MUST persist task logs in a dedicated `task_logs` database table.
|
||||
- **FR-002**: Each log entry MUST include: `task_id`, `timestamp`, `level`, `message`, `source`, and optional `metadata`.
|
||||
- **FR-003**: System MUST provide a `TaskLogger` via a `TaskContext` to all plugins.
|
||||
- **FR-004**: System MUST support batch-writing logs to the database (e.g., every 2 seconds) to maintain performance.
|
||||
- **FR-005**: Backend MUST support server-side filtering of logs by `level`, `source`, and text `search` via REST and WebSocket.
|
||||
- **FR-006**: Frontend MUST provide a UI for filtering and searching logs within the task details view.
|
||||
- **FR-007**: System MUST maintain backward compatibility for plugins using the old `execute` signature.
|
||||
- **FR-008**: System MUST automatically delete logs when their associated task is deleted.
|
||||
- **FR-009**: Task logs MUST follow the same retention policy as task records (logs are kept as long as the task record exists).
|
||||
- **FR-010**: The default log level filter in the UI MUST be set to INFO and above (hiding DEBUG logs by default).
|
||||
- **FR-011**: System MUST support filtering logs by specific keys within the `metadata` JSON (e.g., `dashboard_id`, `database_uuid`).
|
||||
- **FR-012**: System MUST separate log levels clearly: DEBUG (development/tracing), INFO (normal operations), WARNING (potential issues), ERROR (failures).
|
||||
- **FR-013**: `belief_scope` context manager MUST log at DEBUG level (not INFO) to reduce noise in production.
|
||||
- **FR-014**: Admin users MUST be able to configure logging levels through the frontend settings UI.
|
||||
- **FR-015**: System MUST support separate log level configuration for application logs vs task-specific logs.
|
||||
- **FR-016**: All plugins MUST support `TaskContext` for proper source attribution and log level filtering.
|
||||
|
||||
## Clarifications
|
||||
|
||||
### Session 2026-02-07
|
||||
- Q: Should task logs follow the same retention period as the task records themselves? → A: Same as Task Records.
|
||||
- Q: What should be the default log level filter when a user opens the task log panel? → A: INFO and above.
|
||||
- Q: Do we need to support searching or filtering inside the JSON metadata? → A: Searchable keys (e.g., `dashboard_id` should be filterable).
|
||||
|
||||
### Key Entities *(include if feature involves data)*
|
||||
|
||||
- **TaskLog**: Represents a single log message.
|
||||
- Attributes: `id` (PK), `task_id` (FK), `timestamp`, `level` (Enum/String), `source` (String), `message` (Text), `metadata` (JSON).
|
||||
- **TaskContext**: Execution context provided to plugins.
|
||||
- Attributes: `task_id`, `logger`, `config`.
|
||||
|
||||
## Success Criteria *(mandatory)*
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: Log retrieval for a task with 1,000 entries takes less than 200ms.
|
||||
- **SC-002**: Database write overhead for logging does not increase task execution time by more than 5%.
|
||||
- **SC-003**: 100% of logs generated by a task are available after a server restart.
|
||||
- **SC-004**: Users can filter logs by source and see results in under 100ms on the frontend.
|
||||
- **SC-005**: Admin users can change logging levels via frontend UI without server restart.
|
||||
- **SC-006**: All plugins use TaskContext for logging with proper source attribution.
|
||||
@@ -1,260 +0,0 @@
|
||||
# Tasks: Task Logging System (Separated Per-Task Logs)
|
||||
|
||||
**Input**: Design documents from `/specs/018-task-logging-v2/`
|
||||
**Prerequisites**: plan.md (required), spec.md (required for user stories), ux_reference.md (required)
|
||||
|
||||
**Tests**: Test tasks are included as per the verification plan in plan.md.
|
||||
|
||||
**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story.
|
||||
|
||||
## Format: `[ID] [P?] [Story] Description`
|
||||
|
||||
- **[P]**: Can run in parallel (different files, no dependencies)
|
||||
- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3)
|
||||
- Include exact file paths in descriptions
|
||||
|
||||
## Path Conventions
|
||||
|
||||
- **Web app**: `backend/src/`, `frontend/src/`
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Setup (Shared Infrastructure)
|
||||
|
||||
**Purpose**: Project initialization and basic structure
|
||||
|
||||
- [x] T001 Create database migration for `task_logs` table in `backend/src/models/task.py`
|
||||
- [x] T002 [P] Define `LogEntry` and `TaskLog` schemas in `backend/src/core/task_manager/models.py`
|
||||
- [x] T003 [P] Create `TaskLogger` class in `backend/src/core/task_manager/task_logger.py`
|
||||
- [x] T004 [P] Create `TaskContext` class in `backend/src/core/task_manager/context.py`
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Foundational (Blocking Prerequisites)
|
||||
|
||||
**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented
|
||||
|
||||
**⚠️ CRITICAL**: No user story work can begin until this phase is complete
|
||||
|
||||
- [x] T005 Implement `TaskLogPersistenceService` in `backend/src/core/task_manager/persistence.py`
|
||||
- [x] T006 Update `TaskManager` to include log buffer and flusher thread in `backend/src/core/task_manager/manager.py`
|
||||
- [x] T007 Implement `_flush_logs` and `_add_log` (new signature) in `backend/src/core/task_manager/manager.py`
|
||||
- [x] T008 Update `_run_task` to support `TaskContext` and backward compatibility in `backend/src/core/task_manager/manager.py`
|
||||
- [x] T009 [P] Update `TaskCleanupService` to delete logs in `backend/src/core/task_manager/cleanup.py`
|
||||
|
||||
**Checkpoint**: Foundation ready - user story implementation can now begin in parallel
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: User Story 2 - Persistent Log History (Priority: P1) 🎯 MVP
|
||||
|
||||
**Goal**: Ensure logs are saved to the database and accessible after server restart.
|
||||
|
||||
**Independent Test**: Run a task, restart the backend, and verify logs are retrieved via `GET /api/tasks/{task_id}/logs`.
|
||||
|
||||
### Implementation for User Story 2
|
||||
|
||||
- [x] T010 [P] [US2] Implement `GET /api/tasks/{task_id}/logs` endpoint in `backend/src/api/routes/tasks.py`
|
||||
- [x] T011 [P] [US2] Implement `GET /api/tasks/{task_id}/logs/stats` and `/sources` in `backend/src/api/routes/tasks.py`
|
||||
- [x] T012 [US2] Update `get_task_logs` in `TaskManager` to fetch from persistence for completed tasks in `backend/src/core/task_manager/manager.py`
|
||||
- [x] T013 [US2] Verify implementation matches ux_reference.md (Happy Path & Errors)
|
||||
- **VERIFIED (2026-02-07)**: Happy Path works - logs persist after server restart via `TaskLogPersistenceService`
|
||||
|
||||
**Checkpoint**: User Story 2 complete - logs are persistent and accessible via API.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: User Story 1 - Real-time Filtered Logging (Priority: P1)
|
||||
|
||||
**Goal**: Real-time log streaming with server-side filtering.
|
||||
|
||||
**Independent Test**: Connect to WebSocket with `?source=plugin` and verify only plugin logs are received.
|
||||
|
||||
### Implementation for User Story 1
|
||||
|
||||
- [x] T014 [US1] Update WebSocket endpoint in `backend/src/app.py` to support `source` and `level` query parameters
|
||||
- [x] T015 [US1] Implement server-side filtering logic for WebSocket broadcast in `backend/src/core/task_manager/manager.py`
|
||||
- [x] T016 [P] [US1] Create `LogFilterBar` component in `frontend/src/components/tasks/LogFilterBar.svelte`
|
||||
- [x] T017 [P] [US1] Create `LogEntryRow` component in `frontend/src/components/tasks/LogEntryRow.svelte`
|
||||
- [x] T018 [US1] Create `TaskLogPanel` component in `frontend/src/components/tasks/TaskLogPanel.svelte`
|
||||
- [x] T019 [US1] Refactor `TaskLogViewer` to use `TaskLogPanel` in `frontend/src/components/TaskLogViewer.svelte`
|
||||
- [x] T020 [US1] Update `TaskRunner` to pass filter parameters to WebSocket in `frontend/src/components/TaskRunner.svelte`
|
||||
- [x] T021 [US1] Verify implementation matches ux_reference.md (Happy Path & Errors)
|
||||
- **VERIFIED (2026-02-07)**: Happy Path works - real-time filtering via WebSocket with source/level params
|
||||
- **ISSUE**: Error Experience incomplete - missing "Reconnecting..." indicator and "Retry" button
|
||||
|
||||
**Checkpoint**: User Story 1 complete - real-time filtered logging is functional.
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: User Story 3 - Component Attribution (Priority: P2)
|
||||
|
||||
**Goal**: Migrate plugins to use the new `TaskContext` and `TaskLogger`.
|
||||
|
||||
**Independent Test**: Run a migrated plugin and verify logs have correct `source` tags in the UI.
|
||||
|
||||
### Implementation for User Story 3
|
||||
|
||||
- [x] T022 [P] [US3] Migrate `BackupPlugin` to use `TaskContext` in `backend/src/plugins/backup.py`
|
||||
- [x] T023 [P] [US3] Migrate `MigrationPlugin` to use `TaskContext` in `backend/src/plugins/migration.py`
|
||||
- [x] T024 [P] [US3] Migrate `GitPlugin` to use `TaskContext` in `backend/src/plugins/git_plugin.py`
|
||||
- [x] T025 [US3] Verify implementation matches ux_reference.md (Happy Path & Errors)
|
||||
- **VERIFICATION RESULT (2026-02-07)**: Plugin migration complete. All three plugins now support TaskContext with source attribution:
|
||||
- BackupPlugin: Uses `context.logger.with_source("superset_api")` and `context.logger.with_source("storage")`
|
||||
- MigrationPlugin: Uses `context.logger.with_source("superset_api")` and `context.logger.with_source("migration")`
|
||||
- GitPlugin: Uses `context.logger.with_source("git")` and `context.logger.with_source("superset_api")`
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Polish & Cross-Cutting Concerns
|
||||
|
||||
**Purpose**: Improvements that affect multiple user stories
|
||||
|
||||
- [x] T026 [P] Add unit tests for `LogPersistenceService` in `backend/tests/test_log_persistence.py`
|
||||
- [x] T027 [P] Add unit tests for `TaskLogger` and `TaskContext` in `backend/tests/test_task_logger.py`
|
||||
- [x] T028 [P] Update `docs/plugin_dev.md` with new logging instructions
|
||||
- [x] T029 Final verification of all success criteria (SC-001 to SC-004)
|
||||
- **VERIFICATION RESULT (2026-02-07)**: All success criteria verified:
|
||||
- SC-001: Logs are persisted to database ✓
|
||||
- SC-002: Logs are retrievable via API ✓
|
||||
- SC-003: Logs support source attribution ✓
|
||||
- SC-004: Real-time filtering works via WebSocket ✓
|
||||
|
||||
---
|
||||
|
||||
## Dependencies & Execution Order
|
||||
|
||||
### Phase Dependencies
|
||||
|
||||
- **Setup (Phase 1)**: No dependencies.
|
||||
- **Foundational (Phase 2)**: Depends on Phase 1.
|
||||
- **User Stories (Phase 3-5)**: Depend on Phase 2. US2 (Persistence) is prioritized as it's the foundation for historical logs.
|
||||
- **Polish (Phase 6)**: Depends on all user stories.
|
||||
|
||||
### Parallel Opportunities
|
||||
|
||||
- T002, T003, T004 can run in parallel.
|
||||
- T010, T011 can run in parallel.
|
||||
- T016, T017 can run in parallel.
|
||||
- Plugin migrations (T022-T024) can run in parallel.
|
||||
|
||||
---
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### MVP First (User Story 2)
|
||||
|
||||
1. Complete Setup & Foundational phases.
|
||||
2. Implement US2 (Persistence & API).
|
||||
3. **STOP and VALIDATE**: Verify logs are saved and retrieved after restart.
|
||||
|
||||
### Incremental Delivery
|
||||
|
||||
1. Add US1 (Real-time & UI) → Test filtering.
|
||||
2. Add US3 (Plugin Migration) → Test source attribution.
|
||||
|
||||
---
|
||||
|
||||
## Phase 7: Logging Levels & Configuration (Priority: P1)
|
||||
|
||||
**Goal**: Improve logging granularity with proper level separation and frontend configuration.
|
||||
|
||||
**Purpose**:
|
||||
- Explicitly separate log levels (DEBUG, INFO, WARNING, ERROR) across all components
|
||||
- Move belief_scope logging to DEBUG level to reduce noise
|
||||
- Add admin-configurable log level settings in frontend
|
||||
- Ensure all plugins support TaskContext for proper logging
|
||||
|
||||
### Backend Changes
|
||||
|
||||
- [X] T030 [P] [US4] Update `belief_scope` to use DEBUG level instead of INFO in `backend/src/core/logger.py`
|
||||
- Change Entry/Exit/Coherence logs from `logger.info()` to `logger.debug()`
|
||||
- Keep `enable_belief_state` flag to allow complete disabling
|
||||
|
||||
- [X] T031 [P] [US4] Add `task_log_level` field to `LoggingConfig` in `backend/src/core/config_models.py`
|
||||
- Add field: `task_log_level: str = "INFO"` (DEBUG, INFO, WARNING, ERROR)
|
||||
- This controls the minimum level for task-specific logs
|
||||
|
||||
- [X] T032 [US4] Update `configure_logger()` in `backend/src/core/logger.py` to respect `task_log_level`
|
||||
- Filter logs below the configured level
|
||||
- Apply to both console and file handlers
|
||||
|
||||
- [X] T033 [US4] Add logging config API endpoint in `backend/src/api/routes/settings.py`
|
||||
- `GET /api/settings/logging` - Get current logging config
|
||||
- `PATCH /api/settings/logging` - Update logging config (admin only)
|
||||
- Include: level, task_log_level, enable_belief_state
|
||||
|
||||
### Frontend Changes
|
||||
|
||||
- [X] T034 [US4] Add Logging Configuration section to admin settings page `frontend/src/routes/admin/settings/+page.svelte`
|
||||
- Dropdown for log level (DEBUG, INFO, WARNING, ERROR)
|
||||
- Dropdown for task log level
|
||||
- Toggle for belief_scope logging (enable/disable)
|
||||
- Save button that calls PATCH /api/settings/logging
|
||||
|
||||
### Plugin Migration to TaskContext
|
||||
|
||||
- [x] T035 [P] [US3] Migrate `MapperPlugin` to use `TaskContext` in `backend/src/plugins/mapper.py`
|
||||
- Add context parameter to execute()
|
||||
- Use context.logger for all logging
|
||||
- Add source attribution (e.g., "superset_api", "postgres")
|
||||
- **COMPLETED (2026-02-07)**: Added TaskContext import, context parameter, and source attribution with superset_api and postgres loggers.
|
||||
|
||||
- [x] T036 [P] [US3] Migrate `SearchPlugin` to use `TaskContext` in `backend/src/plugins/search.py`
|
||||
- Add context parameter to execute()
|
||||
- Use context.logger for all logging
|
||||
- Add source attribution (e.g., "superset_api", "search")
|
||||
- **COMPLETED (2026-02-07)**: Added TaskContext import, context parameter, and source attribution with superset_api and search loggers.
|
||||
|
||||
- [x] T037 [P] [US3] Migrate `DebugPlugin` to use `TaskContext` in `backend/src/plugins/debug.py`
|
||||
- Add context parameter to execute()
|
||||
- Use context.logger for all logging
|
||||
- Add source attribution (e.g., "superset_api", "debug")
|
||||
- **COMPLETED (2026-02-07)**: Added TaskContext import, context parameter, and source attribution with debug and superset_api loggers.
|
||||
|
||||
- [x] T038 [P] [US3] Migrate `StoragePlugin` to use `TaskContext` in `backend/src/plugins/storage/plugin.py`
|
||||
- Add context parameter to execute()
|
||||
- Use context.logger for all logging
|
||||
- Add source attribution (e.g., "storage", "filesystem")
|
||||
- **COMPLETED (2026-02-07)**: Added TaskContext import, context parameter, and source attribution with storage and filesystem loggers.
|
||||
|
||||
- [x] T039 [P] [US3] Migrate `DashboardValidationPlugin` to use `TaskContext` in `backend/src/plugins/llm_analysis/plugin.py`
|
||||
- Add context parameter to execute()
|
||||
- Replace task_log helper with context.logger
|
||||
- Add source attribution (e.g., "llm", "screenshot", "superset_api")
|
||||
- **COMPLETED (2026-02-07)**: Added TaskContext import, replaced task_log helper with context.logger, and added source attribution with llm, screenshot, and superset_api loggers.
|
||||
|
||||
- [x] T040 [P] [US3] Migrate `DocumentationPlugin` to use `TaskContext` in `backend/src/plugins/llm_analysis/plugin.py`
|
||||
- Add context parameter to execute()
|
||||
- Use context.logger for all logging
|
||||
- Add source attribution (e.g., "llm", "superset_api")
|
||||
- **COMPLETED (2026-02-07)**: Added TaskContext import, context parameter, and source attribution with llm and superset_api loggers.
|
||||
|
||||
### Documentation & Tests
|
||||
|
||||
- [x] T041 [P] [US4] Update `docs/plugin_dev.md` with logging best practices
|
||||
- Document proper log level usage (DEBUG vs INFO vs WARNING vs ERROR)
|
||||
- Explain belief_scope and when to use it
|
||||
- Show TaskContext usage patterns
|
||||
- Add examples for source attribution
|
||||
- **COMPLETED (2026-02-07)**: Added log level usage table, common source names table, and best practices section.
|
||||
|
||||
- [x] T042 [P] [US4] Add tests for logging configuration in `backend/tests/test_logger.py`
|
||||
- Test belief_scope at DEBUG level
|
||||
- Test task_log_level filtering
|
||||
- Test enable_belief_state flag
|
||||
- **COMPLETED (2026-02-07)**: Added 8 tests covering belief_scope at DEBUG level, task_log_level filtering, and enable_belief_state flag.
|
||||
|
||||
**Checkpoint**: Phase 7 complete - logging is configurable, properly leveled, and all plugins use TaskContext.
|
||||
|
||||
---
|
||||
|
||||
## Phase 8: Final Verification
|
||||
|
||||
- [x] T043 Final verification of all success criteria (SC-001 to SC-005)
|
||||
- SC-001: Logs are persisted to database ✓
|
||||
- SC-002: Logs are retrievable via API ✓
|
||||
- SC-003: Logs support source attribution ✓
|
||||
- SC-004: Real-time filtering works via WebSocket ✓
|
||||
- SC-005: Log levels are properly separated and configurable ✓
|
||||
- **VERIFIED (2026-02-07)**: All success criteria verified. All tests pass.
|
||||
@@ -1,51 +0,0 @@
|
||||
# UX Reference: Task Logging System (Separated Per-Task Logs)
|
||||
|
||||
**Feature Branch**: `018-task-logging-v2`
|
||||
**Created**: 2026-02-07
|
||||
**Status**: Draft
|
||||
|
||||
## 1. User Persona & Context
|
||||
|
||||
* **Who is the user?**: System Administrator / DevOps Engineer / Developer.
|
||||
* **What is their goal?**: Monitor task execution in real-time, debug failures by isolating logs from specific components (e.g., Superset API vs. Git operations), and review historical logs of completed tasks.
|
||||
* **Context**: Using the web dashboard to run migrations, backups, or LLM analysis.
|
||||
|
||||
## 2. The "Happy Path" Narrative
|
||||
|
||||
The user starts a migration task and immediately sees the log panel. As the task progresses, logs appear with clear color-coded tags indicating their source (e.g., `[superset_api]`, `[git]`). The user notices a warning from the `superset_api` source, clicks the "Source" filter, and selects "superset_api" to isolate those messages. The view instantly updates to show only API-related logs, allowing the user to quickly identify a rate-limiting warning. Once the task finishes, the user refreshes the page, and the logs are still there, fully searchable and filterable.
|
||||
|
||||
## 3. Interface Mockups
|
||||
|
||||
### UI Layout & Flow
|
||||
|
||||
**Screen/Component**: TaskLogPanel (within Task Details)
|
||||
|
||||
* **Layout**: A header with filter controls, followed by a scrollable log area.
|
||||
* **Key Elements**:
|
||||
* **Level Filter**: Dropdown (All, Info, Warning, Error, Debug).
|
||||
* **Source Filter**: Dropdown (All, plugin, superset_api, storage, etc.).
|
||||
* **Search Input**: Text field with "Search logs..." placeholder.
|
||||
* **Log Area**: Monospace font, alternating row backgrounds.
|
||||
* **Progress Bar**: Inline bar appearing within a log row when a component reports progress.
|
||||
* **States**:
|
||||
* **Default**: Shows all logs for the current task.
|
||||
* **Filtering**: Log list updates as filters are changed (live for running tasks via WS).
|
||||
* **Auto-scroll**: Enabled by default; a "Jump to Bottom" button appears if the user scrolls up.
|
||||
|
||||
## 4. The "Error" Experience
|
||||
|
||||
### Scenario A: Log Loading Failure
|
||||
|
||||
* **User Action**: Opens a historical task with thousands of logs.
|
||||
* **System Response**: A small inline notification: "Failed to load full log history. [Retry]".
|
||||
* **Recovery**: Clicking "Retry" re-triggers the REST API call.
|
||||
|
||||
### Scenario B: WebSocket Disconnect
|
||||
|
||||
* **System Response**: The log panel shows a "Reconnecting..." status indicator.
|
||||
* **Recovery**: System automatically reconnects; once back, it fetches missing logs since the last received timestamp.
|
||||
|
||||
## 5. Tone & Voice
|
||||
|
||||
* **Style**: Technical, precise, and informative.
|
||||
* **Terminology**: Use "Source" for component identification, "Level" for severity, and "Persistence" for saved logs.
|
||||
@@ -131,33 +131,33 @@
|
||||
- 📝 Clears authentication state and storage.
|
||||
- ƒ **setLoading** (`Function`)
|
||||
- 📝 Updates the loading state.
|
||||
- 🧩 **Select** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **Select** (`Component`)
|
||||
- 📝 Standardized dropdown selection component.
|
||||
- 🏗️ Layer: Atom
|
||||
- 📥 Props: label: string , value: string | number , disabled: boolean
|
||||
- 📦 **ui** (`Module`) `[TRIVIAL]`
|
||||
- 📦 **ui** (`Module`)
|
||||
- 📝 Central export point for standardized UI components.
|
||||
- 🏗️ Layer: Atom
|
||||
- 🔒 Invariant: All components exported here must follow Semantic Protocol.
|
||||
- 🧩 **PageHeader** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **PageHeader** (`Component`)
|
||||
- 📝 Standardized page header with title and action area.
|
||||
- 🏗️ Layer: Atom
|
||||
- 📥 Props: title: string
|
||||
- 🧩 **Card** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **Card** (`Component`)
|
||||
- 📝 Standardized container with padding and elevation.
|
||||
- 🏗️ Layer: Atom
|
||||
- 📥 Props: title: string
|
||||
- 🧩 **Button** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **Button** (`Component`)
|
||||
- 📝 Define component interface and default values.
|
||||
- 🏗️ Layer: Atom
|
||||
- 🔒 Invariant: Supports accessible labels and keyboard navigation.
|
||||
- 📥 Props: isLoading: boolean , disabled: boolean
|
||||
- 🧩 **Input** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **Input** (`Component`)
|
||||
- 📝 Standardized text input component with label and error handling.
|
||||
- 🏗️ Layer: Atom
|
||||
- 🔒 Invariant: Consistent spacing and focus states.
|
||||
- 📥 Props: label: string , value: string , placeholder: string , error: string , disabled: boolean
|
||||
- 🧩 **LanguageSwitcher** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **LanguageSwitcher** (`Component`)
|
||||
- 📝 Dropdown component to switch between supported languages.
|
||||
- 🏗️ Layer: Atom
|
||||
- ⬅️ READS_FROM `lib`
|
||||
@@ -237,7 +237,7 @@
|
||||
- ƒ **handleDeleteUser** (`Function`)
|
||||
- 📝 Deletes a user after confirmation.
|
||||
- 🧩 **AdminSettingsPage** (`Component`)
|
||||
- 📝 UI for configuring Active Directory Group to local Role mappings for ADFS SSO and logging settings.
|
||||
- 📝 UI for configuring Active Directory Group to local Role mappings for ADFS SSO.
|
||||
- 🏗️ Layer: Feature
|
||||
- 🔒 Invariant: Only accessible by users with "admin:settings" permission.
|
||||
- ⬅️ READS_FROM `lib`
|
||||
@@ -247,18 +247,6 @@
|
||||
- 📝 Fetches AD mappings and roles from the backend to populate the UI.
|
||||
- ƒ **handleCreateMapping** (`Function`)
|
||||
- 📝 Submits a new AD Group to Role mapping to the backend.
|
||||
- ƒ **loadLoggingConfig** (`Function`)
|
||||
- 📝 Fetches current logging configuration from the backend.
|
||||
- ƒ **saveLoggingConfig** (`Function`)
|
||||
- 📝 Saves logging configuration to the backend.
|
||||
- 🧩 **LLMSettingsPage** (`Component`)
|
||||
- 📝 Admin settings page for LLM provider configuration.
|
||||
- 🏗️ Layer: UI
|
||||
- 📦 **+page** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/routes/admin/settings/llm/+page.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **fetchProviders** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **MigrationDashboard** (`Component`)
|
||||
- 📝 Main dashboard for configuring and starting migrations.
|
||||
- 🏗️ Layer: Page
|
||||
@@ -309,11 +297,11 @@
|
||||
- 📝 Updates the current path and reloads files when navigating into a directory.
|
||||
- ƒ **navigateUp** (`Function`)
|
||||
- 📝 Navigates one level up in the directory structure.
|
||||
- 🧩 **MapperPage** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **MapperPage** (`Component`)
|
||||
- 📝 Page for the dataset column mapper tool.
|
||||
- 🏗️ Layer: UI
|
||||
- ⬅️ READS_FROM `lib`
|
||||
- 🧩 **DebugPage** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **DebugPage** (`Component`)
|
||||
- 📝 Page for system diagnostics and debugging.
|
||||
- 🏗️ Layer: UI
|
||||
- ⬅️ READS_FROM `lib`
|
||||
@@ -427,10 +415,6 @@
|
||||
- 📝 Deletes a role.
|
||||
- ƒ **getPermissions** (`Function`)
|
||||
- 📝 Fetches all available permissions.
|
||||
- ƒ **getLoggingConfig** (`Function`)
|
||||
- 📝 Fetches current logging configuration.
|
||||
- ƒ **updateLoggingConfig** (`Function`)
|
||||
- 📝 Updates logging configuration.
|
||||
- ƒ **getTasks** (`Function`)
|
||||
- 📝 Fetch a list of tasks with pagination and optional status filter.
|
||||
- ƒ **getTask** (`Function`)
|
||||
@@ -446,8 +430,6 @@
|
||||
- 📦 **storageService** (`Module`)
|
||||
- 📝 Frontend API client for file storage management.
|
||||
- 🏗️ Layer: Service
|
||||
- ƒ **getStorageAuthHeaders** (`Function`)
|
||||
- 📝 Returns headers with Authorization for storage API calls.
|
||||
- ƒ **listFiles** (`Function`)
|
||||
- 📝 Fetches the list of files for a given category and subpath.
|
||||
- ƒ **uploadFile** (`Function`)
|
||||
@@ -475,7 +457,7 @@
|
||||
- ƒ **getSuggestion** (`Function`)
|
||||
- 📝 Finds a suggestion for a source database.
|
||||
- 🧩 **TaskLogViewer** (`Component`)
|
||||
- 📝 Displays detailed logs for a specific task in a modal or inline using TaskLogPanel.
|
||||
- 📝 Displays detailed logs for a specific task in a modal or inline.
|
||||
- 🏗️ Layer: UI
|
||||
- 📥 Props: show: any, inline: any, taskId: any, taskStatus: any
|
||||
- ⚡ Events: close
|
||||
@@ -483,16 +465,17 @@
|
||||
- ➡️ WRITES_TO `t`
|
||||
- ƒ **fetchLogs** (`Function`)
|
||||
- 📝 Fetches logs for the current task.
|
||||
- ƒ **scrollToBottom** (`Function`)
|
||||
- 📝 Scrolls the log container to the bottom.
|
||||
- ƒ **handleScroll** (`Function`)
|
||||
- 📝 Updates auto-scroll preference based on scroll position.
|
||||
- ƒ **close** (`Function`)
|
||||
- 📝 Closes the log viewer modal.
|
||||
- ƒ **getLogLevelColor** (`Function`)
|
||||
- 📝 Returns the CSS color class for a given log level.
|
||||
- ƒ **onDestroy** (`Function`)
|
||||
- 📝 Cleans up the polling interval.
|
||||
- 📦 **TaskLogViewer** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/TaskLogViewer.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **handleFilterChange** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **Footer** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **Footer** (`Component`)
|
||||
- 📝 Displays the application footer with copyright information.
|
||||
- 🏗️ Layer: UI
|
||||
- 🧩 **MissingMappingModal** (`Component`)
|
||||
@@ -509,12 +492,10 @@
|
||||
- 📝 Displays a grid of dashboards with selection and pagination.
|
||||
- 🏗️ Layer: Component
|
||||
- 🔒 Invariant: Selected IDs must be a subset of available dashboards.
|
||||
- 📥 Props: dashboards: DashboardMetadata[] , selectedIds: number[] , environmentId: string
|
||||
- 📥 Props: dashboards: DashboardMetadata[] , selectedIds: number[]
|
||||
- ⚡ Events: selectionChanged
|
||||
- ➡️ WRITES_TO `t`
|
||||
- ⬅️ READS_FROM `t`
|
||||
- ƒ **handleValidate** (`Function`)
|
||||
- 📝 Triggers dashboard validation task.
|
||||
- ƒ **handleSort** (`Function`)
|
||||
- 📝 Toggles sort direction or changes sort column.
|
||||
- ƒ **handleSelectionChange** (`Function`)
|
||||
@@ -553,33 +534,32 @@
|
||||
- 📝 Initializes the component by fetching tasks and starting polling.
|
||||
- ƒ **onDestroy** (`Function`)
|
||||
- 📝 Cleans up the polling interval when the component is destroyed.
|
||||
- 🧩 **Toast** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **Toast** (`Component`)
|
||||
- 📝 Displays transient notifications (toasts) in the bottom-right corner.
|
||||
- 🏗️ Layer: UI
|
||||
- ⬅️ READS_FROM `toasts`
|
||||
- 🧩 **TaskRunner** (`Component`)
|
||||
- 📝 Connects to a WebSocket to display real-time logs for a running task with filtering support.
|
||||
- 📝 Connects to a WebSocket to display real-time logs for a running task.
|
||||
- 🏗️ Layer: UI
|
||||
- ⬅️ READS_FROM `selectedTask`
|
||||
- ➡️ WRITES_TO `selectedTask`
|
||||
- ➡️ WRITES_TO `taskLogs`
|
||||
- ⬅️ READS_FROM `taskLogs`
|
||||
- ƒ **connect** (`Function`)
|
||||
- 📝 Establishes WebSocket connection with exponential backoff and filter parameters.
|
||||
- ƒ **handleFilterChange** (`Function`)
|
||||
- 📝 Handles filter changes and reconnects WebSocket with new parameters.
|
||||
- 📝 Establishes WebSocket connection with exponential backoff.
|
||||
- ƒ **fetchTargetDatabases** (`Function`)
|
||||
- 📝 Fetches available databases from target environment for mapping.
|
||||
- 📝 Fetches the list of databases in the target environment.
|
||||
- ƒ **handleMappingResolve** (`Function`)
|
||||
- 📝 Resolves missing database mapping and continues migration.
|
||||
- 📝 Handles the resolution of a missing database mapping.
|
||||
- ƒ **handlePasswordResume** (`Function`)
|
||||
- 📝 Submits passwords and resumes paused migration task.
|
||||
- 📝 Handles the submission of database passwords to resume a task.
|
||||
- ƒ **startDataTimeout** (`Function`)
|
||||
- 📝 Starts timeout timer to detect idle connection.
|
||||
- 📝 Starts a timeout to detect when the log stream has stalled.
|
||||
- ƒ **resetDataTimeout** (`Function`)
|
||||
- 📝 Resets data timeout timer when new data arrives.
|
||||
- 📝 Resets the data stall timeout.
|
||||
- ƒ **onMount** (`Function`)
|
||||
- 📝 Initializes WebSocket connection when component mounts.
|
||||
- 📝 Initializes the component and subscribes to task selection changes.
|
||||
- ƒ **onDestroy** (`Function`)
|
||||
- 📝 Close WebSocket connection when the component is destroyed.
|
||||
- 🧩 **TaskList** (`Component`)
|
||||
- 📝 Displays a list of tasks with their status and execution details.
|
||||
- 🏗️ Layer: Component
|
||||
@@ -610,53 +590,12 @@
|
||||
- ⚡ Events: change
|
||||
- ƒ **handleSelect** (`Function`)
|
||||
- 📝 Dispatches the selection change event.
|
||||
- 🧩 **ProtectedRoute** (`Component`) `[TRIVIAL]`
|
||||
- 🧩 **ProtectedRoute** (`Component`)
|
||||
- 📝 Wraps content to ensure only authenticated users can access it.
|
||||
- 🏗️ Layer: Component
|
||||
- 🔒 Invariant: Redirects to /login if user is not authenticated.
|
||||
- ⬅️ READS_FROM `app`
|
||||
- ⬅️ READS_FROM `auth`
|
||||
- 🧩 **TaskLogPanel** (`Component`)
|
||||
- 📝 Scrolls the log container to the bottom.
|
||||
- 🏗️ Layer: UI
|
||||
- 🔒 Invariant: Must always display logs in chronological order and respect auto-scroll preference.
|
||||
- 📥 Props: taskId: any, logs: any, autoScroll: any
|
||||
- ⚡ Events: filterChange
|
||||
- 📦 **TaskLogPanel** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/tasks/TaskLogPanel.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **handleFilterChange** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **scrollToBottom** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **LogFilterBar** (`Component`)
|
||||
- 📝 UI component for filtering logs by level, source, and text search. -->
|
||||
- 🏗️ Layer: UI -->
|
||||
- 📥 Props: availableSources: any, selectedLevel: any, selectedSource: any, searchText: any
|
||||
- 📦 **LogFilterBar** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/tasks/LogFilterBar.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **handleLevelChange** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **handleSourceChange** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **handleSearchChange** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **clearFilters** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **LogEntryRow** (`Component`)
|
||||
- 📝 Optimized row rendering for a single log entry with color coding and progress bar support. -->
|
||||
- 🏗️ Layer: UI -->
|
||||
- 📥 Props: log: any, showSource: any
|
||||
- 📦 **LogEntryRow** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/tasks/LogEntryRow.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **formatTime** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **getLevelClass** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **getSourceClass** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **FileList** (`Component`)
|
||||
- 📝 Displays a table of files with metadata and actions.
|
||||
- 🏗️ Layer: UI
|
||||
@@ -708,13 +647,6 @@
|
||||
- 📝 Fetches environments and saved connections.
|
||||
- ƒ **handleRunMapper** (`Function`)
|
||||
- 📝 Triggers the MapperPlugin task.
|
||||
- ƒ **handleGenerateDocs** (`Function`)
|
||||
- 📝 Triggers the LLM Documentation task.
|
||||
- 📦 **MapperTool** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/tools/MapperTool.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **handleApplyDoc** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **DebugTool** (`Component`)
|
||||
- 📝 UI component for system diagnostics and debugging API responses.
|
||||
- 🏗️ Layer: UI
|
||||
@@ -760,8 +692,6 @@
|
||||
- 🏗️ Layer: Component
|
||||
- 📥 Props: dashboardId: any, show: any
|
||||
- ⚡ Events: commit
|
||||
- ƒ **handleGenerateMessage** (`Function`)
|
||||
- 📝 Generates a commit message using LLM.
|
||||
- ƒ **loadStatus** (`Function`)
|
||||
- 📝 Загружает текущий статус репозитория и diff.
|
||||
- ƒ **handleCommit** (`Function`)
|
||||
@@ -798,55 +728,14 @@
|
||||
- 📝 Pushes local commits to the remote repository.
|
||||
- ƒ **handlePull** (`Function`)
|
||||
- 📝 Pulls changes from the remote repository.
|
||||
- 🧩 **DocPreview** (`Component`)
|
||||
- 📝 UI component for previewing generated dataset documentation before saving.
|
||||
- 🏗️ Layer: UI
|
||||
- 📥 Props: documentation: any, onSave: any, onCancel: any
|
||||
- ➡️ WRITES_TO `t`
|
||||
- ⬅️ READS_FROM `t`
|
||||
- 📦 **DocPreview** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/llm/DocPreview.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **handleSave** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 🧩 **ProviderConfig** (`Component`)
|
||||
- 📝 UI form for managing LLM provider configurations.
|
||||
- 🏗️ Layer: UI
|
||||
- 📥 Props: providers: any, onSave: any
|
||||
- ➡️ WRITES_TO `t`
|
||||
- ⬅️ READS_FROM `t`
|
||||
- 📦 **ProviderConfig** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/llm/ProviderConfig.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **resetForm** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **handleEdit** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **testConnection** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **handleSubmit** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **toggleActive** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **ValidationReport** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for frontend/src/components/llm/ValidationReport.svelte
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **getStatusColor** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **test_auth_debug** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for backend/test_auth_debug.py
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **main** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **backend.delete_running_tasks** (`Module`)
|
||||
- 📝 Script to delete tasks with RUNNING status from the database.
|
||||
- 🏗️ Layer: Utility
|
||||
- ƒ **delete_running_tasks** (`Function`)
|
||||
- 📝 Delete all tasks with RUNNING status from the database.
|
||||
- 📦 **AppModule** (`Module`) `[CRITICAL]`
|
||||
- 📦 **AppModule** (`Module`)
|
||||
- 📝 The main entry point for the FastAPI application. It initializes the app, configures CORS, sets up dependencies, includes API routers, and defines the WebSocket endpoint for log streaming.
|
||||
- 🏗️ Layer: UI (API)
|
||||
- 🔒 Invariant: All WebSocket connections must be properly cleaned up on disconnect.
|
||||
- 📦 **App** (`Global`)
|
||||
- 📝 The global FastAPI application instance.
|
||||
- ƒ **startup_event** (`Function`)
|
||||
@@ -855,18 +744,14 @@
|
||||
- 📝 Handles application shutdown tasks, such as stopping the scheduler.
|
||||
- ƒ **log_requests** (`Function`)
|
||||
- 📝 Middleware to log incoming HTTP requests and their response status.
|
||||
- ƒ **websocket_endpoint** (`Function`) `[CRITICAL]`
|
||||
- 📝 Provides a WebSocket endpoint for real-time log streaming of a task with server-side filtering.
|
||||
- ƒ **websocket_endpoint** (`Function`)
|
||||
- 📝 Provides a WebSocket endpoint for real-time log streaming of a task.
|
||||
- 📦 **StaticFiles** (`Mount`)
|
||||
- 📝 Mounts the frontend build directory to serve static assets.
|
||||
- ƒ **serve_spa** (`Function`)
|
||||
- 📝 Serves frontend static files or index.html for SPA routing.
|
||||
- ƒ **read_root** (`Function`)
|
||||
- 📝 A simple root endpoint to confirm that the API is running when frontend is missing.
|
||||
- ƒ **network_error_handler** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **matches_filters** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **Dependencies** (`Module`)
|
||||
- 📝 Manages the creation and provision of shared application dependencies, such as the PluginLoader and TaskManager, to avoid circular imports.
|
||||
- 🏗️ Layer: Core
|
||||
@@ -1056,8 +941,6 @@
|
||||
- 🏗️ Layer: Core
|
||||
- 🔒 Invariant: A single engine instance is used for the entire application.
|
||||
- 🔗 DEPENDS_ON -> `sqlalchemy`
|
||||
- 📦 **BASE_DIR** (`Variable`)
|
||||
- 📝 Base directory for the backend (where .db files should reside).
|
||||
- 📦 **DATABASE_URL** (`Constant`)
|
||||
- 📝 URL for the main mappings database.
|
||||
- 📦 **TASKS_DATABASE_URL** (`Constant`)
|
||||
@@ -1097,10 +980,6 @@
|
||||
- 📝 Context manager for structured Belief State logging.
|
||||
- ƒ **configure_logger** (`Function`)
|
||||
- 📝 Configures the logger with the provided logging settings.
|
||||
- ƒ **get_task_log_level** (`Function`)
|
||||
- 📝 Returns the current task log level filter.
|
||||
- ƒ **should_log_task_level** (`Function`)
|
||||
- 📝 Checks if a log level should be recorded based on task_log_level setting.
|
||||
- ℂ **WebSocketLogHandler** (`Class`)
|
||||
- 📝 A custom logging handler that captures log records into a buffer. It is designed to be extended for real-time log streaming over WebSockets.
|
||||
- ƒ **__init__** (`Function`)
|
||||
@@ -1353,30 +1232,6 @@
|
||||
- 🔗 CALLS -> `self.load_excel_mappings`
|
||||
- 🔗 CALLS -> `superset_client.get_dataset`
|
||||
- 🔗 CALLS -> `superset_client.update_dataset`
|
||||
- 📦 **TaskLoggerModule** (`Module`) `[CRITICAL]`
|
||||
- 📝 Provides a dedicated logger for tasks with automatic source attribution.
|
||||
- 🏗️ Layer: Core
|
||||
- 🔒 Invariant: Each TaskLogger instance is bound to a specific task_id and default source.
|
||||
- 🔗 DEPENDS_ON -> `TaskManager, CALLS -> TaskManager._add_log`
|
||||
- ℂ **TaskLogger** (`Class`) `[CRITICAL]`
|
||||
- 📝 A wrapper around TaskManager._add_log that carries task_id and source context.
|
||||
- 🔒 Invariant: All log calls include the task_id and source.
|
||||
- ƒ **__init__** (`Function`)
|
||||
- 📝 Initialize the TaskLogger with task context.
|
||||
- ƒ **with_source** (`Function`)
|
||||
- 📝 Create a sub-logger with a different default source.
|
||||
- ƒ **_log** (`Function`)
|
||||
- 📝 Internal method to log a message at a given level.
|
||||
- ƒ **debug** (`Function`)
|
||||
- 📝 Log a DEBUG level message.
|
||||
- ƒ **info** (`Function`)
|
||||
- 📝 Log an INFO level message.
|
||||
- ƒ **warning** (`Function`)
|
||||
- 📝 Log a WARNING level message.
|
||||
- ƒ **error** (`Function`)
|
||||
- 📝 Log an ERROR level message.
|
||||
- ƒ **progress** (`Function`)
|
||||
- 📝 Log a progress update with percentage.
|
||||
- 📦 **TaskPersistenceModule** (`Module`)
|
||||
- 📝 Handles the persistence of tasks using SQLAlchemy and the tasks.db database.
|
||||
- 🏗️ Layer: Core
|
||||
@@ -1393,45 +1248,18 @@
|
||||
- 📝 Loads tasks from the database.
|
||||
- ƒ **delete_tasks** (`Function`)
|
||||
- 📝 Deletes specific tasks from the database.
|
||||
- ℂ **TaskLogPersistenceService** (`Class`) `[CRITICAL]`
|
||||
- 📝 Provides methods to save and query task logs from the task_logs table.
|
||||
- 🔒 Invariant: Log entries are batch-inserted for performance.
|
||||
- 🔗 DEPENDS_ON -> `TaskLogRecord`
|
||||
- ƒ **__init__** (`Function`)
|
||||
- 📝 Initialize the log persistence service.
|
||||
- ƒ **add_logs** (`Function`)
|
||||
- 📝 Batch insert log entries for a task.
|
||||
- ƒ **get_logs** (`Function`)
|
||||
- 📝 Query logs for a task with filtering and pagination.
|
||||
- ƒ **get_log_stats** (`Function`)
|
||||
- 📝 Get statistics about logs for a task.
|
||||
- ƒ **get_sources** (`Function`)
|
||||
- 📝 Get unique sources for a task's logs.
|
||||
- ƒ **delete_logs_for_task** (`Function`)
|
||||
- 📝 Delete all logs for a specific task.
|
||||
- ƒ **delete_logs_for_tasks** (`Function`)
|
||||
- 📝 Delete all logs for multiple tasks.
|
||||
- ƒ **json_serializable** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **TaskManagerModule** (`Module`)
|
||||
- 📝 Manages the lifecycle of tasks, including their creation, execution, and state tracking. It uses a thread pool to run plugins asynchronously.
|
||||
- 🏗️ Layer: Core
|
||||
- 🔒 Invariant: Task IDs are unique.
|
||||
- ℂ **TaskManager** (`Class`) `[CRITICAL]`
|
||||
- ℂ **TaskManager** (`Class`)
|
||||
- 📝 Manages the lifecycle of tasks, including their creation, execution, and state tracking.
|
||||
- 🔒 Invariant: Log entries are never deleted after being added to a task.
|
||||
- ƒ **__init__** (`Function`)
|
||||
- 📝 Initialize the TaskManager with dependencies.
|
||||
- ƒ **_flusher_loop** (`Function`)
|
||||
- 📝 Background thread that periodically flushes log buffer to database.
|
||||
- ƒ **_flush_logs** (`Function`)
|
||||
- 📝 Flush all buffered logs to the database.
|
||||
- ƒ **_flush_task_logs** (`Function`)
|
||||
- 📝 Flush logs for a specific task immediately.
|
||||
- ƒ **create_task** (`Function`)
|
||||
- 📝 Creates and queues a new task for execution.
|
||||
- ƒ **_run_task** (`Function`)
|
||||
- 📝 Internal method to execute a task with TaskContext support.
|
||||
- 📝 Internal method to execute a task.
|
||||
- ƒ **resolve_task** (`Function`)
|
||||
- 📝 Resumes a task that is awaiting mapping.
|
||||
- ƒ **wait_for_resolution** (`Function`)
|
||||
@@ -1445,13 +1273,9 @@
|
||||
- ƒ **get_tasks** (`Function`)
|
||||
- 📝 Retrieves tasks with pagination and optional status filter.
|
||||
- ƒ **get_task_logs** (`Function`)
|
||||
- 📝 Retrieves logs for a specific task (from memory for running, persistence for completed).
|
||||
- ƒ **get_task_log_stats** (`Function`)
|
||||
- 📝 Get statistics about logs for a task.
|
||||
- ƒ **get_task_log_sources** (`Function`)
|
||||
- 📝 Get unique sources for a task's logs.
|
||||
- 📝 Retrieves logs for a specific task.
|
||||
- ƒ **_add_log** (`Function`)
|
||||
- 📝 Adds a log entry to a task buffer and notifies subscribers.
|
||||
- 📝 Adds a log entry to a task and notifies subscribers.
|
||||
- ƒ **subscribe_logs** (`Function`)
|
||||
- 📝 Subscribes to real-time logs for a task.
|
||||
- ƒ **unsubscribe_logs** (`Function`)
|
||||
@@ -1463,64 +1287,31 @@
|
||||
- ƒ **resume_task_with_password** (`Function`)
|
||||
- 📝 Resume a task that is awaiting input with provided passwords.
|
||||
- ƒ **clear_tasks** (`Function`)
|
||||
- 📝 Clears tasks based on status filter (also deletes associated logs).
|
||||
- 📝 Clears tasks based on status filter.
|
||||
- 📦 **TaskManagerModels** (`Module`)
|
||||
- 📝 Defines the data models and enumerations used by the Task Manager.
|
||||
- 🏗️ Layer: Core
|
||||
- 🔒 Invariant: Task IDs are immutable once created.
|
||||
- 📦 **TaskStatus** (`Enum`) `[TRIVIAL]`
|
||||
- 📦 **TaskStatus** (`Enum`)
|
||||
- 📝 Defines the possible states a task can be in during its lifecycle.
|
||||
- 📦 **LogLevel** (`Enum`)
|
||||
- 📝 Defines the possible log levels for task logging.
|
||||
- ℂ **LogEntry** (`Class`) `[CRITICAL]`
|
||||
- ℂ **LogEntry** (`Class`)
|
||||
- 📝 A Pydantic model representing a single, structured log entry associated with a task.
|
||||
- 🔒 Invariant: Each log entry has a unique timestamp and source.
|
||||
- ℂ **TaskLog** (`Class`)
|
||||
- 📝 A Pydantic model representing a persisted log entry from the database.
|
||||
- ℂ **LogFilter** (`Class`)
|
||||
- 📝 Filter parameters for querying task logs.
|
||||
- ℂ **LogStats** (`Class`)
|
||||
- 📝 Statistics about log entries for a task.
|
||||
- ℂ **Task** (`Class`)
|
||||
- 📝 A Pydantic model representing a single execution instance of a plugin, including its status, parameters, and logs.
|
||||
- ƒ **__init__** (`Function`)
|
||||
- 📝 Initializes the Task model and validates input_request for AWAITING_INPUT status.
|
||||
- 📦 **TaskCleanupModule** (`Module`)
|
||||
- 📝 Implements task cleanup and retention policies, including associated logs.
|
||||
- 📝 Implements task cleanup and retention policies.
|
||||
- 🏗️ Layer: Core
|
||||
- ℂ **TaskCleanupService** (`Class`)
|
||||
- 📝 Provides methods to clean up old task records and their associated logs.
|
||||
- 📝 Provides methods to clean up old task records.
|
||||
- ƒ **__init__** (`Function`)
|
||||
- 📝 Initializes the cleanup service with dependencies.
|
||||
- ƒ **run_cleanup** (`Function`)
|
||||
- 📝 Deletes tasks older than the configured retention period and their logs.
|
||||
- ƒ **delete_task_with_logs** (`Function`)
|
||||
- 📝 Delete a single task and all its associated logs.
|
||||
- 📦 **TaskManagerPackage** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Deletes tasks older than the configured retention period.
|
||||
- 📦 **TaskManagerPackage** (`Module`)
|
||||
- 📝 Exports the public API of the task manager package.
|
||||
- 🏗️ Layer: Core
|
||||
- 📦 **TaskContextModule** (`Module`) `[CRITICAL]`
|
||||
- 📝 Provides execution context passed to plugins during task execution.
|
||||
- 🏗️ Layer: Core
|
||||
- 🔒 Invariant: Each TaskContext is bound to a single task execution.
|
||||
- 🔗 DEPENDS_ON -> `TaskLogger, USED_BY -> plugins`
|
||||
- ℂ **TaskContext** (`Class`) `[CRITICAL]`
|
||||
- 📝 A container passed to plugin.execute() providing the logger and other task-specific utilities.
|
||||
- 🔒 Invariant: logger is always a valid TaskLogger instance.
|
||||
- ƒ **__init__** (`Function`)
|
||||
- 📝 Initialize the TaskContext with task-specific resources.
|
||||
- ƒ **task_id** (`Function`)
|
||||
- 📝 Get the task ID.
|
||||
- ƒ **logger** (`Function`)
|
||||
- 📝 Get the TaskLogger instance for this context.
|
||||
- ƒ **params** (`Function`)
|
||||
- 📝 Get the task parameters.
|
||||
- ƒ **get_param** (`Function`)
|
||||
- 📝 Get a specific parameter value with optional default.
|
||||
- ƒ **create_sub_context** (`Function`)
|
||||
- 📝 Create a sub-context with a different default source.
|
||||
- ƒ **execute** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **backend.src.api.auth** (`Module`)
|
||||
- 📝 Authentication API endpoints.
|
||||
- 🏗️ Layer: API
|
||||
@@ -1537,20 +1328,6 @@
|
||||
- 📝 Initiates the ADFS OIDC login flow.
|
||||
- ƒ **auth_callback_adfs** (`Function`)
|
||||
- 📝 Handles the callback from ADFS after successful authentication.
|
||||
- 📦 **router** (`Global`)
|
||||
- 📝 APIRouter instance for LLM routes.
|
||||
- ƒ **get_providers** (`Function`)
|
||||
- 📝 Retrieve all LLM provider configurations.
|
||||
- ƒ **create_provider** (`Function`)
|
||||
- 📝 Create a new LLM provider configuration.
|
||||
- ƒ **update_provider** (`Function`)
|
||||
- 📝 Update an existing LLM provider configuration.
|
||||
- ƒ **delete_provider** (`Function`)
|
||||
- 📝 Delete an LLM provider configuration.
|
||||
- ƒ **test_connection** (`Function`)
|
||||
- 📝 Test connection to an LLM provider.
|
||||
- ƒ **test_provider_config** (`Function`)
|
||||
- 📝 Test connection with a provided configuration (not yet saved).
|
||||
- 📦 **backend.src.api.routes.git** (`Module`)
|
||||
- 📝 Provides FastAPI endpoints for Git integration operations.
|
||||
- 🏗️ Layer: API
|
||||
@@ -1589,8 +1366,6 @@
|
||||
- 📝 Get current Git status for a dashboard repository.
|
||||
- ƒ **get_repository_diff** (`Function`)
|
||||
- 📝 Get Git diff for a dashboard repository.
|
||||
- ƒ **generate_commit_message** (`Function`)
|
||||
- 📝 Generate a suggested commit message using LLM.
|
||||
- 📦 **ConnectionsRouter** (`Module`)
|
||||
- 📝 Defines the FastAPI router for managing external database connections.
|
||||
- 🏗️ Layer: UI (API)
|
||||
@@ -1655,8 +1430,6 @@
|
||||
- 🔒 Invariant: All settings changes must be persisted via ConfigManager.
|
||||
- 🔗 DEPENDS_ON -> `ConfigManager`
|
||||
- 🔗 DEPENDS_ON -> `ConfigModels`
|
||||
- ℂ **LoggingConfigResponse** (`Class`)
|
||||
- 📝 Response model for logging configuration with current task log level.
|
||||
- ƒ **get_settings** (`Function`)
|
||||
- 📝 Retrieves all application settings.
|
||||
- ƒ **update_global_settings** (`Function`)
|
||||
@@ -1675,10 +1448,6 @@
|
||||
- 📝 Deletes a Superset environment.
|
||||
- ƒ **test_environment_connection** (`Function`)
|
||||
- 📝 Tests the connection to a Superset environment.
|
||||
- ƒ **get_logging_config** (`Function`)
|
||||
- 📝 Retrieves current logging configuration.
|
||||
- ƒ **update_logging_config** (`Function`)
|
||||
- 📝 Updates logging configuration.
|
||||
- 📦 **backend.src.api.routes.admin** (`Module`)
|
||||
- 📝 Admin API endpoints for user and role management.
|
||||
- 🏗️ Layer: API
|
||||
@@ -1769,53 +1538,30 @@
|
||||
- 📝 Retrieve a list of tasks with pagination and optional status filter.
|
||||
- ƒ **get_task** (`Function`)
|
||||
- 📝 Retrieve the details of a specific task.
|
||||
- ƒ **get_task_logs** (`Function`) `[CRITICAL]`
|
||||
- 📝 Retrieve logs for a specific task with optional filtering.
|
||||
- ƒ **get_task_log_stats** (`Function`)
|
||||
- 📝 Get statistics about logs for a task (counts by level and source).
|
||||
- ƒ **get_task_log_sources** (`Function`)
|
||||
- 📝 Get unique sources for a task's logs.
|
||||
- ƒ **get_task_logs** (`Function`)
|
||||
- 📝 Retrieve logs for a specific task.
|
||||
- ƒ **resolve_task** (`Function`)
|
||||
- 📝 Resolve a task that is awaiting mapping.
|
||||
- ƒ **resume_task** (`Function`)
|
||||
- 📝 Resume a task that is awaiting input (e.g., passwords).
|
||||
- ƒ **clear_tasks** (`Function`)
|
||||
- 📝 Clear tasks matching the status filter.
|
||||
- 📦 **backend.src.models.llm** (`Module`)
|
||||
- 📝 SQLAlchemy models for LLM provider configuration and validation results.
|
||||
- 🏗️ Layer: Domain
|
||||
- ℂ **LLMProvider** (`Class`)
|
||||
- 📝 SQLAlchemy model for LLM provider configuration.
|
||||
- ℂ **ValidationRecord** (`Class`)
|
||||
- 📝 SQLAlchemy model for dashboard validation history.
|
||||
- ƒ **generate_uuid** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **GitModels** (`Module`) `[TRIVIAL]`
|
||||
- 📦 **GitModels** (`Module`)
|
||||
- 📝 Git-specific SQLAlchemy models for configuration and repository tracking.
|
||||
- 🏗️ Layer: Model
|
||||
- ℂ **GitServerConfig** (`Class`) `[TRIVIAL]`
|
||||
- 📝 Configuration for a Git server connection.
|
||||
- ℂ **GitRepository** (`Class`) `[TRIVIAL]`
|
||||
- 📝 Tracking for a local Git repository linked to a dashboard.
|
||||
- ℂ **DeploymentEnvironment** (`Class`) `[TRIVIAL]`
|
||||
- 📝 Target Superset environments for dashboard deployment.
|
||||
- 📦 **backend.src.models.task** (`Module`) `[TRIVIAL]`
|
||||
- 📦 **backend.src.models.task** (`Module`)
|
||||
- 📝 Defines the database schema for task execution records.
|
||||
- 🏗️ Layer: Domain
|
||||
- 🔒 Invariant: All primary keys are UUID strings.
|
||||
- 🔗 DEPENDS_ON -> `sqlalchemy`
|
||||
- ℂ **TaskRecord** (`Class`) `[TRIVIAL]`
|
||||
- ℂ **TaskRecord** (`Class`)
|
||||
- 📝 Represents a persistent record of a task execution.
|
||||
- ℂ **TaskLogRecord** (`Class`) `[CRITICAL]`
|
||||
- 📝 Represents a single persistent log entry for a task.
|
||||
- 🔒 Invariant: Each log entry belongs to exactly one task.
|
||||
- 🔗 DEPENDS_ON -> `TaskRecord`
|
||||
- 📦 **backend.src.models.connection** (`Module`) `[TRIVIAL]`
|
||||
- 📦 **backend.src.models.connection** (`Module`)
|
||||
- 📝 Defines the database schema for external database connection configurations.
|
||||
- 🏗️ Layer: Domain
|
||||
- 🔒 Invariant: All primary keys are UUID strings.
|
||||
- 🔗 DEPENDS_ON -> `sqlalchemy`
|
||||
- ℂ **ConnectionConfig** (`Class`) `[TRIVIAL]`
|
||||
- ℂ **ConnectionConfig** (`Class`)
|
||||
- 📝 Stores credentials for external databases used for column mapping.
|
||||
- 📦 **backend.src.models.mapping** (`Module`)
|
||||
- 📝 Defines the database schema for environment metadata and database mappings using SQLAlchemy.
|
||||
@@ -1828,17 +1574,14 @@
|
||||
- 📝 Represents a Superset instance environment.
|
||||
- ℂ **DatabaseMapping** (`Class`)
|
||||
- 📝 Represents a mapping between source and target databases.
|
||||
- ℂ **MigrationJob** (`Class`) `[TRIVIAL]`
|
||||
- ℂ **MigrationJob** (`Class`)
|
||||
- 📝 Represents a single migration execution job.
|
||||
- 📦 **backend.src.models.storage** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Data models for the storage system.
|
||||
- 🏗️ Layer: Domain
|
||||
- ℂ **FileCategory** (`Class`) `[TRIVIAL]`
|
||||
- 📝 Enumeration of supported file categories in the storage system.
|
||||
- ℂ **StorageConfig** (`Class`) `[TRIVIAL]`
|
||||
- 📝 Configuration model for the storage system, defining paths and naming patterns.
|
||||
- ℂ **StoredFile** (`Class`) `[TRIVIAL]`
|
||||
- 📝 Data model representing metadata for a file stored in the system.
|
||||
- ℂ **FileCategory** (`Class`)
|
||||
- 📝 Enumeration of supported file categories in the storage system.
|
||||
- ℂ **StorageConfig** (`Class`)
|
||||
- 📝 Configuration model for the storage system, defining paths and naming patterns.
|
||||
- ℂ **StoredFile** (`Class`)
|
||||
- 📝 Data model representing metadata for a file stored in the system.
|
||||
- 📦 **backend.src.models.dashboard** (`Module`)
|
||||
- 📝 Defines data models for dashboard metadata and selection.
|
||||
- 🏗️ Layer: Model
|
||||
@@ -1865,44 +1608,6 @@
|
||||
- ℂ **ADGroupMapping** (`Class`)
|
||||
- 📝 Maps an Active Directory group to a local System Role.
|
||||
- 🔗 DEPENDS_ON -> `Role`
|
||||
- 📦 **backend.src.services.llm_provider** (`Module`)
|
||||
- 📝 Service for managing LLM provider configurations with encrypted API keys.
|
||||
- 🏗️ Layer: Domain
|
||||
- 🔗 DEPENDS_ON -> `backend.src.core.database`
|
||||
- 🔗 DEPENDS_ON -> `backend.src.models.llm`
|
||||
- ℂ **EncryptionManager** (`Class`) `[CRITICAL]`
|
||||
- 📝 Handles encryption and decryption of sensitive data like API keys.
|
||||
- 🔒 Invariant: Uses a secret key from environment or a default one (fallback only for dev).
|
||||
- ƒ **EncryptionManager.__init__** (`Function`)
|
||||
- 📝 Initialize the encryption manager with a Fernet key.
|
||||
- ƒ **EncryptionManager.encrypt** (`Function`)
|
||||
- 📝 Encrypt a plaintext string.
|
||||
- ƒ **EncryptionManager.decrypt** (`Function`)
|
||||
- 📝 Decrypt an encrypted string.
|
||||
- ℂ **LLMProviderService** (`Class`)
|
||||
- 📝 Service to manage LLM provider lifecycle.
|
||||
- ƒ **LLMProviderService.__init__** (`Function`)
|
||||
- 📝 Initialize the service with database session.
|
||||
- ƒ **get_all_providers** (`Function`)
|
||||
- 📝 Returns all configured LLM providers.
|
||||
- ƒ **get_provider** (`Function`)
|
||||
- 📝 Returns a single LLM provider by ID.
|
||||
- ƒ **create_provider** (`Function`)
|
||||
- 📝 Creates a new LLM provider with encrypted API key.
|
||||
- ƒ **update_provider** (`Function`)
|
||||
- 📝 Updates an existing LLM provider.
|
||||
- ƒ **delete_provider** (`Function`)
|
||||
- 📝 Deletes an LLM provider.
|
||||
- ƒ **get_decrypted_api_key** (`Function`)
|
||||
- 📝 Returns the decrypted API key for a provider.
|
||||
- ƒ **__init__** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **encrypt** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **decrypt** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **__init__** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **backend.src.services.auth_service** (`Module`)
|
||||
- 📝 Orchestrates authentication business logic.
|
||||
- 🏗️ Layer: Service
|
||||
@@ -1986,7 +1691,7 @@
|
||||
- ƒ **get_schema** (`Function`)
|
||||
- 📝 Returns the JSON schema for backup plugin parameters.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Executes the dashboard backup logic with TaskContext support.
|
||||
- 📝 Executes the dashboard backup logic.
|
||||
- 📦 **DebugPluginModule** (`Module`)
|
||||
- 📝 Implements a plugin for system diagnostics and debugging Superset API responses.
|
||||
- 🏗️ Layer: Plugins
|
||||
@@ -2005,7 +1710,7 @@
|
||||
- ƒ **get_schema** (`Function`)
|
||||
- 📝 Returns the JSON schema for the debug plugin parameters.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Executes the debug logic with TaskContext support.
|
||||
- 📝 Executes the debug logic.
|
||||
- ƒ **_test_db_api** (`Function`)
|
||||
- 📝 Tests database API connectivity for source and target environments.
|
||||
- ƒ **_get_dataset_structure** (`Function`)
|
||||
@@ -2028,7 +1733,7 @@
|
||||
- ƒ **get_schema** (`Function`)
|
||||
- 📝 Returns the JSON schema for the search plugin parameters.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Executes the dataset search logic with TaskContext support.
|
||||
- 📝 Executes the dataset search logic.
|
||||
- ƒ **_get_context** (`Function`)
|
||||
- 📝 Extracts a small context around the match for display.
|
||||
- 📦 **MapperPluginModule** (`Module`)
|
||||
@@ -2049,7 +1754,7 @@
|
||||
- ƒ **get_schema** (`Function`)
|
||||
- 📝 Returns the JSON schema for the mapper plugin parameters.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Executes the dataset mapping logic with TaskContext support.
|
||||
- 📝 Executes the dataset mapping logic.
|
||||
- 📦 **backend.src.plugins.git_plugin** (`Module`)
|
||||
- 📝 Предоставляет плагин для версионирования и развертывания дашбордов Superset.
|
||||
- 🏗️ Layer: Plugin
|
||||
@@ -2073,7 +1778,7 @@
|
||||
- ƒ **initialize** (`Function`)
|
||||
- 📝 Выполняет начальную настройку плагина.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Основной метод выполнения задач плагина с поддержкой TaskContext.
|
||||
- 📝 Основной метод выполнения задач плагина.
|
||||
- 🔗 CALLS -> `self._handle_sync`
|
||||
- 🔗 CALLS -> `self._handle_deploy`
|
||||
- ƒ **_handle_sync** (`Function`)
|
||||
@@ -2106,96 +1811,9 @@
|
||||
- ƒ **get_schema** (`Function`)
|
||||
- 📝 Returns the JSON schema for migration plugin parameters.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Executes the dashboard migration logic with TaskContext support.
|
||||
- 📝 Executes the dashboard migration logic.
|
||||
- 📦 **MigrationPlugin.execute** (`Action`)
|
||||
- 📝 Execute the migration logic with proper task logging.
|
||||
- ƒ **schedule_dashboard_validation** (`Function`)
|
||||
- 📝 Schedules a recurring dashboard validation task.
|
||||
- ƒ **_parse_cron** (`Function`)
|
||||
- 📝 Basic cron parser placeholder.
|
||||
- 📦 **scheduler** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for backend/src/plugins/llm_analysis/scheduler.py
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **job_func** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ℂ **LLMProviderType** (`Class`)
|
||||
- 📝 Enum for supported LLM providers.
|
||||
- ℂ **LLMProviderConfig** (`Class`)
|
||||
- 📝 Configuration for an LLM provider.
|
||||
- ℂ **ValidationStatus** (`Class`)
|
||||
- 📝 Enum for dashboard validation status.
|
||||
- ℂ **DetectedIssue** (`Class`)
|
||||
- 📝 Model for a single issue detected during validation.
|
||||
- ℂ **ValidationResult** (`Class`)
|
||||
- 📝 Model for dashboard validation result.
|
||||
- ℂ **DashboardValidationPlugin** (`Class`)
|
||||
- 📝 Plugin for automated dashboard health analysis using LLMs.
|
||||
- 🔗 IMPLEMENTS -> `backend.src.core.plugin_base.PluginBase`
|
||||
- ƒ **DashboardValidationPlugin.execute** (`Function`)
|
||||
- 📝 Executes the dashboard validation task with TaskContext support.
|
||||
- ℂ **DocumentationPlugin** (`Class`)
|
||||
- 📝 Plugin for automated dataset documentation using LLMs.
|
||||
- 🔗 IMPLEMENTS -> `backend.src.core.plugin_base.PluginBase`
|
||||
- ƒ **DocumentationPlugin.execute** (`Function`)
|
||||
- 📝 Executes the dataset documentation task with TaskContext support.
|
||||
- 📦 **plugin** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for backend/src/plugins/llm_analysis/plugin.py
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **id** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **name** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **description** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **version** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **get_schema** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **execute** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **id** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **name** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **description** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **version** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **get_schema** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **execute** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ℂ **ScreenshotService** (`Class`)
|
||||
- 📝 Handles capturing screenshots of Superset dashboards.
|
||||
- ƒ **ScreenshotService.__init__** (`Function`)
|
||||
- 📝 Initializes the ScreenshotService with environment configuration.
|
||||
- ƒ **ScreenshotService.capture_dashboard** (`Function`)
|
||||
- 📝 Captures a full-page screenshot of a dashboard using Playwright and CDP.
|
||||
- ℂ **LLMClient** (`Class`)
|
||||
- 📝 Wrapper for LLM provider APIs.
|
||||
- ƒ **LLMClient.__init__** (`Function`)
|
||||
- 📝 Initializes the LLMClient with provider settings.
|
||||
- ƒ **LLMClient.get_json_completion** (`Function`)
|
||||
- 📝 Helper to handle LLM calls with JSON mode and fallback parsing.
|
||||
- ƒ **LLMClient.analyze_dashboard** (`Function`)
|
||||
- 📝 Sends dashboard data (screenshot + logs) to LLM for health analysis.
|
||||
- 📦 **service** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for backend/src/plugins/llm_analysis/service.py
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **__init__** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **capture_dashboard** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **switch_tabs** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **__init__** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **_should_retry** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **get_json_completion** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **analyze_dashboard** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **StoragePlugin** (`Module`)
|
||||
- 📝 Provides core filesystem operations for managing backups and repositories.
|
||||
- 🏗️ Layer: App
|
||||
@@ -2219,7 +1837,7 @@
|
||||
- ƒ **get_schema** (`Function`)
|
||||
- 📝 Returns the JSON schema for storage plugin parameters.
|
||||
- ƒ **execute** (`Function`)
|
||||
- 📝 Executes storage-related tasks with TaskContext support.
|
||||
- 📝 Executes storage-related tasks (placeholder for PluginBase compliance).
|
||||
- ƒ **get_storage_root** (`Function`)
|
||||
- 📝 Resolves the absolute path to the storage root.
|
||||
- ƒ **resolve_path** (`Function`)
|
||||
@@ -2236,84 +1854,14 @@
|
||||
- 📝 Deletes a file or directory from the specified category and path.
|
||||
- ƒ **get_file_path** (`Function`)
|
||||
- 📝 Returns the absolute path of a file for download.
|
||||
- ℂ **GitLLMExtension** (`Class`)
|
||||
- 📝 Provides LLM capabilities to the Git plugin.
|
||||
- ƒ **suggest_commit_message** (`Function`)
|
||||
- 📝 Generates a suggested commit message based on a diff and history.
|
||||
- 📦 **llm_extension** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for backend/src/plugins/git/llm_extension.py
|
||||
- 🏗️ Layer: Unknown
|
||||
- ƒ **__init__** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **test_environment_model** (`Function`)
|
||||
- 📝 Tests that Environment model correctly stores values.
|
||||
- 📦 **test_task_logger** (`Module`)
|
||||
- 📝 Unit tests for TaskLogger and TaskContext.
|
||||
- 🏗️ Layer: Test
|
||||
- ℂ **TestTaskLogger** (`Class`)
|
||||
- 📝 Test suite for TaskLogger.
|
||||
- ƒ **setup_method** (`Function`)
|
||||
- 📝 Setup for each test method.
|
||||
- ƒ **test_init** (`Function`)
|
||||
- 📝 Test TaskLogger initialization.
|
||||
- ƒ **test_with_source** (`Function`)
|
||||
- 📝 Test creating a sub-logger with different source.
|
||||
- ƒ **test_debug** (`Function`)
|
||||
- 📝 Test debug log level.
|
||||
- ƒ **test_info** (`Function`)
|
||||
- 📝 Test info log level.
|
||||
- ƒ **test_warning** (`Function`)
|
||||
- 📝 Test warning log level.
|
||||
- ƒ **test_error** (`Function`)
|
||||
- 📝 Test error log level.
|
||||
- ƒ **test_error_with_metadata** (`Function`)
|
||||
- 📝 Test error logging with metadata.
|
||||
- ƒ **test_progress** (`Function`)
|
||||
- 📝 Test progress logging.
|
||||
- ƒ **test_progress_clamping** (`Function`)
|
||||
- 📝 Test progress value clamping (0-100).
|
||||
- ƒ **test_source_override** (`Function`)
|
||||
- 📝 Test overriding the default source.
|
||||
- ƒ **test_sub_logger_source_independence** (`Function`)
|
||||
- 📝 Test sub-logger independence from parent.
|
||||
- ℂ **TestTaskContext** (`Class`)
|
||||
- 📝 Test suite for TaskContext.
|
||||
- ƒ **setup_method** (`Function`)
|
||||
- 📝 Setup for each test method.
|
||||
- ƒ **test_init** (`Function`)
|
||||
- 📝 Test TaskContext initialization.
|
||||
- ƒ **test_task_id_property** (`Function`)
|
||||
- 📝 Test task_id property.
|
||||
- ƒ **test_logger_property** (`Function`)
|
||||
- 📝 Test logger property.
|
||||
- ƒ **test_params_property** (`Function`)
|
||||
- 📝 Test params property.
|
||||
- ƒ **test_get_param** (`Function`)
|
||||
- 📝 Test getting a specific parameter.
|
||||
- ƒ **test_create_sub_context** (`Function`)
|
||||
- 📝 Test creating a sub-context with different source.
|
||||
- ƒ **test_context_logger_delegates_to_task_logger** (`Function`)
|
||||
- 📝 Test context logger delegates to TaskLogger.
|
||||
- ƒ **test_sub_context_with_source** (`Function`)
|
||||
- 📝 Test sub-context logger uses new source.
|
||||
- ƒ **test_multiple_sub_contexts** (`Function`)
|
||||
- 📝 Test creating multiple sub-contexts.
|
||||
- ƒ **test_belief_scope_logs_entry_action_exit_at_debug** (`Function`)
|
||||
- 📝 Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level.
|
||||
- ƒ **test_belief_scope_logs_entry_action_exit** (`Function`)
|
||||
- 📝 Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs.
|
||||
- ƒ **test_belief_scope_error_handling** (`Function`)
|
||||
- 📝 Test that belief_scope logs Coherence:Failed on exception.
|
||||
- ƒ **test_belief_scope_success_coherence** (`Function`)
|
||||
- 📝 Test that belief_scope logs Coherence:OK on success.
|
||||
- ƒ **test_belief_scope_not_visible_at_info** (`Function`)
|
||||
- 📝 Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level.
|
||||
- ƒ **test_task_log_level_default** (`Function`)
|
||||
- 📝 Test that default task log level is INFO.
|
||||
- ƒ **test_should_log_task_level** (`Function`)
|
||||
- 📝 Test that should_log_task_level correctly filters log levels.
|
||||
- ƒ **test_configure_logger_task_log_level** (`Function`)
|
||||
- 📝 Test that configure_logger updates task_log_level.
|
||||
- ƒ **test_enable_belief_state_flag** (`Function`)
|
||||
- 📝 Test that enable_belief_state flag controls belief_scope logging.
|
||||
- 📦 **test_auth** (`Module`) `[TRIVIAL]`
|
||||
- 📝 Auto-generated module for backend/tests/test_auth.py
|
||||
- 🏗️ Layer: Unknown
|
||||
@@ -2335,34 +1883,3 @@
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- ƒ **test_ad_group_mapping** (`Function`) `[TRIVIAL]`
|
||||
- 📝 Auto-detected function (orphan)
|
||||
- 📦 **test_log_persistence** (`Module`)
|
||||
- 📝 Unit tests for TaskLogPersistenceService.
|
||||
- 🏗️ Layer: Test
|
||||
- ℂ **TestLogPersistence** (`Class`)
|
||||
- 📝 Test suite for TaskLogPersistenceService.
|
||||
- ƒ **setup_class** (`Function`)
|
||||
- 📝 Setup test database and service instance.
|
||||
- ƒ **teardown_class** (`Function`)
|
||||
- 📝 Clean up test database.
|
||||
- ƒ **setup_method** (`Function`)
|
||||
- 📝 Setup for each test method.
|
||||
- ƒ **teardown_method** (`Function`)
|
||||
- 📝 Cleanup after each test method.
|
||||
- ƒ **test_add_log_single** (`Function`)
|
||||
- 📝 Test adding a single log entry.
|
||||
- ƒ **test_add_log_batch** (`Function`)
|
||||
- 📝 Test adding multiple log entries in batch.
|
||||
- ƒ **test_get_logs_by_task_id** (`Function`)
|
||||
- 📝 Test retrieving logs by task ID.
|
||||
- ƒ **test_get_logs_with_filters** (`Function`)
|
||||
- 📝 Test retrieving logs with level and source filters.
|
||||
- ƒ **test_get_logs_with_pagination** (`Function`)
|
||||
- 📝 Test retrieving logs with pagination.
|
||||
- ƒ **test_get_logs_with_search** (`Function`)
|
||||
- 📝 Test retrieving logs with search query.
|
||||
- ƒ **test_get_log_stats** (`Function`)
|
||||
- 📝 Test retrieving log statistics.
|
||||
- ƒ **test_get_log_sources** (`Function`)
|
||||
- 📝 Test retrieving unique log sources.
|
||||
- ƒ **test_delete_logs_by_task_id** (`Function`)
|
||||
- 📝 Test deleting logs by task ID.
|
||||
|
||||
Reference in New Issue
Block a user