Fix LLM validation and dashboard health hot paths

This commit is contained in:
2026-03-15 13:18:51 +03:00
parent 3928455189
commit a8563a8369
24 changed files with 1398 additions and 83 deletions

View File

@@ -547,12 +547,12 @@ async def get_dashboards(
)
try:
profile_preference = profile_service.get_my_preference(current_user).preference
profile_preference = profile_service.get_dashboard_filter_binding(current_user)
normalized_username = str(
getattr(profile_preference, "superset_username_normalized", None) or ""
profile_preference.get("superset_username_normalized") or ""
).strip().lower()
raw_username = str(
getattr(profile_preference, "superset_username", None) or ""
profile_preference.get("superset_username") or ""
).strip().lower()
bound_username = normalized_username or raw_username or None
@@ -560,14 +560,14 @@ async def get_dashboards(
page_context == "dashboards_main"
and bool(apply_profile_default)
and not bool(override_show_all)
and bool(getattr(profile_preference, "show_only_my_dashboards", False))
and bool(profile_preference.get("show_only_my_dashboards", False))
and bool(bound_username)
)
can_apply_slug_filter = (
page_context == "dashboards_main"
and bool(apply_profile_default)
and not bool(override_show_all)
and bool(getattr(profile_preference, "show_only_slug_dashboards", True))
and bool(profile_preference.get("show_only_slug_dashboards", True))
)
profile_match_logic = None

View File

@@ -5,27 +5,58 @@
# @LAYER: UI/API
# @RELATION: DEPENDS_ON -> health_service
from fastapi import APIRouter, Depends, Query
from fastapi import APIRouter, Depends, Query, HTTPException, status
from typing import List, Optional
from sqlalchemy.orm import Session
from ...core.database import get_db
from ...services.health_service import HealthService
from ...schemas.health import HealthSummaryResponse
from ...dependencies import has_permission
from ...dependencies import has_permission, get_config_manager, get_task_manager
router = APIRouter(prefix="/api/health", tags=["Health"])
# [DEF:get_health_summary:Function]
# @PURPOSE: Get aggregated health status for all dashboards.
# @PRE: Caller has read permission for dashboard health view.
# @POST: Returns HealthSummaryResponse.
# @RELATION: CALLS -> backend.src.services.health_service.HealthService
@router.get("/summary", response_model=HealthSummaryResponse)
async def get_health_summary(
    environment_id: Optional[str] = Query(None),
    db: Session = Depends(get_db),
    config_manager = Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """
    @PURPOSE: Get aggregated health status for all dashboards.
    @POST: Returns HealthSummaryResponse
    """
    # Construct the service once, with the injected config manager so
    # environment-specific settings are honored; the older bare
    # HealthService(db) construction was redundant and is removed.
    service = HealthService(db, config_manager=config_manager)
    return await service.get_health_summary(environment_id=environment_id)
# [/DEF:get_health_summary:Function]
# [/DEF:health_router:Module]
# [DEF:delete_health_report:Function]
# @PURPOSE: Delete one persisted dashboard validation report from health summary.
# @PRE: Caller has write permission for tasks/report maintenance.
# @POST: Validation record is removed; linked task/logs are cleaned when available.
# @RELATION: CALLS -> backend.src.services.health_service.HealthService
@router.delete("/summary/{record_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_health_report(
    record_id: str,
    db: Session = Depends(get_db),
    config_manager = Depends(get_config_manager),
    task_manager = Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "WRITE")),
):
    """
    @PURPOSE: Delete a persisted dashboard validation report from health summary.
    @POST: Validation record is removed; linked task/logs are deleted when present.
    """
    health_service = HealthService(db, config_manager=config_manager)
    deleted = health_service.delete_validation_report(record_id, task_manager=task_manager)
    if deleted:
        # 204 No Content on success; the decorator supplies the status code.
        return
    raise HTTPException(status_code=404, detail="Health report not found")
# [/DEF:delete_health_report:Function]
# [/DEF:health_router:Module]

View File

@@ -205,8 +205,7 @@ async def test_connection(
)
try:
# Simple test call
await client.client.models.list()
await client.test_runtime_connection()
return {"success": True, "message": "Connection successful"}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -242,8 +241,7 @@ async def test_provider_config(
)
try:
# Simple test call
await client.client.models.list()
await client.test_runtime_connection()
return {"success": True, "message": "Connection successful"}
except Exception as e:
return {"success": False, "error": str(e)}

View File

@@ -13,7 +13,7 @@
# [SECTION: IMPORTS]
from typing import List, Optional
from sqlalchemy.orm import Session
from sqlalchemy.orm import Session, selectinload
from ...models.auth import Permission, Role, User
from ...models.profile import UserDashboardPreference
@@ -53,7 +53,12 @@ class AuthRepository:
raise ValueError("username must be a non-empty string")
logger.reason(f"Querying user by username: {username}")
user = self.db.query(User).filter(User.username == username).first()
user = (
self.db.query(User)
.options(selectinload(User.roles).selectinload(Role.permissions))
.filter(User.username == username)
.first()
)
if user:
logger.reflect(f"User found: {username}")
@@ -199,4 +204,4 @@ class AuthRepository:
# [/DEF:AuthRepository:Class]
# [/DEF:backend.src.core.auth.repository:Module]
# [/DEF:backend.src.core.auth.repository:Module]

View File

@@ -0,0 +1,29 @@
# [DEF:backend.src.core.task_manager.__tests__.test_context:Module]
# @TIER: STANDARD
# @SEMANTICS: tests, task-context, background-tasks, sub-context
# @PURPOSE: Verify TaskContext preserves optional background task scheduler across sub-context creation.
from unittest.mock import MagicMock
from src.core.task_manager.context import TaskContext
# [DEF:test_task_context_preserves_background_tasks_across_sub_context:Function]
# @PURPOSE: Plugins must be able to access background_tasks from both root and sub-context loggers.
# @PRE: TaskContext is initialized with a BackgroundTasks-like object.
# @POST: background_tasks remains available on root and derived sub-contexts.
def test_task_context_preserves_background_tasks_across_sub_context():
    # The scheduler is opaque to TaskContext; identity preservation is the contract.
    scheduler = MagicMock()
    root_context = TaskContext(
        task_id="task-1",
        add_log_fn=lambda **_kwargs: None,
        params={"x": 1},
        background_tasks=scheduler,
    )
    derived = root_context.create_sub_context("llm")
    assert root_context.background_tasks is scheduler
    assert derived.background_tasks is scheduler
# [/DEF:test_task_context_preserves_background_tasks_across_sub_context:Function]
# [/DEF:backend.src.core.task_manager.__tests__.test_context:Module]

View File

@@ -8,7 +8,7 @@
# [SECTION: IMPORTS]
# [SECTION: IMPORTS]
from typing import Dict, Any, Callable
from typing import Dict, Any, Callable, Optional
from .task_logger import TaskLogger
from ..logger import belief_scope
# [/SECTION]
@@ -58,11 +58,13 @@ class TaskContext:
task_id: str,
add_log_fn: Callable,
params: Dict[str, Any],
default_source: str = "plugin"
default_source: str = "plugin",
background_tasks: Optional[Any] = None,
):
with belief_scope("__init__"):
self._task_id = task_id
self._params = params
self._background_tasks = background_tasks
self._logger = TaskLogger(
task_id=task_id,
add_log_fn=add_log_fn,
@@ -102,6 +104,16 @@ class TaskContext:
with belief_scope("params"):
return self._params
# [/DEF:params:Function]
# [DEF:background_tasks:Function]
# @PURPOSE: Expose optional background task scheduler for plugins that dispatch deferred side effects.
# @PRE: TaskContext must be initialized.
# @POST: Returns BackgroundTasks-like object or None.
@property
def background_tasks(self) -> Optional[Any]:
    # belief_scope tags any log records emitted while this property is read;
    # the value itself is whatever was passed to __init__ (may be None).
    with belief_scope("background_tasks"):
        return self._background_tasks
# [/DEF:background_tasks:Function]
# [DEF:get_param:Function]
# @PURPOSE: Get a specific parameter value with optional default.
@@ -128,7 +140,8 @@ class TaskContext:
task_id=self._task_id,
add_log_fn=self._logger._add_log,
params=self._params,
default_source=source
default_source=source,
background_tasks=self._background_tasks,
)
# [/DEF:create_sub_context:Function]

View File

@@ -208,7 +208,8 @@ class TaskManager:
task_id=task_id,
add_log_fn=self._add_log,
params=params,
default_source="plugin"
default_source="plugin",
background_tasks=None,
)
if asyncio.iscoroutinefunction(plugin.execute):

View File

@@ -0,0 +1,30 @@
# [DEF:backend.src.plugins.llm_analysis.__tests__.test_client_headers:Module]
# @TIER: STANDARD
# @SEMANTICS: tests, llm-client, openrouter, headers
# @PURPOSE: Verify OpenRouter client initialization includes provider-specific headers.
from src.plugins.llm_analysis.models import LLMProviderType
from src.plugins.llm_analysis.service import LLMClient
# [DEF:test_openrouter_client_includes_referer_and_title_headers:Function]
# @PURPOSE: OpenRouter requests should carry site/app attribution headers for compatibility.
# @PRE: Client is initialized for OPENROUTER provider.
# @POST: Async client headers include Authorization, HTTP-Referer, and X-Title.
def test_openrouter_client_includes_referer_and_title_headers(monkeypatch):
    # Attribution headers are sourced from environment variables at init time.
    monkeypatch.setenv("OPENROUTER_SITE_URL", "http://localhost:8000")
    monkeypatch.setenv("OPENROUTER_APP_NAME", "ss-tools-test")
    llm_client = LLMClient(
        provider_type=LLMProviderType.OPENROUTER,
        api_key="sk-test-provider-key-123456",
        base_url="https://openrouter.ai/api/v1",
        default_model="nvidia/nemotron-nano-12b-v2-vl:free",
    )
    header_map = dict(llm_client.client.default_headers)
    assert header_map["Authorization"] == "Bearer sk-test-provider-key-123456"
    assert header_map["HTTP-Referer"] == "http://localhost:8000"
    assert header_map["X-Title"] == "ss-tools-test"
# [/DEF:test_openrouter_client_includes_referer_and_title_headers:Function]
# [/DEF:backend.src.plugins.llm_analysis.__tests__.test_client_headers:Module]

View File

@@ -0,0 +1,344 @@
# [DEF:backend.src.plugins.llm_analysis.__tests__.test_screenshot_service:Module]
# @TIER: STANDARD
# @SEMANTICS: tests, screenshot-service, navigation, timeout-regression
# @PURPOSE: Protect dashboard screenshot navigation from brittle networkidle waits.
import pytest
from src.plugins.llm_analysis.service import ScreenshotService
# [DEF:test_iter_login_roots_includes_child_frames:Function]
# @PURPOSE: Login discovery must search embedded auth frames, not only the main page.
# @PRE: Page exposes child frames list.
# @POST: Returned roots include page plus child frames in order.
def test_iter_login_roots_includes_child_frames():
    first_frame = object()
    second_frame = object()
    # Minimal page double: only the `frames` attribute is consulted.
    page_stub = type("FakePage", (), {"frames": [first_frame, second_frame]})()
    svc = ScreenshotService(env=type("Env", (), {})())
    assert svc._iter_login_roots(page_stub) == [page_stub, first_frame, second_frame]
# [/DEF:test_iter_login_roots_includes_child_frames:Function]
# [DEF:test_response_looks_like_login_page_detects_login_markup:Function]
# @PURPOSE: Direct login fallback must reject responses that render the login screen again.
# @PRE: Response body contains stable login-page markers.
# @POST: Helper returns True so caller treats fallback as failed authentication.
def test_response_looks_like_login_page_detects_login_markup():
    svc = ScreenshotService(env=type("Env", (), {})())
    # Four of the five known login markers appear here, above the >=3 threshold.
    login_markup = """
        <html>
        <body>
        <p>Enter your login and password below</p>
        <label>Username:</label>
        <label>Password:</label>
        <button>Sign in</button>
        </body>
        </html>
        """
    assert svc._response_looks_like_login_page(login_markup) is True
# [/DEF:test_response_looks_like_login_page_detects_login_markup:Function]
# [DEF:test_find_first_visible_locator_skips_hidden_first_match:Function]
# @PURPOSE: Locator helper must not reject a selector collection just because its first element is hidden.
# @PRE: First matched element is hidden and second matched element is visible.
# @POST: Helper returns the second visible candidate.
@pytest.mark.anyio
async def test_find_first_visible_locator_skips_hidden_first_match():
    class _StubElement:
        def __init__(self, visible, label):
            self._visible = visible
            self.label = label
        async def is_visible(self):
            return self._visible
    class _StubLocator:
        def __init__(self, elements):
            self._elements = elements
        async def count(self):
            return len(self._elements)
        def nth(self, index):
            return self._elements[index]
    svc = ScreenshotService(env=type("Env", (), {})())
    # One locator strategy whose first match is hidden and second is visible.
    locator_with_hidden_first = _StubLocator([
        _StubElement(False, "hidden"),
        _StubElement(True, "visible"),
    ])
    chosen = await svc._find_first_visible_locator([locator_with_hidden_first])
    assert chosen.label == "visible"
# [/DEF:test_find_first_visible_locator_skips_hidden_first_match:Function]
# [DEF:test_submit_login_via_form_post_uses_browser_context_request:Function]
# @PURPOSE: Fallback login must submit hidden fields and credentials through the context request cookie jar.
# @PRE: Login DOM exposes csrf hidden field and request context returns authenticated HTML.
# @POST: Helper returns True and request payload contains csrf_token plus credentials plus request options.
@pytest.mark.anyio
async def test_submit_login_via_form_post_uses_browser_context_request():
    # Playwright doubles: just enough surface for _submit_login_via_form_post.
    class _FakeInput:
        def __init__(self, name, value):
            self._name = name
            self._value = value
        async def get_attribute(self, attr_name):
            return self._name if attr_name == "name" else None
        async def input_value(self):
            return self._value
    class _FakeLocator:
        def __init__(self, items):
            self._items = items
        async def count(self):
            return len(self._items)
        def nth(self, index):
            return self._items[index]
    class _FakeResponse:
        # 200 with non-login markup: treated as authenticated by the helper.
        status = 200
        url = "https://example.test/welcome/"
        async def text(self):
            return "<html><body>Welcome</body></html>"
    class _FakeRequest:
        def __init__(self):
            self.calls = []
        async def post(self, url, form=None, headers=None, timeout=None, fail_on_status_code=None, max_redirects=None):
            # Record every keyword so the exact request contract can be asserted.
            self.calls.append({
                "url": url,
                "form": dict(form or {}),
                "headers": dict(headers or {}),
                "timeout": timeout,
                "fail_on_status_code": fail_on_status_code,
                "max_redirects": max_redirects,
            })
            return _FakeResponse()
    class _FakeContext:
        def __init__(self):
            self.request = _FakeRequest()
    class _FakePage:
        def __init__(self):
            self.frames = []
            self.context = _FakeContext()
        def locator(self, selector):
            # Only the hidden-field selector returns inputs (csrf + next).
            if selector == "input[type='hidden'][name]":
                return _FakeLocator([
                    _FakeInput("csrf_token", "csrf-123"),
                    _FakeInput("next", "/superset/welcome/"),
                ])
            return _FakeLocator([])
    env = type("Env", (), {"username": "admin", "password": "secret"})()
    service = ScreenshotService(env=env)
    page = _FakePage()
    result = await service._submit_login_via_form_post(page, "https://example.test/login/")
    assert result is True
    # Single POST carrying hidden fields + credentials + fixed request options.
    assert page.context.request.calls == [{
        "url": "https://example.test/login/",
        "form": {
            "csrf_token": "csrf-123",
            "next": "/superset/welcome/",
            "username": "admin",
            "password": "secret",
        },
        "headers": {
            "Origin": "https://example.test",
            "Referer": "https://example.test/login/",
        },
        "timeout": 10000,
        "fail_on_status_code": False,
        "max_redirects": 0,
    }]
# [/DEF:test_submit_login_via_form_post_uses_browser_context_request:Function]
# [DEF:test_submit_login_via_form_post_accepts_authenticated_redirect:Function]
# @PURPOSE: Fallback login must treat non-login 302 redirect as success without waiting for redirect target.
# @PRE: Request response is 302 with Location outside login path.
# @POST: Helper returns True.
@pytest.mark.anyio
async def test_submit_login_via_form_post_accepts_authenticated_redirect():
    class _FakeInput:
        def __init__(self, name, value):
            self._name = name
            self._value = value
        async def get_attribute(self, attr_name):
            return self._name if attr_name == "name" else None
        async def input_value(self):
            return self._value
    class _FakeLocator:
        def __init__(self, items):
            self._items = items
        async def count(self):
            return len(self._items)
        def nth(self, index):
            return self._items[index]
    class _FakeResponse:
        # Redirect away from /login: helper must not follow it, just accept it.
        status = 302
        url = "https://example.test/login/"
        headers = {"location": "/superset/welcome/"}
        async def text(self):
            return ""
    class _FakeRequest:
        async def post(self, url, form=None, headers=None, timeout=None, fail_on_status_code=None, max_redirects=None):
            return _FakeResponse()
    class _FakeContext:
        def __init__(self):
            self.request = _FakeRequest()
    class _FakePage:
        def __init__(self):
            self.frames = []
            self.context = _FakeContext()
        def locator(self, selector):
            if selector == "input[type='hidden'][name]":
                return _FakeLocator([_FakeInput("csrf_token", "csrf-123")])
            return _FakeLocator([])
    env = type("Env", (), {"username": "admin", "password": "secret"})()
    service = ScreenshotService(env=env)
    result = await service._submit_login_via_form_post(_FakePage(), "https://example.test/login/")
    assert result is True
# [/DEF:test_submit_login_via_form_post_accepts_authenticated_redirect:Function]
# [DEF:test_submit_login_via_form_post_rejects_login_markup_response:Function]
# @PURPOSE: Fallback login must fail when POST response still contains login form content.
# @PRE: Login DOM exposes csrf hidden field and request response renders login markup.
# @POST: Helper returns False.
@pytest.mark.anyio
async def test_submit_login_via_form_post_rejects_login_markup_response():
    class _FakeInput:
        def __init__(self, name, value):
            self._name = name
            self._value = value
        async def get_attribute(self, attr_name):
            return self._name if attr_name == "name" else None
        async def input_value(self):
            return self._value
    class _FakeLocator:
        def __init__(self, items):
            self._items = items
        async def count(self):
            return len(self._items)
        def nth(self, index):
            return self._items[index]
    class _FakeResponse:
        # 200 but the body still shows the login form -> authentication failed.
        status = 200
        url = "https://example.test/login/"
        async def text(self):
            return """
            <html>
            <body>
            Enter your login and password below
            Username:
            Password:
            Sign in
            </body>
            </html>
            """
    class _FakeRequest:
        async def post(self, url, form=None, headers=None, timeout=None, fail_on_status_code=None, max_redirects=None):
            return _FakeResponse()
    class _FakeContext:
        def __init__(self):
            self.request = _FakeRequest()
    class _FakePage:
        def __init__(self):
            self.frames = []
            self.context = _FakeContext()
        def locator(self, selector):
            if selector == "input[type='hidden'][name]":
                return _FakeLocator([_FakeInput("csrf_token", "csrf-123")])
            return _FakeLocator([])
    env = type("Env", (), {"username": "admin", "password": "secret"})()
    service = ScreenshotService(env=env)
    result = await service._submit_login_via_form_post(_FakePage(), "https://example.test/login/")
    assert result is False
# [/DEF:test_submit_login_via_form_post_rejects_login_markup_response:Function]
# [DEF:test_goto_resilient_falls_back_from_domcontentloaded_to_load:Function]
# @PURPOSE: Pages with unstable primary wait must retry with fallback wait strategy.
# @PRE: First page.goto call raises; second succeeds.
# @POST: Helper returns second response and attempts both wait modes in order.
@pytest.mark.anyio
async def test_goto_resilient_falls_back_from_domcontentloaded_to_load():
    class _RecordingPage:
        def __init__(self):
            self.calls = []
        async def goto(self, url, wait_until, timeout):
            # Fail the primary wait mode only; succeed on the fallback.
            self.calls.append((url, wait_until, timeout))
            if wait_until == "domcontentloaded":
                raise RuntimeError("primary wait failed")
            return {"ok": True, "url": url, "wait_until": wait_until}
    recording_page = _RecordingPage()
    screenshot_service = ScreenshotService(env=type("Env", (), {})())
    nav_response = await screenshot_service._goto_resilient(
        recording_page,
        "https://example.test/dashboard",
        primary_wait_until="domcontentloaded",
        fallback_wait_until="load",
        timeout=1234,
    )
    assert nav_response["ok"] is True
    assert recording_page.calls == [
        ("https://example.test/dashboard", "domcontentloaded", 1234),
        ("https://example.test/dashboard", "load", 1234),
    ]
# [/DEF:test_goto_resilient_falls_back_from_domcontentloaded_to_load:Function]
# [/DEF:backend.src.plugins.llm_analysis.__tests__.test_screenshot_service:Module]

View File

@@ -0,0 +1,67 @@
# [DEF:backend.src.plugins.llm_analysis.__tests__.test_service:Module]
# @TIER: STANDARD
# @SEMANTICS: tests, llm-analysis, fallback, provider-error, unknown-status
# @PURPOSE: Verify LLM analysis transport/provider failures do not masquerade as dashboard FAIL results.
import pytest
from src.plugins.llm_analysis.models import LLMProviderType
from src.plugins.llm_analysis.service import LLMClient
# [DEF:test_test_runtime_connection_uses_json_completion_transport:Function]
# @PURPOSE: Provider self-test must exercise the same chat completion transport as runtime analysis.
# @PRE: get_json_completion is available on initialized client.
# @POST: Self-test forwards a lightweight user message into get_json_completion and returns its payload.
@pytest.mark.anyio
async def test_test_runtime_connection_uses_json_completion_transport(monkeypatch):
    llm_client = LLMClient(
        provider_type=LLMProviderType.OPENROUTER,
        api_key="sk-test-provider-key-123456",
        base_url="https://openrouter.ai/api/v1",
        default_model="nvidia/nemotron-nano-12b-v2-vl:free",
    )
    captured = {}
    async def _capture_messages(messages):
        captured["messages"] = messages
        return {"ok": True}
    # Stub the transport so the self-test never hits the network.
    monkeypatch.setattr(llm_client, "get_json_completion", _capture_messages)
    payload = await llm_client.test_runtime_connection()
    assert payload == {"ok": True}
    first_message = captured["messages"][0]
    assert first_message["role"] == "user"
    assert "Return exactly this JSON object" in first_message["content"]
# [/DEF:test_test_runtime_connection_uses_json_completion_transport:Function]
# [DEF:test_analyze_dashboard_provider_error_maps_to_unknown:Function]
# @PURPOSE: Infrastructure/provider failures must produce UNKNOWN analysis status rather than FAIL.
# @PRE: LLMClient.get_json_completion raises provider/auth exception.
# @POST: Returned payload uses status=UNKNOWN and issue severity UNKNOWN.
@pytest.mark.anyio
async def test_analyze_dashboard_provider_error_maps_to_unknown(monkeypatch, tmp_path):
    # A tiny on-disk file is enough; the stubbed transport never decodes it.
    screenshot_file = tmp_path / "shot.jpg"
    screenshot_file.write_bytes(b"fake-image")
    llm_client = LLMClient(
        provider_type=LLMProviderType.OPENROUTER,
        api_key="sk-test-provider-key-123456",
        base_url="https://openrouter.ai/api/v1",
        default_model="nvidia/nemotron-nano-12b-v2-vl:free",
    )
    async def _simulate_provider_failure(_messages):
        raise RuntimeError("Error code: 401 - {'error': {'message': 'User not found.', 'code': 401}}")
    monkeypatch.setattr(llm_client, "get_json_completion", _simulate_provider_failure)
    analysis = await llm_client.analyze_dashboard(str(screenshot_file), logs=["line-1"])
    assert analysis["status"] == "UNKNOWN"
    assert "Failed to get response from LLM" in analysis["summary"]
    assert analysis["issues"][0]["severity"] == "UNKNOWN"
# [/DEF:test_analyze_dashboard_provider_error_maps_to_unknown:Function]
# [/DEF:backend.src.plugins.llm_analysis.__tests__.test_service:Module]

View File

@@ -35,6 +35,7 @@ class ValidationStatus(str, Enum):
PASS = "PASS"
WARN = "WARN"
FAIL = "FAIL"
UNKNOWN = "UNKNOWN"
# [/DEF:ValidationStatus:Class]
# [DEF:DetectedIssue:Class]

View File

@@ -307,7 +307,7 @@ class DashboardValidationPlugin(PluginBase):
await notification_service.dispatch_report(
record=db_record,
policy=policy,
background_tasks=context.background_tasks if context else None
background_tasks=getattr(context, "background_tasks", None) if context else None
)
except Exception as e:
log.error(f"Failed to dispatch notifications: {e}")

View File

@@ -12,6 +12,8 @@ import asyncio
import base64
import json
import io
import os
from urllib.parse import urlsplit
from typing import List, Dict, Any
import httpx
from PIL import Image
@@ -33,6 +35,242 @@ class ScreenshotService:
self.env = env
# [/DEF:ScreenshotService.__init__:Function]
# [DEF:ScreenshotService._find_first_visible_locator:Function]
# @PURPOSE: Resolve the first visible locator from multiple Playwright locator strategies.
# @PRE: candidates is a non-empty list of locator-like objects.
# @POST: Returns a locator ready for interaction or None when nothing matches.
async def _find_first_visible_locator(self, candidates) -> Any:
    """Return the first visible element across all candidate strategies, or None."""
    for strategy in candidates:
        try:
            total = await strategy.count()
            for position in range(total):
                element = strategy.nth(position)
                if await element.is_visible():
                    return element
        except Exception:
            # A failing strategy (stale selector, detached frame) is simply skipped.
            continue
    return None
# [/DEF:ScreenshotService._find_first_visible_locator:Function]
# [DEF:ScreenshotService._iter_login_roots:Function]
# @PURPOSE: Enumerate page and child frames where login controls may be rendered.
# @PRE: page is a Playwright page-like object.
# @POST: Returns ordered roots starting with main page followed by frames.
def _iter_login_roots(self, page) -> List[Any]:
    """List search roots for login controls: the page first, then its frames."""
    discovered = [page]
    child_frames = getattr(page, "frames", [])
    try:
        for child_frame in child_frames:
            # Deduplicate in case the main page shows up in its own frame list.
            if child_frame not in discovered:
                discovered.append(child_frame)
    except Exception:
        pass
    return discovered
# [/DEF:ScreenshotService._iter_login_roots:Function]
# [DEF:ScreenshotService._extract_hidden_login_fields:Function]
# @PURPOSE: Collect hidden form fields required for direct login POST fallback.
# @PRE: Login page is loaded.
# @POST: Returns hidden input name/value mapping aggregated from page and child frames.
async def _extract_hidden_login_fields(self, page) -> Dict[str, str]:
    """Aggregate hidden input name/value pairs from the page and its frames.

    First occurrence of a field name wins; empty names are skipped.
    """
    hidden_fields: Dict[str, str] = {}
    for root in self._iter_login_roots(page):
        try:
            locator = root.locator("input[type='hidden'][name]")
            count = await locator.count()
        except Exception:
            # Root-level failure (detached frame, bad locator): skip this root.
            continue
        for index in range(count):
            candidate = locator.nth(index)
            # Isolate failures per input: previously one stale/detached node
            # aborted collection of every remaining field in the same root.
            try:
                field_name = str(await candidate.get_attribute("name") or "").strip()
                if not field_name or field_name in hidden_fields:
                    continue
                hidden_fields[field_name] = str(await candidate.input_value()).strip()
            except Exception:
                continue
    return hidden_fields
# [/DEF:ScreenshotService._extract_hidden_login_fields:Function]
# [DEF:ScreenshotService._extract_csrf_token:Function]
# @PURPOSE: Resolve CSRF token value from main page or embedded login frame.
# @PRE: Login page is loaded.
# @POST: Returns first non-empty csrf token or empty string.
async def _extract_csrf_token(self, page) -> str:
    """Pull the csrf_token hidden field out of the aggregated login fields."""
    fields = await self._extract_hidden_login_fields(page)
    token = fields.get("csrf_token") or ""
    return str(token).strip()
# [/DEF:ScreenshotService._extract_csrf_token:Function]
# [DEF:ScreenshotService._response_looks_like_login_page:Function]
# @PURPOSE: Detect when fallback login POST returned the login form again instead of an authenticated page.
# @PRE: response_text is normalized HTML or text from login POST response.
# @POST: Returns True when login-page markers dominate the response body.
def _response_looks_like_login_page(self, response_text: str) -> bool:
    """Heuristic: three or more known login markers means we're still on the login form."""
    body = str(response_text or "").strip().lower()
    if not body:
        # An empty body cannot be the login form.
        return False
    login_markers = (
        "enter your login and password below",
        "username:",
        "password:",
        "sign in",
        'name="csrf_token"',
    )
    marker_hits = sum(1 for marker in login_markers if marker in body)
    return marker_hits >= 3
# [/DEF:ScreenshotService._response_looks_like_login_page:Function]
# [DEF:ScreenshotService._redirect_looks_authenticated:Function]
# @PURPOSE: Treat non-login redirects after form POST as successful authentication without waiting for redirect target.
# @PRE: redirect_location may be empty or relative.
# @POST: Returns True when redirect target does not point back to login flow.
def _redirect_looks_authenticated(self, redirect_location: str) -> bool:
    """A redirect is considered authenticated unless it points back at a login URL."""
    target = str(redirect_location or "").strip().lower()
    if not target:
        # Missing Location header: assume success rather than blocking the flow.
        return True
    return "/login" not in target
# [/DEF:ScreenshotService._redirect_looks_authenticated:Function]
# [DEF:ScreenshotService._submit_login_via_form_post:Function]
# @PURPOSE: Fallback login path that submits credentials directly with csrf token.
# @PRE: login_url is same-origin and csrf token can be read from DOM.
# @POST: Browser context receives authenticated cookies when login succeeds.
async def _submit_login_via_form_post(self, page, login_url: str) -> bool:
    """POST the login form via the browser context's request API.

    Returns True when the response looks authenticated (non-login redirect,
    or 200 without login-form markup); False otherwise. Cookies set by the
    POST land in the browser context, so subsequent page navigation is
    authenticated.
    """
    hidden_fields = await self._extract_hidden_login_fields(page)
    csrf_token = str(hidden_fields.get("csrf_token") or "").strip()
    if not csrf_token:
        # Without a csrf token the POST would be rejected; bail out early.
        logger.warning("[DEBUG] Direct form login fallback skipped: csrf_token not found")
        return False
    try:
        # Context-level request API shares the browser's cookie jar.
        request_context = page.context.request
    except Exception as context_error:
        logger.warning(f"[DEBUG] Direct form login fallback skipped: request context unavailable: {context_error}")
        return False
    parsed_url = urlsplit(login_url)
    # Same-origin headers; fall back to the raw URL when parsing yields nothing.
    origin = f"{parsed_url.scheme}://{parsed_url.netloc}" if parsed_url.scheme and parsed_url.netloc else login_url
    # Hidden fields (csrf, next, ...) plus the credentials form the payload.
    payload = dict(hidden_fields)
    payload["username"] = self.env.username
    payload["password"] = self.env.password
    logger.info(
        f"[DEBUG] Attempting direct form login fallback via browser context request with hidden fields: {sorted(hidden_fields.keys())}"
    )
    # max_redirects=0 so a redirect response can be inspected instead of followed.
    response = await request_context.post(
        login_url,
        form=payload,
        headers={
            "Origin": origin,
            "Referer": login_url,
        },
        timeout=10000,
        fail_on_status_code=False,
        max_redirects=0,
    )
    response_url = str(getattr(response, "url", "") or "")
    response_status = int(getattr(response, "status", 0) or 0)
    response_headers = dict(getattr(response, "headers", {}) or {})
    # Header casing varies by transport; check both spellings.
    redirect_location = str(
        response_headers.get("location")
        or response_headers.get("Location")
        or ""
    ).strip()
    redirect_statuses = {301, 302, 303, 307, 308}
    if response_status in redirect_statuses:
        # Redirect away from /login counts as success without loading the target.
        redirect_authenticated = self._redirect_looks_authenticated(redirect_location)
        logger.info(
            f"[DEBUG] Direct form login fallback redirect response: status={response_status} url={response_url} location={redirect_location!r} authenticated={redirect_authenticated}"
        )
        return redirect_authenticated
    # Non-redirect: success only when the body is not the login form again.
    response_text = await response.text()
    text_snippet = " ".join(response_text.split())[:200]
    looks_like_login_page = self._response_looks_like_login_page(response_text)
    logger.info(
        f"[DEBUG] Direct form login fallback response: status={response_status} url={response_url} login_markup={looks_like_login_page} snippet={text_snippet!r}"
    )
    return not looks_like_login_page
# [/DEF:ScreenshotService._submit_login_via_form_post:Function]
# [DEF:ScreenshotService._find_login_field_locator:Function]
# @PURPOSE: Resolve login form input using semantic label text plus generic visible-input fallbacks.
# @PRE: field_name is `username` or `password`.
# @POST: Returns a locator for the corresponding input or None.
async def _find_login_field_locator(self, page, field_name: str) -> Any:
    """Locate the requested login input across the page and its frames.

    Semantic label lookups come first, then generic visible-input selectors;
    the first visible match wins. Unknown field names yield None.
    """
    target = str(field_name or "").strip().lower()
    for root in self._iter_login_roots(page):
        if target == "username":
            candidates = [
                root.get_by_label("Username", exact=False),
                root.get_by_label("Login", exact=False),
                root.locator("label:text-matches('Username|Login', 'i')").locator("xpath=following::input[1]"),
                root.locator("text=/Username|Login/i").locator("xpath=following::input[1]"),
                root.locator("input[name='username']"),
                root.locator("input#username"),
                root.locator("input[placeholder*='Username']"),
                root.locator("input[type='text']"),
                root.locator("input:not([type='password'])"),
            ]
        elif target == "password":
            candidates = [
                root.get_by_label("Password", exact=False),
                root.locator("label:text-matches('Password', 'i')").locator("xpath=following::input[1]"),
                root.locator("text=/Password/i").locator("xpath=following::input[1]"),
                root.locator("input[name='password']"),
                root.locator("input#password"),
                root.locator("input[placeholder*='Password']"),
                root.locator("input[type='password']"),
            ]
        else:
            # Unsupported field name: nothing to search for in any root.
            continue
        visible_input = await self._find_first_visible_locator(candidates)
        if visible_input:
            return visible_input
    return None
# [/DEF:ScreenshotService._find_login_field_locator:Function]
# [DEF:ScreenshotService._find_submit_locator:Function]
# @PURPOSE: Resolve login submit button from main page or embedded auth frame.
# @PRE: page is ready for login interaction.
# @POST: Returns visible submit locator or None.
async def _find_submit_locator(self, page) -> Any:
    """Find a visible login submit control, preferring semantic role lookups."""
    # Factories defer locator construction until we know which root to query.
    build_candidates = [
        lambda root: root.get_by_role("button", name="Sign in", exact=False),
        lambda root: root.get_by_role("button", name="Login", exact=False),
        lambda root: root.locator("button[type='submit']"),
        lambda root: root.locator("button#submit"),
        lambda root: root.locator(".btn-primary"),
        lambda root: root.locator("input[type='submit']"),
    ]
    for root in self._iter_login_roots(page):
        candidates = [build(root) for build in build_candidates]
        submit_control = await self._find_first_visible_locator(candidates)
        if submit_control:
            return submit_control
    return None
# [/DEF:ScreenshotService._find_submit_locator:Function]
# [DEF:ScreenshotService._goto_resilient:Function]
# @PURPOSE: Navigate without relying on networkidle for pages with long-polling or persistent requests.
# @PRE: page is a valid Playwright page and url is non-empty.
# @POST: Returns last navigation response or raises when both primary and fallback waits fail.
async def _goto_resilient(
    self,
    page,
    url: str,
    primary_wait_until: str = "domcontentloaded",
    fallback_wait_until: str = "load",
    timeout: int = 60000,
):
    """Navigate with the primary wait strategy, retrying once with the fallback."""
    try:
        navigation_response = await page.goto(url, wait_until=primary_wait_until, timeout=timeout)
    except Exception as primary_error:
        logger.warning(
            f"[ScreenshotService._goto_resilient] Primary navigation wait '{primary_wait_until}' failed for {url}: {primary_error}"
        )
        # Second (and last) attempt; any failure here propagates to the caller.
        navigation_response = await page.goto(url, wait_until=fallback_wait_until, timeout=timeout)
    return navigation_response
# [/DEF:ScreenshotService._goto_resilient:Function]
# [DEF:ScreenshotService.capture_dashboard:Function]
# @PURPOSE: Captures a full-page screenshot of a dashboard using Playwright and CDP.
# @PRE: dashboard_id is a valid string, output_path is a writable path.
@@ -85,7 +323,13 @@ class ScreenshotService:
login_url = f"{base_ui_url.rstrip('/')}/login/"
logger.info(f"[DEBUG] Navigating to login page: {login_url}")
response = await page.goto(login_url, wait_until="networkidle", timeout=60000)
response = await self._goto_resilient(
page,
login_url,
primary_wait_until="domcontentloaded",
fallback_wait_until="load",
timeout=60000,
)
if response:
logger.info(f"[DEBUG] Login page response status: {response.status}")
@@ -101,57 +345,59 @@ class ScreenshotService:
logger.info("[DEBUG] Attempting to find login form elements...")
try:
used_direct_form_login = False
# Find and fill username
u_selector = None
for s in selectors["username"]:
count = await page.locator(s).count()
logger.info(f"[DEBUG] Selector '{s}': {count} elements found")
if count > 0:
u_selector = s
break
username_locator = await self._find_login_field_locator(page, "username")
if not username_locator:
roots = self._iter_login_roots(page)
logger.info(f"[DEBUG] Found {len(roots)} login roots including child frames")
for root_index, root in enumerate(roots[:5]):
all_inputs = await root.locator('input').all()
logger.info(f"[DEBUG] Root {root_index}: found {len(all_inputs)} input fields")
for i, inp in enumerate(all_inputs[:5]): # Log first 5
inp_type = await inp.get_attribute('type')
inp_name = await inp.get_attribute('name')
inp_id = await inp.get_attribute('id')
logger.info(f"[DEBUG] Root {root_index} input {i}: type={inp_type}, name={inp_name}, id={inp_id}")
used_direct_form_login = await self._submit_login_via_form_post(page, login_url)
if not used_direct_form_login:
raise RuntimeError("Could not find username input field on login page")
username_locator = None
if not u_selector:
# Log all input fields on the page for debugging
all_inputs = await page.locator('input').all()
logger.info(f"[DEBUG] Found {len(all_inputs)} input fields on page")
for i, inp in enumerate(all_inputs[:5]): # Log first 5
inp_type = await inp.get_attribute('type')
inp_name = await inp.get_attribute('name')
inp_id = await inp.get_attribute('id')
logger.info(f"[DEBUG] Input {i}: type={inp_type}, name={inp_name}, id={inp_id}")
raise RuntimeError("Could not find username input field on login page")
logger.info(f"[DEBUG] Filling username field with selector: {u_selector}")
await page.fill(u_selector, self.env.username)
if username_locator is not None:
logger.info("[DEBUG] Filling username field")
await username_locator.fill(self.env.username)
# Find and fill password
p_selector = None
for s in selectors["password"]:
if await page.locator(s).count() > 0:
p_selector = s
break
if not p_selector:
password_locator = await self._find_login_field_locator(page, "password") if username_locator is not None else None
if username_locator is not None and not password_locator:
raise RuntimeError("Could not find password input field on login page")
logger.info(f"[DEBUG] Filling password field with selector: {p_selector}")
await page.fill(p_selector, self.env.password)
if password_locator is not None:
logger.info("[DEBUG] Filling password field")
await password_locator.fill(self.env.password)
# Click submit
s_selector = selectors["submit"][0]
for s in selectors["submit"]:
if await page.locator(s).count() > 0:
s_selector = s
break
logger.info(f"[DEBUG] Clicking submit button with selector: {s_selector}")
await page.click(s_selector)
submit_locator = await self._find_submit_locator(page) if username_locator is not None else None
if username_locator is not None and not submit_locator:
raise RuntimeError("Could not find submit button on login page")
if submit_locator is not None:
logger.info("[DEBUG] Clicking submit button")
await submit_locator.click()
# Wait for navigation after login
await page.wait_for_load_state("networkidle", timeout=30000)
if not used_direct_form_login:
try:
await page.wait_for_load_state("load", timeout=30000)
except Exception as load_wait_error:
logger.warning(f"[DEBUG] Login post-submit load wait timed out: {load_wait_error}")
# Check if login was successful
if "/login" in page.url:
if not used_direct_form_login and "/login" in page.url:
# Check for error messages on page
error_msg = await page.locator(".alert-danger, .error-message").text_content() if await page.locator(".alert-danger, .error-message").count() > 0 else "Unknown error"
logger.error(f"[DEBUG] Login failed. Still on login page. Error: {error_msg}")
@@ -183,11 +429,24 @@ class ScreenshotService:
logger.info(f"[DEBUG] Navigating to dashboard: {dashboard_url}")
# Use networkidle to ensure all initial assets are loaded
response = await page.goto(dashboard_url, wait_until="networkidle", timeout=60000)
# Dashboard pages can keep polling/network activity open indefinitely.
response = await self._goto_resilient(
page,
dashboard_url,
primary_wait_until="domcontentloaded",
fallback_wait_until="load",
timeout=60000,
)
if response:
logger.info(f"[DEBUG] Dashboard navigation response status: {response.status}, URL: {response.url}")
if "/login" in page.url:
debug_path = output_path.replace(".png", "_debug_failed_dashboard_auth.png")
await page.screenshot(path=debug_path)
raise RuntimeError(
f"Dashboard navigation redirected to login page after authentication. Debug screenshot saved to {debug_path}"
)
try:
# Wait for the dashboard grid to be present
@@ -440,6 +699,13 @@ class LLMClient:
# Some OpenAI-compatible gateways are strict about auth header naming.
default_headers = {"Authorization": f"Bearer {self.api_key}"}
if self.provider_type == LLMProviderType.OPENROUTER:
default_headers["HTTP-Referer"] = (
os.getenv("OPENROUTER_SITE_URL", "").strip()
or os.getenv("APP_BASE_URL", "").strip()
or "http://localhost:8000"
)
default_headers["X-Title"] = os.getenv("OPENROUTER_APP_NAME", "").strip() or "ss-tools"
if self.provider_type == LLMProviderType.KILO:
default_headers["Authentication"] = f"Bearer {self.api_key}"
default_headers["X-API-Key"] = self.api_key
@@ -595,6 +861,22 @@ class LLMClient:
raise
# [/DEF:LLMClient.get_json_completion:Function]
# [DEF:LLMClient.test_runtime_connection:Function]
# @PURPOSE: Validate provider credentials using the same chat completions transport as runtime analysis.
# @PRE: Client is initialized with provider credentials and default_model.
# @POST: Returns lightweight JSON payload when runtime auth/model path is valid.
# @SIDE_EFFECT: Calls external LLM API.
async def test_runtime_connection(self) -> Dict[str, Any]:
    with belief_scope("test_runtime_connection"):
        # Single deterministic probe message; provider must echo a trivial JSON object.
        probe = {
            "role": "user",
            "content": 'Return exactly this JSON object and nothing else: {"ok": true}',
        }
        return await self.get_json_completion([probe])
# [/DEF:LLMClient.test_runtime_connection:Function]
# [DEF:LLMClient.analyze_dashboard:Function]
# @PURPOSE: Sends dashboard data (screenshot + logs) to LLM for health analysis.
# @PRE: screenshot_path exists, logs is a list of strings.
@@ -661,9 +943,9 @@ class LLMClient:
except Exception as e:
logger.error(f"[analyze_dashboard] Failed to get analysis: {str(e)}")
return {
"status": "FAIL",
"status": "UNKNOWN",
"summary": f"Failed to get response from LLM: {str(e)}",
"issues": [{"severity": "FAIL", "message": "LLM provider returned empty or invalid response"}]
"issues": [{"severity": "UNKNOWN", "message": "LLM provider returned empty or invalid response"}]
}
# [/DEF:LLMClient.analyze_dashboard:Function]
# [/DEF:LLMClient:Class]

View File

@@ -11,7 +11,9 @@ from datetime import datetime
# [DEF:DashboardHealthItem:Class]
# @PURPOSE: Represents the latest health status of a single dashboard.
class DashboardHealthItem(BaseModel):
record_id: str
dashboard_id: str
dashboard_slug: Optional[str] = None
dashboard_title: Optional[str] = None
environment_id: str
status: str = Field(..., pattern="^(PASS|WARN|FAIL|UNKNOWN)$")
@@ -30,4 +32,4 @@ class HealthSummaryResponse(BaseModel):
unknown_count: int
# [/DEF:HealthSummaryResponse:Class]
# [/DEF:backend.src.schemas.health:Module]
# [/DEF:backend.src.schemas.health:Module]

View File

@@ -1,6 +1,6 @@
import pytest
from datetime import datetime, timedelta
from unittest.mock import MagicMock
from unittest.mock import MagicMock, patch
from src.services.health_service import HealthService
from src.models.llm import ValidationRecord
@@ -20,6 +20,7 @@ async def test_get_health_summary_aggregation():
# Dashboard 1: Old FAIL, New PASS
rec1_old = ValidationRecord(
id="rec-old",
dashboard_id="dash_1",
environment_id="env_1",
status="FAIL",
@@ -28,6 +29,7 @@ async def test_get_health_summary_aggregation():
issues=[]
)
rec1_new = ValidationRecord(
id="rec-new",
dashboard_id="dash_1",
environment_id="env_1",
status="PASS",
@@ -38,6 +40,7 @@ async def test_get_health_summary_aggregation():
# Dashboard 2: Single WARN
rec2 = ValidationRecord(
id="rec-warn",
dashboard_id="dash_2",
environment_id="env_1",
status="WARN",
@@ -69,6 +72,8 @@ async def test_get_health_summary_aggregation():
dash_1_item = next(item for item in summary.items if item.dashboard_id == "dash_1")
assert dash_1_item.status == "PASS"
assert dash_1_item.summary == "New pass"
assert dash_1_item.record_id == rec1_new.id
assert dash_1_item.dashboard_slug == "dash_1"
@pytest.mark.asyncio
async def test_get_health_summary_empty():
@@ -84,4 +89,160 @@ async def test_get_health_summary_empty():
assert summary.pass_count == 0
assert len(summary.items) == 0
# [/DEF:test_health_service:Module]
@pytest.mark.asyncio
async def test_get_health_summary_resolves_slug_and_title_from_superset():
    """Summary rows for numeric dashboard ids are enriched with slug/title from Superset."""
    session = MagicMock()
    cfg = MagicMock()
    cfg.get_environments.return_value = [MagicMock(id="env_1")]
    rec = ValidationRecord(
        id="rec-1",
        dashboard_id="42",
        environment_id="env_1",
        status="PASS",
        timestamp=datetime.utcnow(),
        summary="Healthy",
        issues=[],
    )
    session.query.return_value.join.return_value.all.return_value = [rec]
    with patch("src.services.health_service.SupersetClient") as client_cls:
        superset_client = MagicMock()
        superset_client.get_dashboards_summary.return_value = [
            {"id": 42, "slug": "ops-overview", "title": "Ops Overview"}
        ]
        client_cls.return_value = superset_client
        service = HealthService(session, config_manager=cfg)
        summary = await service.get_health_summary(environment_id="env_1")
        assert summary.items[0].dashboard_slug == "ops-overview"
        assert summary.items[0].dashboard_title == "Ops Overview"
        superset_client.get_dashboards_summary.assert_called_once_with()
def test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks():
    """Deleting one report removes all peer records in the same dashboard/environment
    scope, purges their linked tasks and logs, and leaves other environments untouched."""
    db = MagicMock()
    config_manager = MagicMock()
    task_manager = MagicMock()
    task_manager.tasks = {"task-1": object(), "task-2": object(), "task-3": object()}
    target_record = ValidationRecord(
        id="rec-1",
        task_id="task-1",
        dashboard_id="42",
        environment_id="env_1",
        status="PASS",
        timestamp=datetime.utcnow(),
        summary="Healthy",
        issues=[],
        screenshot_path=None,
    )
    older_peer = ValidationRecord(
        id="rec-2",
        task_id="task-2",
        dashboard_id="42",
        environment_id="env_1",
        status="FAIL",
        timestamp=datetime.utcnow() - timedelta(hours=1),
        summary="Older",
        issues=[],
        screenshot_path=None,
    )
    other_environment = ValidationRecord(
        id="rec-3",
        task_id="task-3",
        dashboard_id="42",
        environment_id="env_2",
        status="WARN",
        timestamp=datetime.utcnow(),
        summary="Other environment",
        issues=[],
        screenshot_path=None,
    )
    # First query resolves the target record; second query resolves same-scope peers.
    first_query = MagicMock()
    first_query.first.return_value = target_record
    peer_query = MagicMock()
    peer_query.filter.return_value = peer_query
    peer_query.all.return_value = [target_record, older_peer]
    db.query.side_effect = [first_query, peer_query]
    with patch("src.services.health_service.TaskCleanupService") as cleanup_cls:
        cleanup_instance = MagicMock()
        cleanup_cls.return_value = cleanup_instance
        service = HealthService(db, config_manager=config_manager)
        deleted = service.delete_validation_report("rec-1", task_manager=task_manager)
    assert deleted is True
    assert db.delete.call_count == 2
    db.delete.assert_any_call(target_record)
    db.delete.assert_any_call(older_peer)
    db.commit.assert_called_once()
    cleanup_instance.delete_task_with_logs.assert_any_call("task-1")
    cleanup_instance.delete_task_with_logs.assert_any_call("task-2")
    # Fixed: this line was previously a bare comparison expression (a no-op) with no
    # `assert`, so the call-count contract was never actually checked.
    assert cleanup_instance.delete_task_with_logs.call_count == 2
    assert "task-1" not in task_manager.tasks
    assert "task-2" not in task_manager.tasks
    assert "task-3" in task_manager.tasks
def test_delete_validation_report_returns_false_for_unknown_record():
    """A record id that matches nothing must return False without side effects."""
    session = MagicMock()
    session.query.return_value.filter.return_value.first.return_value = None
    service = HealthService(session, config_manager=MagicMock())
    assert service.delete_validation_report("missing") is False
def test_delete_validation_report_swallows_linked_task_cleanup_failure():
    """A failing task/log cleanup is logged as a warning but never fails the delete."""
    session = MagicMock()
    cfg = MagicMock()
    tasks = MagicMock()
    tasks.tasks = {"task-1": object()}
    rec = ValidationRecord(
        id="rec-1",
        task_id="task-1",
        dashboard_id="42",
        environment_id="env_1",
        status="PASS",
        timestamp=datetime.utcnow(),
        summary="Healthy",
        issues=[],
        screenshot_path=None,
    )
    lookup_query = MagicMock()
    lookup_query.first.return_value = rec
    scope_query = MagicMock()
    scope_query.filter.return_value = scope_query
    scope_query.all.return_value = [rec]
    session.query.side_effect = [lookup_query, scope_query]
    with patch("src.services.health_service.TaskCleanupService") as cleanup_cls, patch(
        "src.services.health_service.logger"
    ) as fake_logger:
        cleanup = MagicMock()
        cleanup.delete_task_with_logs.side_effect = RuntimeError("cleanup exploded")
        cleanup_cls.return_value = cleanup
        service = HealthService(session, config_manager=cfg)
        deleted = service.delete_validation_report("rec-1", task_manager=tasks)
    assert deleted is True
    session.delete.assert_called_once_with(rec)
    session.commit.assert_called_once()
    cleanup.delete_task_with_logs.assert_called_once_with("task-1")
    fake_logger.warning.assert_called_once()
    assert "task-1" not in tasks.tasks
# [/DEF:test_health_service:Module]

View File

@@ -7,9 +7,15 @@ import pytest
import os
from unittest.mock import MagicMock
from sqlalchemy.orm import Session
from cryptography.fernet import Fernet
from src.services.llm_provider import EncryptionManager, LLMProviderService
from src.models.llm import LLMProvider
from src.plugins.llm_analysis.models import LLMProviderConfig, ProviderType
from src.plugins.llm_analysis.models import LLMProviderConfig, LLMProviderType
# [DEF:_test_encryption_key_fixture:Global]
# @PURPOSE: Ensure encryption-dependent provider tests run with a valid Fernet key.
os.environ.setdefault("ENCRYPTION_KEY", Fernet.generate_key().decode())
# [/DEF:_test_encryption_key_fixture:Global]
# @TEST_CONTRACT: EncryptionManagerModel -> Invariants
# @TEST_INVARIANT: symmetric_encryption
@@ -50,7 +56,7 @@ def test_get_all_providers(service, mock_db):
def test_create_provider(service, mock_db):
config = LLMProviderConfig(
provider_type=ProviderType.OPENAI,
provider_type=LLMProviderType.OPENAI,
name="Test OpenAI",
base_url="https://api.openai.com",
api_key="sk-test",
@@ -79,3 +85,32 @@ def test_get_decrypted_api_key(service, mock_db):
def test_get_decrypted_api_key_not_found(service, mock_db):
mock_db.query().filter().first.return_value = None
assert service.get_decrypted_api_key("missing") is None
def test_update_provider_ignores_masked_placeholder_api_key(service, mock_db):
existing_encrypted = EncryptionManager().encrypt("secret-value")
mock_provider = LLMProvider(
id="p1",
provider_type="openai",
name="Existing",
base_url="https://api.openai.com/v1",
api_key=existing_encrypted,
default_model="gpt-4o",
is_active=True,
)
mock_db.query().filter().first.return_value = mock_provider
config = LLMProviderConfig(
id="p1",
provider_type=LLMProviderType.OPENAI,
name="Existing",
base_url="https://api.openai.com/v1",
api_key="********",
default_model="gpt-4o",
is_active=False,
)
updated = service.update_provider("p1", config)
assert updated is mock_provider
assert updated.api_key == existing_encrypted
assert EncryptionManager().decrypt(updated.api_key) == "secret-value"
assert updated.is_active is False

View File

@@ -4,21 +4,138 @@
# @PURPOSE: Business logic for aggregating dashboard health status from validation records.
# @LAYER: Domain/Service
# @RELATION: DEPENDS_ON -> ValidationRecord
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client.SupersetClient
# @RELATION: DEPENDS_ON -> backend.src.core.task_manager.cleanup.TaskCleanupService
from typing import List, Dict, Any
from typing import List, Dict, Any, Optional, Tuple
from sqlalchemy.orm import Session
from sqlalchemy import func, desc
import os
from ..models.llm import ValidationRecord
from ..schemas.health import DashboardHealthItem, HealthSummaryResponse
from ..core.logger import logger
from ..core.superset_client import SupersetClient
from ..core.task_manager.cleanup import TaskCleanupService
from ..core.task_manager import TaskManager
# [DEF:HealthService:Class]
# @TIER: STANDARD
# @PURPOSE: Aggregate latest dashboard validation state and manage persisted health report lifecycle.
# @RELATION: CALLS -> backend.src.core.superset_client.SupersetClient
# @RELATION: CALLS -> backend.src.core.task_manager.cleanup.TaskCleanupService
class HealthService:
"""
@PURPOSE: Service for managing and querying dashboard health data.
"""
def __init__(self, db: Session):
# [DEF:HealthService.__init__:Function]
# @PURPOSE: Initialize health service with DB session and optional config access for dashboard metadata resolution.
# @PRE: db is a valid SQLAlchemy session.
# @POST: Service is ready to aggregate summaries and delete health reports.
def __init__(self, db: Session, config_manager = None):
self.db = db
self.config_manager = config_manager
self._dashboard_meta_cache: Dict[Tuple[str, str], Dict[str, Optional[str]]] = {}
# [/DEF:HealthService.__init__:Function]
# [DEF:HealthService._prime_dashboard_meta_cache:Function]
# @PURPOSE: Warm dashboard slug/title cache with one Superset list fetch per environment.
# @PRE: records may contain mixed numeric and slug dashboard identifiers.
# @POST: Numeric dashboard ids for known environments are cached when discoverable.
# @SIDE_EFFECT: May call Superset dashboard list API once per referenced environment.
def _prime_dashboard_meta_cache(self, records: List[ValidationRecord]) -> None:
    if not self.config_manager or not records:
        return
    # Group the not-yet-cached numeric dashboard ids by environment so we issue
    # at most one Superset list call per environment.
    pending: Dict[str, set[str]] = {}
    for rec in records:
        env_id = str(rec.environment_id or "").strip()
        dash_id = str(rec.dashboard_id or "").strip()
        if not env_id or not dash_id or not dash_id.isdigit():
            continue
        if (env_id, dash_id) not in self._dashboard_meta_cache:
            pending.setdefault(env_id, set()).add(dash_id)
    if not pending:
        return
    known_envs = {}
    for env in self.config_manager.get_environments():
        env_key = str(getattr(env, "id", "")).strip()
        if env_key:
            known_envs[env_key] = env
    for env_id, dash_ids in pending.items():
        environment = known_envs.get(env_id)
        if environment is None:
            # Unknown environment: cache empty metadata so we do not retry per row.
            for dash_id in dash_ids:
                self._dashboard_meta_cache[(env_id, dash_id)] = {"slug": None, "title": None}
            continue
        try:
            meta_by_id = {
                str(row.get("id")): {"slug": row.get("slug"), "title": row.get("title")}
                for row in SupersetClient(environment).get_dashboards_summary()
                if str(row.get("id") or "").strip()
            }
            for dash_id in dash_ids:
                self._dashboard_meta_cache[(env_id, dash_id)] = meta_by_id.get(
                    dash_id,
                    {"slug": None, "title": None},
                )
        except Exception as exc:
            logger.warning(
                "[HealthService][_prime_dashboard_meta_cache] Failed to preload dashboard metadata for env=%s: %s",
                env_id,
                exc,
            )
            # On any fetch/parse failure, fall back to empty metadata for the whole group.
            for dash_id in dash_ids:
                self._dashboard_meta_cache[(env_id, dash_id)] = {"slug": None, "title": None}
# [/DEF:HealthService._prime_dashboard_meta_cache:Function]
# [DEF:HealthService._resolve_dashboard_meta:Function]
# @PURPOSE: Resolve slug/title for a dashboard referenced by persisted validation record.
# @PRE: dashboard_id may be numeric or slug-like; environment_id may be empty.
# @POST: Returns dict with `slug` and `title` keys, using cache when possible.
# @SIDE_EFFECT: May call Superset API through SupersetClient.
def _resolve_dashboard_meta(self, dashboard_id: str, environment_id: Optional[str]) -> Dict[str, Optional[str]]:
normalized_dashboard_id = str(dashboard_id or "").strip()
normalized_environment_id = str(environment_id or "").strip()
if not normalized_dashboard_id:
return {"slug": None, "title": None}
if not normalized_dashboard_id.isdigit():
return {"slug": normalized_dashboard_id, "title": None}
if not self.config_manager or not normalized_environment_id:
return {"slug": None, "title": None}
cache_key = (normalized_environment_id, normalized_dashboard_id)
cached = self._dashboard_meta_cache.get(cache_key)
if cached is not None:
return cached
meta = {"slug": None, "title": None}
self._dashboard_meta_cache[cache_key] = meta
return meta
# [/DEF:HealthService._resolve_dashboard_meta:Function]
# [DEF:HealthService.get_health_summary:Function]
# @PURPOSE: Aggregate latest validation status per dashboard and enrich rows with dashboard slug/title.
# @PRE: environment_id may be omitted to aggregate across all environments.
# @POST: Returns HealthSummaryResponse with counts and latest record row per dashboard.
# @SIDE_EFFECT: May call Superset API to resolve dashboard metadata.
async def get_health_summary(self, environment_id: str = None) -> HealthSummaryResponse:
"""
@PURPOSE: Aggregates the latest validation status for all dashboards.
@@ -44,6 +161,8 @@ class HealthService:
records = query.all()
self._prime_dashboard_meta_cache(records)
items = []
pass_count = 0
warn_count = 0
@@ -62,8 +181,12 @@ class HealthService:
unknown_count += 1
status = "UNKNOWN"
meta = self._resolve_dashboard_meta(rec.dashboard_id, rec.environment_id)
items.append(DashboardHealthItem(
record_id=rec.id,
dashboard_id=rec.dashboard_id,
dashboard_slug=meta.get("slug"),
dashboard_title=meta.get("title"),
environment_id=rec.environment_id or "unknown",
status=status,
last_check=rec.timestamp,
@@ -80,5 +203,82 @@ class HealthService:
fail_count=fail_count,
unknown_count=unknown_count
)
# [/DEF:HealthService.get_health_summary:Function]
# [/DEF:health_service:Module]
# [DEF:HealthService.delete_validation_report:Function]
# @PURPOSE: Delete one persisted health report and optionally clean linked task/log artifacts.
# @PRE: record_id is a validation record identifier.
# @POST: Returns True only when a matching record was deleted.
# @SIDE_EFFECT: Deletes DB rows, optional screenshot files, and optional task/log persistence.
def delete_validation_report(self, record_id: str, task_manager: Optional[TaskManager] = None) -> bool:
    record = self.db.query(ValidationRecord).filter(ValidationRecord.id == record_id).first()
    if not record:
        return False
    # Capture scope identifiers before deletion: after delete()+commit() the ORM
    # instance is expired (default expire_on_commit=True), and attribute access on
    # the deleted instance can raise ObjectDeletedError in the logging paths below.
    dashboard_id = record.dashboard_id
    environment_id = record.environment_id
    # A health report covers every validation record for the same dashboard within
    # the same environment, so the whole peer group is removed, not just one row.
    peer_query = self.db.query(ValidationRecord).filter(
        ValidationRecord.dashboard_id == dashboard_id
    )
    if environment_id is None:
        peer_query = peer_query.filter(ValidationRecord.environment_id.is_(None))
    else:
        peer_query = peer_query.filter(ValidationRecord.environment_id == environment_id)
    records_to_delete = peer_query.all()
    screenshot_paths = [
        str(item.screenshot_path or "").strip()
        for item in records_to_delete
        if str(item.screenshot_path or "").strip()
    ]
    task_ids = {
        str(item.task_id or "").strip()
        for item in records_to_delete
        if str(item.task_id or "").strip()
    }
    logger.info(
        "[HealthService][delete_validation_report] Removing %s validation record(s) for dashboard=%s environment=%s triggered_by_record=%s",
        len(records_to_delete),
        dashboard_id,
        environment_id,
        record_id,
    )
    for item in records_to_delete:
        self.db.delete(item)
    self.db.commit()
    # Best-effort filesystem cleanup: a missing/locked screenshot must not fail the call.
    for screenshot_path in screenshot_paths:
        try:
            if os.path.exists(screenshot_path):
                os.remove(screenshot_path)
        except OSError as exc:
            logger.warning(
                "[HealthService][delete_validation_report] Failed to remove screenshot %s: %s",
                screenshot_path,
                exc,
            )
    if task_ids and task_manager and self.config_manager:
        try:
            cleanup_service = TaskCleanupService(
                task_manager.persistence_service,
                task_manager.log_persistence_service,
                self.config_manager,
            )
            for task_id in task_ids:
                task_manager.tasks.pop(task_id, None)
                cleanup_service.delete_task_with_logs(task_id)
        except Exception as exc:
            # Cleanup is best-effort; log with the captured identifiers (safe post-commit).
            logger.warning(
                "[HealthService][delete_validation_report] Failed to cleanup linked task/logs for dashboard=%s environment=%s: %s",
                dashboard_id,
                environment_id,
                exc,
            )
    return True
# [/DEF:HealthService.delete_validation_report:Function]
# [/DEF:HealthService:Class]
# [/DEF:health_service:Module]

View File

@@ -16,6 +16,8 @@ import os
if TYPE_CHECKING:
from ..plugins.llm_analysis.models import LLMProviderConfig
MASKED_API_KEY_PLACEHOLDER = "********"
# [DEF:_require_fernet_key:Function]
# @TIER: CRITICAL
# @PURPOSE: Load and validate the Fernet key used for secret encryption.
@@ -145,8 +147,12 @@ class LLMProviderService:
db_provider.provider_type = config.provider_type.value
db_provider.name = config.name
db_provider.base_url = config.base_url
# Only update API key if provided (not None and not empty)
if config.api_key is not None and config.api_key != "":
# Ignore masked placeholder values; they are display-only and must not overwrite secrets.
if (
config.api_key is not None
and config.api_key != ""
and config.api_key != MASKED_API_KEY_PLACEHOLDER
):
db_provider.api_key = self.encryption.encrypt(config.api_key)
db_provider.default_model = config.default_model
db_provider.is_active = config.is_active

View File

@@ -117,6 +117,33 @@ class ProfileService:
)
# [/DEF:get_my_preference:Function]
# [DEF:get_dashboard_filter_binding:Function]
# @PURPOSE: Return only dashboard-filter fields required by dashboards listing hot path.
# @PRE: current_user is authenticated.
# @POST: Returns normalized username and profile-default filter toggles without security summary expansion.
def get_dashboard_filter_binding(self, current_user: User) -> dict:
    with belief_scope("ProfileService.get_dashboard_filter_binding", f"user_id={current_user.id}"):
        row = self._get_preference_row(current_user.id)
        if row is None:
            # No stored preference: fall back to the documented defaults.
            return {
                "superset_username": None,
                "superset_username_normalized": None,
                "show_only_my_dashboards": False,
                "show_only_slug_dashboards": True,
            }
        # NULL in show_only_slug_dashboards means "unset" and defaults to True.
        slug_toggle = row.show_only_slug_dashboards
        return {
            "superset_username": self._sanitize_username(row.superset_username),
            "superset_username_normalized": self._normalize_username(row.superset_username),
            "show_only_my_dashboards": bool(row.show_only_my_dashboards),
            "show_only_slug_dashboards": True if slug_toggle is None else bool(slug_toggle),
        }
# [/DEF:get_dashboard_filter_binding:Function]
# [DEF:update_my_preference:Function]
# @PURPOSE: Validate and persist current user's profile preference in self-scoped mode.
# @PRE: current_user is authenticated and payload is provided.

View File

@@ -10,6 +10,7 @@
# [SECTION: IMPORTS]
import re
from functools import lru_cache
from pathlib import Path
from typing import Iterable, Set, Tuple
@@ -80,6 +81,17 @@ def _discover_route_permissions() -> Set[Tuple[str, str]]:
# [/DEF:_discover_route_permissions:Function]
# [DEF:_discover_route_permissions_cached:Function]
# @PURPOSE: Cache route permission discovery because route source files are static during normal runtime.
# @PRE: None.
# @POST: Returns stable discovered route permission pairs without repeated filesystem scans.
@lru_cache(maxsize=1)
def _discover_route_permissions_cached() -> Tuple[Tuple[str, str], ...]:
    with belief_scope("rbac_permission_catalog._discover_route_permissions_cached"):
        # Sort for a deterministic, hashable snapshot of the discovered pairs.
        discovered_pairs = sorted(_discover_route_permissions())
        return tuple(discovered_pairs)
# [/DEF:_discover_route_permissions_cached:Function]
# [DEF:_discover_plugin_execute_permissions:Function]
# @PURPOSE: Derives dynamic task permissions of form plugin:{plugin_id}:EXECUTE from plugin registry.
# @PRE: plugin_loader is optional and may expose get_all_plugin_configs.
@@ -108,6 +120,19 @@ def _discover_plugin_execute_permissions(plugin_loader=None) -> Set[Tuple[str, s
# [/DEF:_discover_plugin_execute_permissions:Function]
# [DEF:_discover_plugin_execute_permissions_cached:Function]
# @PURPOSE: Cache dynamic plugin EXECUTE permission pairs by normalized plugin id tuple.
# @PRE: plugin_ids is a deterministic tuple of plugin ids.
# @POST: Returns stable permission tuple without repeated plugin catalog expansion.
@lru_cache(maxsize=8)
def _discover_plugin_execute_permissions_cached(
    plugin_ids: Tuple[str, ...],
) -> Tuple[Tuple[str, str], ...]:
    with belief_scope("rbac_permission_catalog._discover_plugin_execute_permissions_cached"):
        pairs = []
        for plugin_id in plugin_ids:
            pairs.append((f"plugin:{plugin_id}", "EXECUTE"))
        return tuple(pairs)
# [/DEF:_discover_plugin_execute_permissions_cached:Function]
# [DEF:discover_declared_permissions:Function]
# @PURPOSE: Builds canonical RBAC permission catalog from routes and plugin registry.
# @PRE: plugin_loader may be provided for dynamic task plugin permission discovery.
@@ -115,8 +140,17 @@ def _discover_plugin_execute_permissions(plugin_loader=None) -> Set[Tuple[str, s
# @RETURN: Set[Tuple[str, str]] - Complete discovered permission set.
def discover_declared_permissions(plugin_loader=None) -> Set[Tuple[str, str]]:
with belief_scope("rbac_permission_catalog.discover_declared_permissions"):
permissions = _discover_route_permissions()
permissions.update(_discover_plugin_execute_permissions(plugin_loader))
permissions = set(_discover_route_permissions_cached())
plugin_ids = tuple(
sorted(
{
str(getattr(plugin_config, "id", "") or "").strip()
for plugin_config in (plugin_loader.get_all_plugin_configs() if plugin_loader else [])
if str(getattr(plugin_config, "id", "") or "").strip()
}
)
)
permissions.update(_discover_plugin_execute_permissions_cached(plugin_ids))
return permissions
# [/DEF:discover_declared_permissions:Function]
@@ -153,4 +187,4 @@ def sync_permission_catalog(
return len(missing_pairs)
# [/DEF:sync_permission_catalog:Function]
# [/DEF:backend.src.services.rbac_permission_catalog:Module]
# [/DEF:backend.src.services.rbac_permission_catalog:Module]

View File

@@ -142,10 +142,15 @@
togglingProviderIds = new Set(togglingProviderIds);
try {
await requestApi(`/llm/providers/${provider.id}`, "PUT", {
...provider,
const updatePayload = {
id: provider.id,
name: provider.name,
provider_type: provider.provider_type,
base_url: provider.base_url,
default_model: provider.default_model,
is_active: provider.is_active,
});
};
await requestApi(`/llm/providers/${provider.id}`, "PUT", updatePayload);
} catch (err) {
provider.is_active = previousState;
providers = [...providers];

View File

@@ -46,6 +46,15 @@ describe('ProviderConfig edit interaction contract', () => {
expect(source).toContain('await requestApi(`/llm/providers/${provider.id}`, "DELETE")');
expect(source).toContain('onclick={() => handleDelete(provider)}');
});
it('does not forward masked api_key when toggling provider activation', () => {
const source = fs.readFileSync(COMPONENT_PATH, 'utf-8');
expect(source).toContain('const updatePayload = {');
expect(source).toContain('provider_type: provider.provider_type');
expect(source).toContain('default_model: provider.default_model');
expect(source).not.toContain('await requestApi(`/llm/providers/${provider.id}`, "PUT", {\n ...provider,');
});
});
// [/DEF:provider_config_edit_contract_tests:Function]
// [/DEF:frontend.src.components.llm.__tests__.provider_config_integration:Module]

View File

@@ -27,7 +27,7 @@
* @TEST_FIXTURE init_state -> {"taskId": "task-1"}
* @TEST_INVARIANT correct_fetch -> verifies: [init_state]
*/
import { onDestroy, onMount } from "svelte";
import { onDestroy, onMount, untrack } from "svelte";
import { page } from "$app/state";
import { goto } from "$app/navigation";
import { api } from "$lib/api.js";
@@ -171,7 +171,10 @@
);
$effect(() => {
void loadScreenshotBlobUrls(screenshotPaths);
const paths = screenshotPaths;
untrack(() => {
void loadScreenshotBlobUrls(paths);
});
});
</script>

View File

@@ -0,0 +1,32 @@
// [DEF:frontend.src.routes.reports.llm.taskid.report_page_contract_test:Module]
// @TIER: STANDARD
// @SEMANTICS: llm-report, svelte-effect, screenshot-loading, regression-test
// @PURPOSE: Protect the LLM report page from self-triggering screenshot load effects.
// @LAYER: UI Tests
// @RELATION: VERIFIES -> frontend/src/routes/reports/llm/[taskId]/+page.svelte
import { describe, it, expect } from "vitest";
import fs from "node:fs";
import path from "node:path";

// Absolute path to the report page under contract; resolved from the
// frontend package root so the test works regardless of invocation dir.
const reportPageFile = path.resolve(
  process.cwd(),
  "src/routes/reports/llm/[taskId]/+page.svelte",
);

// [DEF:llm_report_screenshot_effect_contract:Function]
// @TIER: STANDARD
// @PURPOSE: Ensure screenshot loading stays untracked from blob-url mutation state.
// @PRE: Report page source exists.
// @POST: Contract fails if screenshot loading effect can subscribe to screenshotBlobUrls updates.
describe("LLM report screenshot effect contract", () => {
  it("uses untrack around screenshot blob loading side effect", () => {
    const pageSource = fs.readFileSync(reportPageFile, "utf-8");
    // Each snippet must appear verbatim in the page source; together they
    // prove the screenshot-loading effect runs inside untrack().
    const requiredSnippets = [
      'import { onDestroy, onMount, untrack } from "svelte";',
      "untrack(() => {",
      "void loadScreenshotBlobUrls(paths);",
    ];
    for (const snippet of requiredSnippets) {
      expect(pageSource).toContain(snippet);
    }
  });
});
// [/DEF:llm_report_screenshot_effect_contract:Function]
// [/DEF:frontend.src.routes.reports.llm.taskid.report_page_contract_test:Module]