refactor(semantics): migrate legacy @TIER to @COMPLEXITY annotations

- Replaced @TIER: TRIVIAL with @COMPLEXITY: 1
- Replaced @TIER: STANDARD with @COMPLEXITY: 3
- Replaced @TIER: CRITICAL with @COMPLEXITY: 5
- Manually elevated specific critical/complex components to levels 2 and 4
- Excluded the legacy, specs, and node_modules directories from the migration
- Regenerated the semantic map to reflect the new annotations
This commit is contained in:
2026-03-16 10:06:44 +03:00
parent 321e0eb2db
commit 274510fc38
321 changed files with 30101 additions and 58483 deletions

View File

@@ -1,5 +1,5 @@
# [DEF:backend.src.services.llm_prompt_templates:Module]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @SEMANTICS: llm, prompts, templates, settings
# @PURPOSE: Provide default LLM prompt templates and normalization helpers for runtime usage.
# @LAYER: Domain
@@ -13,7 +13,7 @@ from typing import Dict, Any, Optional
# [DEF:DEFAULT_LLM_PROMPTS:Constant]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Default prompt templates used by documentation, dashboard validation, and git commit generation.
DEFAULT_LLM_PROMPTS: Dict[str, str] = {
"dashboard_validation_prompt": (
@@ -62,7 +62,7 @@ DEFAULT_LLM_PROMPTS: Dict[str, str] = {
# [DEF:DEFAULT_LLM_PROVIDER_BINDINGS:Constant]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Default provider binding per task domain.
DEFAULT_LLM_PROVIDER_BINDINGS: Dict[str, str] = {
"dashboard_validation": "",
@@ -73,7 +73,7 @@ DEFAULT_LLM_PROVIDER_BINDINGS: Dict[str, str] = {
# [DEF:DEFAULT_LLM_ASSISTANT_SETTINGS:Constant]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Default planner settings for assistant chat intent model/provider resolution.
DEFAULT_LLM_ASSISTANT_SETTINGS: Dict[str, str] = {
"assistant_planner_provider": "",
@@ -83,7 +83,7 @@ DEFAULT_LLM_ASSISTANT_SETTINGS: Dict[str, str] = {
# [DEF:normalize_llm_settings:Function]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Ensure llm settings contain stable schema with prompts section and default templates.
# @PRE: llm_settings is dictionary-like value or None.
# @POST: Returned dict contains prompts with all required template keys.
@@ -127,7 +127,7 @@ def normalize_llm_settings(llm_settings: Any) -> Dict[str, Any]:
# [DEF:is_multimodal_model:Function]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Heuristically determine whether model supports image input required for dashboard validation.
# @PRE: model_name may be empty or mixed-case.
# @POST: Returns True when model likely supports multimodal input.
@@ -169,7 +169,7 @@ def is_multimodal_model(model_name: str, provider_type: Optional[str] = None) ->
# [DEF:resolve_bound_provider_id:Function]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Resolve provider id configured for a task binding with fallback to default provider.
# @PRE: llm_settings is normalized or raw dict from config.
# @POST: Returns configured provider id or fallback id/empty string when not defined.
@@ -185,7 +185,7 @@ def resolve_bound_provider_id(llm_settings: Any, task_key: str) -> str:
# [DEF:render_prompt:Function]
# @TIER: STANDARD
# @COMPLEXITY: 3
# @PURPOSE: Render prompt template using deterministic placeholder replacement with graceful fallback.
# @PRE: template is a string and variables values are already stringifiable.
# @POST: Returns rendered prompt text with known placeholders substituted.