semantic update
This commit is contained in:
@@ -18,6 +18,7 @@ import re
|
||||
import json
|
||||
import datetime
|
||||
import fnmatch
|
||||
import argparse
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, List, Optional, Any, Pattern, Tuple, Set
|
||||
@@ -965,6 +966,106 @@ class SemanticMapGenerator:
|
||||
self._generate_module_map()
|
||||
# [/DEF:_generate_artifacts:Function]
|
||||
|
||||
# [DEF:_print_agent_report:Function]
# @TIER: STANDARD
# @PURPOSE: Prints a JSON report optimized for AI agent orchestration and control.
# @PRE: Validation and artifact generation are complete.
# @POST: JSON report printed to stdout.
def _print_agent_report(self):
    """Print a single-line JSON compliance report for AI-agent consumption.

    Aggregates per-file compliance data into: a weighted global score,
    critical parsing-error entries, counts of files scoring below 0.7 per
    tier, two priority counters, and the status of a fixed list of target
    files. The result is printed as one JSON object on stdout.
    """
    with belief_scope("_print_agent_report"):

        def global_score():
            # Weighted mean of file scores (CRITICAL=3, STANDARD=2, else 1),
            # re-using the weighting logic from _generate_report.
            weighted = 0
            weight_sum = 0
            for data in self.file_scores.values():
                w = 3 if data["tier"] == Tier.CRITICAL else (2 if data["tier"] == Tier.STANDARD else 1)
                weighted += data["score"] * w
                weight_sum += w
            return weighted / weight_sum if weight_sum > 0 else 0

        def collect_file_data():
            # Flatten the entity tree into {path: {"issues", "tier", "score"}}.
            # Tier label comes from the first entity seen for a path; score
            # from self.file_scores (0 when the file was never scored).
            acc = {}

            def walk(entities):
                for entity in entities:
                    path = entity.file_path
                    if path not in acc:
                        acc[path] = {
                            "issues": [],
                            "tier": entity.get_tier().value,
                            "score": self.file_scores.get(path, {}).get("score", 0),
                        }
                    acc[path]["issues"].extend(i.to_dict() for i in entity.compliance_issues)
                    walk(entity.children)

            walk(self.entities)
            return acc

        def parsing_errors(fd):
            # Issues whose message mentions "parsing" and whose severity is
            # error (or whose message mentions "critical").
            errs = []
            for path, data in fd.items():
                for issue in data["issues"]:
                    msg = issue.get("message", "").lower()
                    sev = issue.get("severity", "").lower()
                    if "parsing" in msg and (sev == "error" or "critical" in msg):
                        errs.append({
                            "file": path,
                            "severity": issue.get("severity"),
                            "message": issue.get("message"),
                        })
            return errs

        def low_score_by_tier(fd):
            # Count files scoring below 0.7, bucketed by tier label;
            # unrecognized tier labels fall into "UNKNOWN".
            counts = {"CRITICAL": 0, "STANDARD": 0, "TRIVIAL": 0, "UNKNOWN": 0}
            for data in fd.values():
                if data["score"] < 0.7:
                    tier = data["tier"]
                    counts[tier if tier in counts else "UNKNOWN"] += 1
            return counts

        def priority_counts(fd):
            # p2: CRITICAL-tier files with any "Missing Mandatory Tag" issue.
            # p3: STANDARD-tier files missing the mandatory @RELATION tag.
            p2 = 0
            p3 = 0
            for data in fd.values():
                issues = data["issues"]
                if data["tier"] == "CRITICAL" and any(
                    "Missing Mandatory Tag" in i.get("message", "") for i in issues
                ):
                    p2 += 1
                if data["tier"] == "STANDARD" and any(
                    "@RELATION" in i.get("message", "")
                    and "Missing Mandatory Tag" in i.get("message", "")
                    for i in issues
                ):
                    p3 += 1
            return p2, p3

        def target_status(fd):
            # Status of a fixed watch-list of high-interest project files.
            targets = [
                'frontend/src/routes/migration/+page.svelte',
                'frontend/src/routes/migration/mappings/+page.svelte',
                'frontend/src/components/auth/ProtectedRoute.svelte',
                'backend/src/core/auth/repository.py',
                'backend/src/core/migration/risk_assessor.py',
                'backend/src/api/routes/migration.py',
                'backend/src/models/config.py',
                'backend/src/services/auth_service.py',
                'backend/src/core/config_manager.py',
                'backend/src/core/migration_engine.py'
            ]
            status = []
            for t in targets:
                f = fd.get(t)
                if not f:
                    status.append({"path": t, "found": False})
                    continue
                sc = f["score"]
                status.append({
                    "path": t,
                    "found": True,
                    "score": sc,
                    "tier": f["tier"],
                    "under_0_7": sc < 0.7,
                    "violations": len(f["issues"]) > 0,
                    "issues_count": len(f["issues"]),
                })
            return status

        file_data = collect_file_data()
        cpe = parsing_errors(file_data)
        p2, p3 = priority_counts(file_data)
        out = {
            "global_score": global_score(),
            "critical_parsing_errors_count": len(cpe),
            "critical_parsing_errors": cpe[:50],  # cap the list so the report stays bounded
            "lt_0_7_by_tier": low_score_by_tier(file_data),
            "priority_1_blockers": len(cpe),
            "priority_2_tier1_critical_missing_mandatory_tags_files": p2,
            "priority_3_tier2_standard_missing_relation_files": p3,
            "targets": target_status(file_data),
            "total_files": len(file_data),
        }
        # Single-line JSON (no indent) so agents can parse stdout directly.
        print(json.dumps(out, ensure_ascii=False))
# [/DEF:_print_agent_report:Function]
|
||||
|
||||
# [DEF:_generate_report:Function]
|
||||
# @TIER: CRITICAL
|
||||
# @PURPOSE: Generates the Markdown compliance report with severity levels.
|
||||
@@ -1306,7 +1407,14 @@ class SemanticMapGenerator:
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: always build the semantic map; optionally emit the
    # machine-readable agent report afterwards.
    cli = argparse.ArgumentParser(description="Generate Semantic Map and Compliance Reports")
    cli.add_argument("--agent-report", action="store_true", help="Output JSON report for AI agents")
    options = cli.parse_args()

    gen = SemanticMapGenerator(PROJECT_ROOT)
    gen.run()

    if options.agent_report:
        gen._print_agent_report()
|
||||
|
||||
# [/DEF:generate_semantic_map:Module]
|
||||
|
||||
Reference in New Issue
Block a user