semantics

2026-04-02 12:12:23 +03:00
parent ea457c8d18
commit c6147385e5
15 changed files with 138 additions and 312 deletions


@@ -6,7 +6,7 @@ os.environ["ENCRYPTION_KEY"] = "OnrCzomBWbIjTf7Y-fnhL2adlU55bHZQjp8zX5zBC5w="
# @SEMANTICS: tests, assistant, authz, confirmation, rbac
# @PURPOSE: Verify assistant confirmation ownership, expiration, and deny behavior for restricted users.
# @LAYER: UI (API Tests)
# @RELATION: DEPENDS_ON -> backend.src.api.routes.assistant
# @RELATION: DEPENDS_ON -> AssistantApi
# @INVARIANT: Security-sensitive flows fail closed for unauthorized actors.
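This hunk is representative of the whole commit: @RELATION targets move from dotted module paths (backend.src.api.routes.assistant) to bracketed node names ([AssistantApi]). A minimal sketch of a checker for the normalized grammar; the verb set and regex are assumptions for illustration, since the diff does not show the annotation tooling itself:

# Sketch of the normalized @RELATION grammar (VERB -> [TargetNode]).
# The verb list and regex below are assumptions, not part of this diff.
import re
from typing import Optional, Tuple

RELATION_RE = re.compile(r"#\s*@RELATION:\s*(DEPENDS_ON|USES|USED_BY|CALLS)\s*->\s*\[(\w+)\]")

def parse_relation(line: str) -> Optional[Tuple[str, str]]:
    """Return (verb, target) for a normalized annotation line, else None."""
    match = RELATION_RE.search(line)
    return (match.group(1), match.group(2)) if match else None

# The old dotted-path form no longer matches; the normalized form does.
assert parse_relation("# @RELATION: DEPENDS_ON -> backend.src.api.routes.assistant") is None
assert parse_relation("# @RELATION: DEPENDS_ON -> [AssistantApi]") == ("DEPENDS_ON", "AssistantApi")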


@@ -3,7 +3,7 @@
# @SEMANTICS: datasets, api, tests, pagination, mapping, docs
# @PURPOSE: Unit tests for datasets API endpoints.
# @LAYER: API
# @RELATION: DEPENDS_ON -> [src.api.routes.datasets:Module]
# @RELATION: DEPENDS_ON -> [DatasetsApi]
# @INVARIANT: Endpoint contracts remain stable for success and validation failure paths.
import pytest
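The invariant above covers both success and validation-failure paths. A hedged sketch of such a contract test pair against a self-contained FastAPI app; the route, parameter, and response shape are illustrative assumptions, not taken from this diff:

# Self-contained contract-test sketch; only the 422-on-validation-failure
# behavior is FastAPI fact, everything else here is assumed for illustration.
from fastapi import FastAPI, Query
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/api/v1/datasets")
def list_datasets(page: int = Query(1, ge=1)):
    return {"page": page, "items": []}

client = TestClient(app)

def test_pagination_rejects_non_positive_page():
    # FastAPI returns 422 for query-parameter validation failures.
    response = client.get("/api/v1/datasets", params={"page": 0})
    assert response.status_code == 422

def test_pagination_success_contract():
    response = client.get("/api/v1/datasets", params={"page": 2})
    assert response.status_code == 200
    assert response.json() == {"page": 2, "items": []}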


@@ -1456,38 +1456,18 @@ async def export_validation(session_id: str, format: ArtifactFormat=Query(Artifa
# @POST: Returns at most one active clarification question with why_it_matters, current_guess, and ordered options; sessions without a clarification record return a non-blocking empty state.
# @SIDE_EFFECT: May normalize clarification pointer and readiness state in persistence.
# @DATA_CONTRACT: Input[session_id:str] -> Output[ClarificationStateResponse]
@router.get(
"/sessions/{session_id}/clarification",
response_model=ClarificationStateResponse,
dependencies=[
Depends(_require_auto_review_flag),
Depends(_require_clarification_flag),
Depends(has_permission("dataset:session", "READ")),
],
)
async def get_clarification_state(
session_id: str,
repository: DatasetReviewSessionRepository = Depends(_get_repository),
clarification_engine: ClarificationEngine = Depends(_get_clarification_engine),
current_user: User = Depends(get_current_user),
):
with belief_scope("dataset_review.get_clarification_state"):
@router.get('/sessions/{session_id}/clarification', response_model=ClarificationStateResponse, dependencies=[Depends(_require_auto_review_flag), Depends(_require_clarification_flag), Depends(has_permission('dataset:session', 'READ'))])
async def get_clarification_state(session_id: str, repository: DatasetReviewSessionRepository=Depends(_get_repository), clarification_engine: ClarificationEngine=Depends(_get_clarification_engine), current_user: User=Depends(get_current_user)):
with belief_scope('get_clarification_state'):
logger.reason('Belief protocol reasoning checkpoint for get_clarification_state')
session = _get_owned_session_or_404(repository, session_id, current_user)
if not session.clarification_sessions:
logger.reflect('Belief protocol postcondition checkpoint for get_clarification_state')
return _serialize_empty_clarification_state()
clarification_session = _get_latest_clarification_session_or_404(session)
current_question = clarification_engine.build_question_payload(session)
return _serialize_clarification_state(
ClarificationStateResult(
clarification_session=clarification_session,
current_question=current_question,
session=session,
changed_findings=[],
)
)
logger.reflect('Belief protocol postcondition checkpoint for get_clarification_state')
return _serialize_clarification_state(ClarificationStateResult(clarification_session=clarification_session, current_question=current_question, session=session, changed_findings=[]))
# [/DEF:get_clarification_state:Function]
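A hedged usage sketch for the endpoint above, showing the non-blocking empty state promised by the @POST contract; the base URL, bearer auth, and response field names are assumptions:

# Client-side sketch only; auth scheme and field names are guesses.
import httpx

def fetch_clarification_state(base_url: str, session_id: str, token: str) -> dict:
    response = httpx.get(
        f"{base_url}/sessions/{session_id}/clarification",
        headers={"Authorization": f"Bearer {token}"},
    )
    response.raise_for_status()
    state = response.json()
    # Per the @POST contract, a session without a clarification record
    # yields a non-blocking empty state rather than an error.
    if not state.get("current_question"):
        return {"blocked": False, "question": None}
    return state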
@@ -1499,38 +1479,15 @@ async def get_clarification_state(
# @POST: Clarification session enters active state with one current question or completes deterministically when no unresolved items remain.
# @SIDE_EFFECT: Mutates clarification pointer, readiness, and recommended action.
# @DATA_CONTRACT: Input[session_id:str] -> Output[ClarificationStateResponse]
@router.post(
"/sessions/{session_id}/clarification/resume",
response_model=ClarificationStateResponse,
dependencies=[
Depends(_require_auto_review_flag),
Depends(_require_clarification_flag),
Depends(has_permission("dataset:session", "MANAGE")),
],
)
async def resume_clarification(
session_id: str,
session_version: int = Depends(_require_session_version_header),
repository: DatasetReviewSessionRepository = Depends(_get_repository),
clarification_engine: ClarificationEngine = Depends(_get_clarification_engine),
current_user: User = Depends(get_current_user),
):
with belief_scope("dataset_review.resume_clarification"):
session = _prepare_owned_session_mutation(
repository, session_id, current_user, session_version
)
@router.post('/sessions/{session_id}/clarification/resume', response_model=ClarificationStateResponse, dependencies=[Depends(_require_auto_review_flag), Depends(_require_clarification_flag), Depends(has_permission('dataset:session', 'MANAGE'))])
async def resume_clarification(session_id: str, session_version: int=Depends(_require_session_version_header), repository: DatasetReviewSessionRepository=Depends(_get_repository), clarification_engine: ClarificationEngine=Depends(_get_clarification_engine), current_user: User=Depends(get_current_user)):
with belief_scope('resume_clarification'):
logger.reason('Belief protocol reasoning checkpoint for resume_clarification')
session = _prepare_owned_session_mutation(repository, session_id, current_user, session_version)
clarification_session = _get_latest_clarification_session_or_404(session)
current_question = clarification_engine.build_question_payload(session)
return _serialize_clarification_state(
ClarificationStateResult(
clarification_session=clarification_session,
current_question=current_question,
session=session,
changed_findings=[],
)
)
logger.reflect('Belief protocol postcondition checkpoint for resume_clarification')
return _serialize_clarification_state(ClarificationStateResult(clarification_session=clarification_session, current_question=current_question, session=session, changed_findings=[]))
# [/DEF:resume_clarification:Function]
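The resume endpoint additionally requires a session version via _require_session_version_header. A hedged sketch of the call; the header name used here is a guess, since the dependency's implementation is not in this diff:

# "X-Session-Version" is a hypothetical header name for illustration.
import httpx

def resume_clarification(base_url: str, session_id: str, version: int, token: str) -> dict:
    response = httpx.post(
        f"{base_url}/sessions/{session_id}/clarification/resume",
        headers={
            "Authorization": f"Bearer {token}",
            "X-Session-Version": str(version),  # hypothetical header name
        },
    )
    # A stale version should surface as a client error, which
    # raise_for_status converts into an exception.
    response.raise_for_status()
    return response.json()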


@@ -4,9 +4,9 @@
# @SEMANTICS: git, routes, api, fastapi, repository, deployment
# @PURPOSE: Provides FastAPI endpoints for Git integration operations.
# @LAYER: API
# @RELATION: USES -> [backend.src.services.git_service.GitService]
# @RELATION: USES -> [GitService]
# @RELATION: USES -> [GitSchemas]
# @RELATION: USES -> [backend.src.models.git]
# @RELATION: USES -> [GitModels]
#
# @INVARIANT: All Git operations must be routed through GitService.
@@ -768,7 +768,7 @@ async def delete_gitea_repository(
# @POST: Repository is initialized on disk and a GitRepository record is saved in DB.
# @PARAM: dashboard_ref (str)
# @PARAM: init_data (RepoInitRequest)
# @RELATION: CALLS -> GitService.init_repo
# @RELATION: CALLS -> [GitService]
@router.post("/repositories/{dashboard_ref}/init")
async def init_repository(
dashboard_ref: str,
@@ -1090,7 +1090,7 @@ async def push_changes(
# @PRE: `dashboard_ref` repository exists and has a remote configured.
# @POST: Remote changes are fetched and merged into the local branch.
# @PARAM: dashboard_ref (str)
# @RELATION: CALLS -> GitService.pull
# @RELATION: CALLS -> [GitService]
@router.post("/repositories/{dashboard_ref}/pull")
async def pull_changes(
dashboard_ref: str,
@@ -1219,7 +1219,7 @@ async def get_merge_conflicts(
# @PURPOSE: Apply mine/theirs/manual conflict resolutions from WebUI and stage files.
# @PRE: `dashboard_ref` resolves; request contains at least one resolution item.
# @POST: Resolved files are staged in index.
# @RELATION: CALLS -> GitService.resolve_conflicts
# @RELATION: CALLS -> [GitService]
@router.post("/repositories/{dashboard_ref}/merge/resolve")
async def resolve_merge_conflicts(
dashboard_ref: str,
@@ -1279,7 +1279,7 @@ async def abort_merge(
# @PURPOSE: Finalize unfinished merge from WebUI flow.
# @PRE: All conflicts are resolved and staged.
# @POST: Merge commit is created.
# @RELATION: CALLS -> GitService.continue_merge
# @RELATION: CALLS -> [GitService]
@router.post("/repositories/{dashboard_ref}/merge/continue")
async def continue_merge(
dashboard_ref: str,
@@ -1310,7 +1310,7 @@ async def continue_merge(
# @POST: Dashboard YAMLs are exported from Superset and committed to Git.
# @PARAM: dashboard_ref (str)
# @PARAM: source_env_id (Optional[str])
# @RELATION: CALLS -> GitPlugin.execute
# @RELATION: CALLS -> [GitPlugin]
@router.post("/repositories/{dashboard_ref}/sync")
async def sync_dashboard(
dashboard_ref: str,
@@ -1348,7 +1348,7 @@ async def sync_dashboard(
# @PURPOSE: Promote changes between branches via MR or direct merge.
# @PRE: dashboard repository is initialized and Git config is valid.
# @POST: Returns promotion result metadata.
# @RELATION: CALLS -> GitPlugin.execute
# @RELATION: CALLS -> [GitPlugin]
@router.post("/repositories/{dashboard_ref}/promote", response_model=PromoteResponse)
async def promote_dashboard(
dashboard_ref: str,
@@ -1499,7 +1499,7 @@ async def get_environments(
# @POST: Dashboard YAMLs are read from Git and imported into the target Superset.
# @PARAM: dashboard_ref (str)
# @PARAM: deploy_data (DeployRequest)
# @RELATION: CALLS -> GitPlugin.execute
# @RELATION: CALLS -> [GitPlugin]
@router.post("/repositories/{dashboard_ref}/deploy")
async def deploy_dashboard(
dashboard_ref: str,
@@ -1677,7 +1677,7 @@ async def get_repository_diff(
# @PURPOSE: Generate a suggested commit message using LLM.
# @PRE: Repository for `dashboard_ref` is initialized.
# @POST: Returns a suggested commit message string.
# @RELATION: CALLS -> GitService.generate_commit_message
# @RELATION: CALLS -> [GitService]
@router.post("/repositories/{dashboard_ref}/generate-message")
async def generate_commit_message(
dashboard_ref: str,
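The hunks above all end mid-signature, but together they sketch the WebUI merge flow: resolve conflicts, then continue the merge. A hedged client-side sketch of that two-step call; only the route paths come from this diff, while the payload fields, header, and response shape are assumptions:

# Two-step merge flow sketch; resolution payload fields are guesses.
import httpx

def resolve_and_continue(base_url: str, dashboard_ref: str, token: str) -> dict:
    headers = {"Authorization": f"Bearer {token}"}
    resolutions = {"resolutions": [{"path": "dashboard.yaml", "strategy": "mine"}]}
    httpx.post(
        f"{base_url}/repositories/{dashboard_ref}/merge/resolve",
        json=resolutions, headers=headers,
    ).raise_for_status()
    # @PRE of /merge/continue: all conflicts are resolved and staged.
    response = httpx.post(
        f"{base_url}/repositories/{dashboard_ref}/merge/continue", headers=headers,
    )
    response.raise_for_status()
    return response.json()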


@@ -4,10 +4,10 @@
# @SEMANTICS: superset, api, client, rest, http, dashboard, dataset, import, export
# @PURPOSE: Provides a high-level client for interacting with the Superset REST API, encapsulating request logic, error handling, and pagination.
# @LAYER: Core
# @RELATION: [DEPENDS_ON] ->[ConfigModels]
# @RELATION: [DEPENDS_ON] ->[APIClient]
# @RELATION: [DEPENDS_ON] ->[SupersetAPIError]
# @RELATION: [DEPENDS_ON] ->[get_filename_from_headers]
# @RELATION: DEPENDS_ON -> [ConfigModels]
# @RELATION: DEPENDS_ON -> [APIClient]
# @RELATION: DEPENDS_ON -> [SupersetAPIError]
# @RELATION: DEPENDS_ON -> [get_filename_from_headers]
#
# @INVARIANT: All network operations must use the internal APIClient instance.
# @PUBLIC_API: SupersetClient
@@ -33,9 +33,9 @@ app_logger = cast(Any, app_logger)
# [DEF:SupersetClient:Class]
# @COMPLEXITY: 3
# @PURPOSE: Wrapper class over the Superset REST API that provides methods for working with dashboards and datasets.
# @RELATION: [DEPENDS_ON] ->[ConfigModels]
# @RELATION: [DEPENDS_ON] ->[APIClient]
# @RELATION: [DEPENDS_ON] ->[SupersetAPIError]
# @RELATION: DEPENDS_ON -> [ConfigModels]
# @RELATION: DEPENDS_ON -> [APIClient]
# @RELATION: DEPENDS_ON -> [SupersetAPIError]
class SupersetClient:
# [DEF:SupersetClientInit:Function]
# @COMPLEXITY: 3
@@ -43,8 +43,8 @@ class SupersetClient:
# @PRE: `env` must be a valid Environment object.
# @POST: The `env` and `network` attributes are created and ready for use.
# @DATA_CONTRACT: Input[Environment] -> self.network[APIClient]
# @RELATION: [DEPENDS_ON] ->[Environment]
# @RELATION: [DEPENDS_ON] ->[APIClient]
# @RELATION: DEPENDS_ON -> [Environment]
# @RELATION: DEPENDS_ON -> [APIClient]
def __init__(self, env: Environment):
with belief_scope("SupersetClientInit"):
app_logger.reason(
@@ -78,7 +78,7 @@ class SupersetClient:
# @PRE: self.network must be initialized with valid auth configuration.
# @POST: Client is authenticated and tokens are stored.
# @DATA_CONTRACT: None -> Output[Dict[str, str]]
# @RELATION: [CALLS] ->[APIClient]
# @RELATION: CALLS -> [APIClient]
def authenticate(self) -> Dict[str, str]:
with belief_scope("SupersetClientAuthenticate"):
app_logger.reason(
@@ -1379,6 +1379,8 @@ class SupersetClient:
# [/DEF:SupersetClientGetDataset:Function]
from src.logger import belief_scope, logger
# [DEF:SupersetClientCompileDatasetPreview:Function]
# @COMPLEXITY: 4
# @PURPOSE: Compile dataset preview SQL through the strongest supported Superset preview endpoint family and return normalized SQL output.
@@ -1391,65 +1393,24 @@ class SupersetClient:
# @RELATION: CALLS -> [ConnectionContracts]
# @RELATION: CALLS -> [SupersetClientExtractCompiledSqlFromPreviewResponse]
# @SIDE_EFFECT: Performs upstream dataset lookup and preview network I/O against Superset.
def compile_dataset_preview(
self,
dataset_id: int,
template_params: Optional[Dict[str, Any]] = None,
effective_filters: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
with belief_scope("SupersetClient.compile_dataset_preview", f"id={dataset_id}"):
def compile_dataset_preview(self, dataset_id: int, template_params: Optional[Dict[str, Any]]=None, effective_filters: Optional[List[Dict[str, Any]]]=None) -> Dict[str, Any]:
with belief_scope('SupersetClientCompileDatasetPreview'):
logger.reason('Belief protocol reasoning checkpoint for SupersetClientCompileDatasetPreview')
dataset_response = self.get_dataset(dataset_id)
dataset_record = (
dataset_response.get("result", dataset_response)
if isinstance(dataset_response, dict)
else {}
)
query_context = self.build_dataset_preview_query_context(
dataset_id=dataset_id,
dataset_record=dataset_record,
template_params=template_params or {},
effective_filters=effective_filters or [],
)
legacy_form_data = self.build_dataset_preview_legacy_form_data(
dataset_id=dataset_id,
dataset_record=dataset_record,
template_params=template_params or {},
effective_filters=effective_filters or [],
)
legacy_form_data_payload = json.dumps(
legacy_form_data, sort_keys=True, default=str
)
dataset_record = dataset_response.get('result', dataset_response) if isinstance(dataset_response, dict) else {}
query_context = self.build_dataset_preview_query_context(dataset_id=dataset_id, dataset_record=dataset_record, template_params=template_params or {}, effective_filters=effective_filters or [])
legacy_form_data = self.build_dataset_preview_legacy_form_data(dataset_id=dataset_id, dataset_record=dataset_record, template_params=template_params or {}, effective_filters=effective_filters or [])
legacy_form_data_payload = json.dumps(legacy_form_data, sort_keys=True, default=str)
request_payload = json.dumps(query_context)
strategy_attempts: List[Dict[str, Any]] = []
strategy_candidates: List[Dict[str, Any]] = [
{
"endpoint_kind": "legacy_explore_form_data",
"endpoint": "/explore_json/form_data",
"request_transport": "query_param_form_data",
"params": {"form_data": legacy_form_data_payload},
},
{
"endpoint_kind": "legacy_data_form_data",
"endpoint": "/data",
"request_transport": "query_param_form_data",
"params": {"form_data": legacy_form_data_payload},
},
{
"endpoint_kind": "v1_chart_data",
"endpoint": "/chart/data",
"request_transport": "json_body",
"data": request_payload,
"headers": {"Content-Type": "application/json"},
},
]
strategy_candidates: List[Dict[str, Any]] = [{'endpoint_kind': 'legacy_explore_form_data', 'endpoint': '/explore_json/form_data', 'request_transport': 'query_param_form_data', 'params': {'form_data': legacy_form_data_payload}}, {'endpoint_kind': 'legacy_data_form_data', 'endpoint': '/data', 'request_transport': 'query_param_form_data', 'params': {'form_data': legacy_form_data_payload}}, {'endpoint_kind': 'v1_chart_data', 'endpoint': '/chart/data', 'request_transport': 'json_body', 'data': request_payload, 'headers': {'Content-Type': 'application/json'}}]
for candidate in strategy_candidates:
endpoint_kind = candidate["endpoint_kind"]
endpoint_path = candidate["endpoint"]
request_transport = candidate["request_transport"]
request_params = deepcopy(candidate.get("params") or {})
request_body = candidate.get("data")
request_headers = deepcopy(candidate.get("headers") or {})
endpoint_kind = candidate['endpoint_kind']
endpoint_path = candidate['endpoint']
request_transport = candidate['request_transport']
request_params = deepcopy(candidate.get('params') or {})
request_body = candidate.get('data')
request_headers = deepcopy(candidate.get('headers') or {})
request_param_keys = sorted(request_params.keys())
request_payload_keys: List[str] = []
if isinstance(request_body, str):
@@ -1461,99 +1422,25 @@ class SupersetClient:
request_payload_keys = []
elif isinstance(request_body, dict):
request_payload_keys = sorted(request_body.keys())
strategy_diagnostics = {
"endpoint": endpoint_path,
"endpoint_kind": endpoint_kind,
"request_transport": request_transport,
"contains_root_datasource": endpoint_kind == "v1_chart_data"
and "datasource" in query_context,
"contains_form_datasource": endpoint_kind.startswith("legacy_")
and "datasource" in legacy_form_data,
"contains_query_object_datasource": bool(
query_context.get("queries")
)
and isinstance(query_context["queries"][0], dict)
and "datasource" in query_context["queries"][0],
"request_param_keys": request_param_keys,
"request_payload_keys": request_payload_keys,
}
app_logger.reason(
"Attempting Superset dataset preview compilation strategy",
extra={
"dataset_id": dataset_id,
**strategy_diagnostics,
"request_params": request_params,
"request_payload": request_body,
"legacy_form_data": legacy_form_data
if endpoint_kind.startswith("legacy_")
else None,
"query_context": query_context
if endpoint_kind == "v1_chart_data"
else None,
"template_param_count": len(template_params or {}),
"filter_count": len(effective_filters or []),
},
)
strategy_diagnostics = {'endpoint': endpoint_path, 'endpoint_kind': endpoint_kind, 'request_transport': request_transport, 'contains_root_datasource': endpoint_kind == 'v1_chart_data' and 'datasource' in query_context, 'contains_form_datasource': endpoint_kind.startswith('legacy_') and 'datasource' in legacy_form_data, 'contains_query_object_datasource': bool(query_context.get('queries')) and isinstance(query_context['queries'][0], dict) and ('datasource' in query_context['queries'][0]), 'request_param_keys': request_param_keys, 'request_payload_keys': request_payload_keys}
app_logger.reason('Attempting Superset dataset preview compilation strategy', extra={'dataset_id': dataset_id, **strategy_diagnostics, 'request_params': request_params, 'request_payload': request_body, 'legacy_form_data': legacy_form_data if endpoint_kind.startswith('legacy_') else None, 'query_context': query_context if endpoint_kind == 'v1_chart_data' else None, 'template_param_count': len(template_params or {}), 'filter_count': len(effective_filters or [])})
try:
response = self.network.request(
method="POST",
endpoint=endpoint_path,
params=request_params or None,
data=request_body,
headers=request_headers or None,
)
normalized = self._extract_compiled_sql_from_preview_response(
response
)
normalized["query_context"] = query_context
normalized["legacy_form_data"] = legacy_form_data
normalized["endpoint"] = endpoint_path
normalized["endpoint_kind"] = endpoint_kind
normalized["dataset_id"] = dataset_id
normalized["strategy_attempts"] = strategy_attempts + [
{
**strategy_diagnostics,
"success": True,
}
]
app_logger.reflect(
"Dataset preview compilation returned normalized SQL payload",
extra={
"dataset_id": dataset_id,
**strategy_diagnostics,
"success": True,
"compiled_sql_length": len(
str(normalized.get("compiled_sql") or "")
),
"response_diagnostics": normalized.get(
"response_diagnostics"
),
},
)
response = self.network.request(method='POST', endpoint=endpoint_path, params=request_params or None, data=request_body, headers=request_headers or None)
normalized = self._extract_compiled_sql_from_preview_response(response)
normalized['query_context'] = query_context
normalized['legacy_form_data'] = legacy_form_data
normalized['endpoint'] = endpoint_path
normalized['endpoint_kind'] = endpoint_kind
normalized['dataset_id'] = dataset_id
normalized['strategy_attempts'] = strategy_attempts + [{**strategy_diagnostics, 'success': True}]
app_logger.reflect('Dataset preview compilation returned normalized SQL payload', extra={'dataset_id': dataset_id, **strategy_diagnostics, 'success': True, 'compiled_sql_length': len(str(normalized.get('compiled_sql') or '')), 'response_diagnostics': normalized.get('response_diagnostics')})
logger.reflect('Belief protocol postcondition checkpoint for SupersetClientCompileDatasetPreview')
return normalized
except Exception as exc:
failure_diagnostics = {
**strategy_diagnostics,
"success": False,
"error": str(exc),
}
failure_diagnostics = {**strategy_diagnostics, 'success': False, 'error': str(exc)}
strategy_attempts.append(failure_diagnostics)
app_logger.explore(
"Superset dataset preview compilation strategy failed",
extra={
"dataset_id": dataset_id,
**failure_diagnostics,
"request_params": request_params,
"request_payload": request_body,
},
)
raise SupersetAPIError(
"Superset preview compilation failed for all known strategies "
f"(attempts={strategy_attempts!r})"
)
app_logger.explore('Superset dataset preview compilation strategy failed', extra={'dataset_id': dataset_id, **failure_diagnostics, 'request_params': request_params, 'request_payload': request_body})
raise SupersetAPIError(f'Superset preview compilation failed for all known strategies (attempts={strategy_attempts!r})')
# [/DEF:SupersetClientCompileDatasetPreview:Function]
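The function above embodies a fallback pattern: try each endpoint family in order, record per-attempt diagnostics, and raise with the full attempt log only after every candidate fails. A distilled, self-contained sketch of that pattern with toy strategies:

# Generic fallback runner; the strategy bodies are toys, not the real
# Superset transports.
from typing import Any, Callable, Dict, List, Tuple

def run_with_fallback(strategies: List[Tuple[str, Callable[[], Any]]]) -> Any:
    attempts: List[Dict[str, Any]] = []
    for kind, attempt in strategies:
        try:
            return attempt()
        except Exception as exc:  # collect diagnostics and keep trying
            attempts.append({"endpoint_kind": kind, "success": False, "error": str(exc)})
    raise RuntimeError(f"all strategies failed (attempts={attempts!r})")

def _legacy_attempt() -> Any:
    raise ValueError("410 Gone")  # simulate a retired legacy endpoint

result = run_with_fallback([
    ("legacy_explore_form_data", _legacy_attempt),
    ("v1_chart_data", lambda: {"compiled_sql": "SELECT 1"}),
])
assert result == {"compiled_sql": "SELECT 1"}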
# [DEF:SupersetClientBuildDatasetPreviewLegacyFormData:Function]
@@ -1562,74 +1449,35 @@ class SupersetClient:
# @PRE: dataset_record should come from Superset dataset detail when possible.
# @POST: Returns one serialized-ready form_data structure preserving native filter clauses in legacy transport fields.
# @DATA_CONTRACT: Input[dataset_id:int,dataset_record:Dict,template_params:Dict,effective_filters:List[Dict]] -> Output[Dict[str, Any]]
# @RELATION: [CALLS] ->[SupersetClientBuildDatasetPreviewQueryContext]
# @RELATION: CALLS -> [SupersetClientBuildDatasetPreviewQueryContext]
# @SIDE_EFFECT: Emits reasoning diagnostics describing the inferred legacy payload shape.
def build_dataset_preview_legacy_form_data(
self,
dataset_id: int,
dataset_record: Dict[str, Any],
template_params: Dict[str, Any],
effective_filters: List[Dict[str, Any]],
) -> Dict[str, Any]:
with belief_scope(
"SupersetClient.build_dataset_preview_legacy_form_data", f"id={dataset_id}"
):
query_context = self.build_dataset_preview_query_context(
dataset_id=dataset_id,
dataset_record=dataset_record,
template_params=template_params,
effective_filters=effective_filters,
)
query_object = deepcopy(
query_context.get("queries", [{}])[0]
if query_context.get("queries")
else {}
)
legacy_form_data = deepcopy(query_context.get("form_data", {}))
legacy_form_data.pop("datasource", None)
legacy_form_data["metrics"] = deepcopy(
query_object.get("metrics", ["count"])
)
legacy_form_data["columns"] = deepcopy(query_object.get("columns", []))
legacy_form_data["orderby"] = deepcopy(query_object.get("orderby", []))
legacy_form_data["annotation_layers"] = deepcopy(
query_object.get("annotation_layers", [])
)
legacy_form_data["row_limit"] = query_object.get("row_limit", 1000)
legacy_form_data["series_limit"] = query_object.get("series_limit", 0)
legacy_form_data["url_params"] = deepcopy(
query_object.get("url_params", template_params)
)
legacy_form_data["applied_time_extras"] = deepcopy(
query_object.get("applied_time_extras", {})
)
legacy_form_data["result_format"] = query_context.get(
"result_format", "json"
)
legacy_form_data["result_type"] = query_context.get("result_type", "query")
legacy_form_data["force"] = bool(query_context.get("force", True))
extras = query_object.get("extras")
def build_dataset_preview_legacy_form_data(self, dataset_id: int, dataset_record: Dict[str, Any], template_params: Dict[str, Any], effective_filters: List[Dict[str, Any]]) -> Dict[str, Any]:
with belief_scope('SupersetClientBuildDatasetPreviewLegacyFormData'):
logger.reason('Belief protocol reasoning checkpoint for SupersetClientBuildDatasetPreviewLegacyFormData')
query_context = self.build_dataset_preview_query_context(dataset_id=dataset_id, dataset_record=dataset_record, template_params=template_params, effective_filters=effective_filters)
query_object = deepcopy(query_context.get('queries', [{}])[0] if query_context.get('queries') else {})
legacy_form_data = deepcopy(query_context.get('form_data', {}))
legacy_form_data.pop('datasource', None)
legacy_form_data['metrics'] = deepcopy(query_object.get('metrics', ['count']))
legacy_form_data['columns'] = deepcopy(query_object.get('columns', []))
legacy_form_data['orderby'] = deepcopy(query_object.get('orderby', []))
legacy_form_data['annotation_layers'] = deepcopy(query_object.get('annotation_layers', []))
legacy_form_data['row_limit'] = query_object.get('row_limit', 1000)
legacy_form_data['series_limit'] = query_object.get('series_limit', 0)
legacy_form_data['url_params'] = deepcopy(query_object.get('url_params', template_params))
legacy_form_data['applied_time_extras'] = deepcopy(query_object.get('applied_time_extras', {}))
legacy_form_data['result_format'] = query_context.get('result_format', 'json')
legacy_form_data['result_type'] = query_context.get('result_type', 'query')
legacy_form_data['force'] = bool(query_context.get('force', True))
extras = query_object.get('extras')
if isinstance(extras, dict):
legacy_form_data["extras"] = deepcopy(extras)
time_range = query_object.get("time_range")
legacy_form_data['extras'] = deepcopy(extras)
time_range = query_object.get('time_range')
if time_range:
legacy_form_data["time_range"] = time_range
app_logger.reflect(
"Built Superset legacy preview form_data payload from browser-observed request shape",
extra={
"dataset_id": dataset_id,
"legacy_endpoint_inference": "POST /explore_json/form_data?form_data=... primary, POST /data?form_data=... fallback, based on observed browser traffic",
"contains_form_datasource": "datasource" in legacy_form_data,
"legacy_form_data_keys": sorted(legacy_form_data.keys()),
"legacy_extra_filters": legacy_form_data.get("extra_filters", []),
"legacy_extra_form_data": legacy_form_data.get(
"extra_form_data", {}
),
},
)
legacy_form_data['time_range'] = time_range
app_logger.reflect('Built Superset legacy preview form_data payload from browser-observed request shape', extra={'dataset_id': dataset_id, 'legacy_endpoint_inference': 'POST /explore_json/form_data?form_data=... primary, POST /data?form_data=... fallback, based on observed browser traffic', 'contains_form_datasource': 'datasource' in legacy_form_data, 'legacy_form_data_keys': sorted(legacy_form_data.keys()), 'legacy_extra_filters': legacy_form_data.get('extra_filters', []), 'legacy_extra_form_data': legacy_form_data.get('extra_form_data', {})})
logger.reflect('Belief protocol postcondition checkpoint for SupersetClientBuildDatasetPreviewLegacyFormData')
return legacy_form_data
# [/DEF:SupersetClientBuildDatasetPreviewLegacyFormData:Function]
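The method above projects the v1 query_context back into a flat legacy form_data payload. A toy projection showing the same shape transformation; the input is a simplified assumption, not a captured Superset payload:

# Toy projection mirroring the method above: lift fields from the first
# query object of a v1 query_context into a flat legacy form_data dict.
from copy import deepcopy
from typing import Any, Dict

def project_legacy_form_data(query_context: Dict[str, Any]) -> Dict[str, Any]:
    query_object = deepcopy(query_context.get("queries", [{}])[0])
    form_data = deepcopy(query_context.get("form_data", {}))
    form_data.pop("datasource", None)  # legacy transport carries it elsewhere
    form_data["metrics"] = query_object.get("metrics", ["count"])
    form_data["row_limit"] = query_object.get("row_limit", 1000)
    form_data["result_format"] = query_context.get("result_format", "json")
    return form_data

sample = {
    "queries": [{"metrics": ["count"], "row_limit": 50}],
    "form_data": {"datasource": "12__table", "viz_type": "table"},
}
assert project_legacy_form_data(sample) == {
    "viz_type": "table",
    "metrics": ["count"],
    "row_limit": 50,
    "result_format": "json",
}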
# [DEF:SupersetClientBuildDatasetPreviewQueryContext:Function]
@@ -1768,7 +1616,7 @@ class SupersetClient:
# @PURPOSE: Convert execution mappings into Superset chart-data filter objects.
# @PRE: effective_filters may contain mapping metadata and arbitrary scalar/list values.
# @POST: Returns only valid filter dictionaries suitable for the chart-data query payload.
# @RELATION: [DEPENDS_ON] ->[SupersetClient]
# @RELATION: DEPENDS_ON -> [SupersetClient]
def _normalize_effective_filters_for_query_context(
self,
effective_filters: List[Dict[str, Any]],


@@ -3,7 +3,9 @@
# @SEMANTICS: task, cleanup, retention, logs
# @PURPOSE: Implements task cleanup and retention policies, including associated logs.
# @LAYER: Core
# @RELATION: Uses TaskPersistenceService and TaskLogPersistenceService to delete old tasks and logs.
# @RELATION: DEPENDS_ON -> [TaskPersistenceService]
# @RELATION: DEPENDS_ON -> [TaskLogPersistenceService]
# @RELATION: DEPENDS_ON -> [ConfigManager]
from typing import List
from .persistence import TaskPersistenceService, TaskLogPersistenceService
@@ -13,8 +15,9 @@ from ..config_manager import ConfigManager
# [DEF:TaskCleanupService:Class]
# @PURPOSE: Provides methods to clean up old task records and their associated logs.
# @COMPLEXITY: 3
# @RELATION: DEPENDS_ON -> Task_manager
# @RELATION: DEPENDS_ON -> ThrottledSchedulerConfigurator, CALL -> -> TaskCleanupService
# @RELATION: DEPENDS_ON -> [TaskPersistenceService]
# @RELATION: DEPENDS_ON -> [TaskLogPersistenceService]
# @RELATION: DEPENDS_ON -> [ConfigManager]
class TaskCleanupService:
# [DEF:__init__:Function]
# @PURPOSE: Initializes the cleanup service with dependencies.
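A hedged sketch of the retention arithmetic this service implies: derive a cutoff from a configured window and select expired tasks. The field names and helper are assumptions; the real persistence services are not shown in this diff:

# Retention-cutoff sketch; "finished_at"/"id" fields are assumed.
from datetime import datetime, timedelta, timezone
from typing import Dict, List

def expired_task_ids(tasks: List[Dict], retention_days: int) -> List[str]:
    cutoff = datetime.now(timezone.utc) - timedelta(days=retention_days)
    return [t["id"] for t in tasks if t["finished_at"] < cutoff]

tasks = [
    {"id": "a", "finished_at": datetime.now(timezone.utc) - timedelta(days=40)},
    {"id": "b", "finished_at": datetime.now(timezone.utc) - timedelta(days=3)},
]
assert expired_task_ids(tasks, retention_days=30) == ["a"]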


@@ -4,8 +4,7 @@
# @SEMANTICS: file, io, zip, yaml, temp, archive, utility
# @PURPOSE: Provides a set of utilities for managing file operations, including temporary files, ZIP archives, YAML files, and directory cleanup.
# @LAYER: Infra
# @RELATION: DEPENDS_ON -> backend.src.core.logger
# @RELATION: DEPENDS_ON -> pyyaml
# @RELATION: DEPENDS_ON -> [LoggerModule]
# @PUBLIC_API: create_temp_file, remove_empty_directories, read_dashboard_from_disk, calculate_crc32, RetentionPolicy, archive_exports, save_and_unpack_dashboard, update_yamls, create_dashboard_export, sanitize_filename, get_filename_from_headers, consolidate_archive_folders
# [SECTION: IMPORTS]


@@ -3,8 +3,8 @@
# @SEMANTICS: clean-release, tui, ncurses, interactive-validator
# @PURPOSE: Interactive terminal interface for Enterprise Clean Release compliance validation.
# @LAYER: UI
# @RELATION: DEPENDS_ON -> [compliance_orchestrator]
# @RELATION: DEPENDS_ON -> [repository]
# @RELATION: DEPENDS_ON -> [ComplianceExecutionService]
# @RELATION: DEPENDS_ON -> [CleanReleaseRepository]
# @INVARIANT: TUI refuses startup in non-TTY environments; headless flow is CLI/API only.
import curses
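A minimal sketch of the non-TTY guard the invariant above describes; only the isatty check is implied by the invariant, and the exit message is an assumption:

# TTY guard sketch: refuse to start curses off a terminal.
import curses
import sys

def require_tty() -> None:
    if not (sys.stdin.isatty() and sys.stdout.isatty()):
        raise SystemExit("interactive validator requires a TTY; headless flow is CLI/API only")

def main(stdscr) -> None:
    stdscr.addstr(0, 0, "compliance validator ready")
    stdscr.getkey()

if __name__ == "__main__":
    require_tty()
    curses.wrapper(main)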


@@ -4,11 +4,11 @@
# @SEMANTICS: git, service, gitpython, repository, version_control
# @PURPOSE: Core Git logic using GitPython to manage dashboard repositories.
# @LAYER: Service
# @RELATION: USED_BY -> backend.src.api.routes.git
# @RELATION: USED_BY -> backend.src.plugins.git_plugin
# @RELATION: DEPENDS_ON -> backend.src.core.database.SessionLocal
# @RELATION: DEPENDS_ON -> backend.src.models.config.AppConfigRecord
# @RELATION: DEPENDS_ON -> backend.src.models.git.GitRepository
# @RELATION: USED_BY -> [GitApi]
# @RELATION: USED_BY -> [GitPluginModule]
# @RELATION: DEPENDS_ON -> [SessionLocal]
# @RELATION: DEPENDS_ON -> [AppConfigRecord]
# @RELATION: DEPENDS_ON -> [GitRepository]
#
# @INVARIANT: All Git operations must be performed on a valid local directory.


@@ -44,7 +44,7 @@ class ResourceService:
# @PARAM: env (Environment) - The environment to fetch from
# @PARAM: tasks (List[Task]) - List of tasks to check for status
# @RETURN: List[Dict] - Dashboards with git_status and last_task fields
# @RELATION: CALLS ->[SupersetClient.get_dashboards_summary]
# @RELATION: CALLS -> [SupersetClientGetDashboardsSummary]
# @RELATION: CALLS ->[_get_git_status_for_dashboard]
# @RELATION: CALLS ->[_get_last_llm_task_for_dashboard]
async def get_dashboards_with_status(
@@ -96,7 +96,7 @@ class ResourceService:
# @PARAM: page (int) - 1-based page number.
# @PARAM: page_size (int) - Page size.
# @RETURN: Dict[str, Any] - {"dashboards": List[Dict], "total": int, "total_pages": int}
# @RELATION: CALLS ->[SupersetClient.get_dashboards_summary_page]
# @RELATION: CALLS -> [SupersetClientGetDashboardsSummaryPage]
# @RELATION: CALLS ->[_get_git_status_for_dashboard]
# @RELATION: CALLS ->[_get_last_llm_task_for_dashboard]
async def get_dashboards_page_with_status(
@@ -297,7 +297,7 @@ class ResourceService:
# @PARAM: env (Environment) - The environment to fetch from
# @PARAM: tasks (List[Task]) - List of tasks to check for status
# @RETURN: List[Dict] - Datasets with mapped_fields and last_task fields
# @RELATION: CALLS ->[SupersetClient.get_datasets_summary]
# @RELATION: CALLS -> [SupersetClientGetDatasetsSummary]
# @RELATION: CALLS ->[_get_last_task_for_resource]
async def get_datasets_with_status(
self,
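The @RETURN contract above fixes a page envelope of dashboards, total, and total_pages. A small sketch of that arithmetic; the helper name is hypothetical:

# Page-envelope sketch matching the @RETURN shape; helper name is made up.
import math
from typing import Any, Dict, List

def page_envelope(dashboards: List[Dict[str, Any]], total: int, page_size: int) -> Dict[str, Any]:
    return {
        "dashboards": dashboards,
        "total": total,
        "total_pages": math.ceil(total / page_size) if page_size > 0 else 0,
    }

assert page_envelope([], total=101, page_size=20)["total_pages"] == 6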