feat(027): Final Phase T038-T043 implementation
- T038: SessionEvent logger and persistence logic - Added SessionEventLogger service with explicit audit event persistence - Added SessionEvent model with events relationship on DatasetReviewSession - Integrated event logging into orchestrator flows and API mutation endpoints - T039: Semantic source version propagation - Added source_version column to SemanticFieldEntry - Added propagate_source_version_update() to SemanticResolver - Preserves locked/manual field invariants during propagation - T040: Batch approval API and UI actions - Added batch semantic approval endpoint (/fields/semantic/approve-batch) - Added batch mapping approval endpoint (/mappings/approve-batch) - Added batch approval actions to SemanticLayerReview and ExecutionMappingReview components - Aligned batch semantics with single-item approval contracts - T041: Superset compatibility matrix tests - Added test_superset_matrix.py with preview and SQL Lab fallback coverage - Tests verify client method preference and matrix fallback behavior - T042: RBAC audit sweep on session-mutation endpoints - Added _require_owner_mutation_scope() helper - Applied owner guards to update_session, delete_session, and all mutation endpoints - Ensured no bypass of existing permission checks - T043: i18n coverage for dataset-review UI - Added workspace state labels (empty/importing/review) to en.json and ru.json - Added batch action labels for semantics and mappings - Fixed workspace state comparison to lowercase strings - Removed hardcoded workspace state display strings Signed-off-by: Implementation Specialist <impl@ss-tools>
This commit is contained in:
@@ -1,7 +1,8 @@
|
||||
# [DEF:test_clean_release_v2_api:Module]
|
||||
# [DEF:CleanReleaseV2ApiTests:Module]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: API contract tests for redesigned clean release endpoints.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: DEPENDS_ON -> backend.src.api.routes.clean_release_v2
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from types import SimpleNamespace
|
||||
@@ -90,4 +91,4 @@ def test_manifest_build_contract():
|
||||
assert "manifest_digest" in data
|
||||
assert data["candidate_id"] == candidate_id
|
||||
|
||||
# [/DEF:test_clean_release_v2_api:Module]
|
||||
# [/DEF:CleanReleaseV2ApiTests:Module]
|
||||
@@ -1,8 +1,8 @@
|
||||
# [DEF:test_clean_release_v2_release_api:Module]
|
||||
# [DEF:CleanReleaseV2ReleaseApiTests:Module]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: API contract test scaffolding for clean release approval and publication endpoints.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: IMPLEMENTS -> clean_release_v2_release_api_contracts
|
||||
# @RELATION: DEPENDS_ON -> backend.src.api.routes.clean_release_v2
|
||||
|
||||
"""Contract tests for redesigned approval/publication API endpoints."""
|
||||
|
||||
@@ -104,4 +104,4 @@ def test_release_reject_contract() -> None:
|
||||
assert payload["decision"] == "REJECTED"
|
||||
|
||||
|
||||
# [/DEF:test_clean_release_v2_release_api:Module]
|
||||
# [/DEF:CleanReleaseV2ReleaseApiTests:Module]
|
||||
@@ -1,8 +1,8 @@
|
||||
# [DEF:backend.src.api.routes.__tests__.test_connections_routes:Module]
|
||||
# [DEF:ConnectionsRoutesTests:Module]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: Verifies connection routes bootstrap their table before CRUD access.
|
||||
# @LAYER: API
|
||||
# @RELATION: VERIFIES -> backend.src.api.routes.connections
|
||||
# @RELATION: DEPENDS_ON -> ConnectionsRouter
|
||||
|
||||
import os
|
||||
import sys
|
||||
@@ -69,4 +69,4 @@ def test_create_connection_bootstraps_missing_table(db_session):
|
||||
assert created.host == "warehouse.internal"
|
||||
assert "connection_configs" in inspector.get_table_names()
|
||||
|
||||
# [/DEF:backend.src.api.routes.__tests__.test_connections_routes:Module]
|
||||
# [/DEF:ConnectionsRoutesTests:Module]
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# [DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
|
||||
# [DEF:DashboardsApiTests:Module]
|
||||
# @COMPLEXITY: 3
|
||||
# @PURPOSE: Unit tests for Dashboards API endpoints
|
||||
# @PURPOSE: Unit tests for dashboards API endpoints.
|
||||
# @LAYER: API
|
||||
# @RELATION: TESTS -> backend.src.api.routes.dashboards
|
||||
# @RELATION: DEPENDS_ON -> backend.src.api.routes.dashboards
|
||||
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch, AsyncMock
|
||||
@@ -57,6 +57,7 @@ client = TestClient(app)
|
||||
|
||||
|
||||
# [DEF:test_get_dashboards_success:Function]
|
||||
# @PURPOSE: Validate dashboards listing returns a populated response that satisfies the schema contract.
|
||||
# @TEST: GET /api/dashboards returns 200 and valid schema
|
||||
# @PRE: env_id exists
|
||||
# @POST: Response matches DashboardsResponse schema
|
||||
@@ -95,6 +96,7 @@ def test_get_dashboards_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboards_with_search:Function]
|
||||
# @PURPOSE: Validate dashboards listing applies the search filter and returns only matching rows.
|
||||
# @TEST: GET /api/dashboards filters by search term
|
||||
# @PRE: search parameter provided
|
||||
# @POST: Only matching dashboards returned
|
||||
@@ -126,6 +128,7 @@ def test_get_dashboards_with_search(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboards_empty:Function]
|
||||
# @PURPOSE: Validate dashboards listing returns an empty payload for an environment without dashboards.
|
||||
# @TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}
|
||||
def test_get_dashboards_empty(mock_deps):
|
||||
"""@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
|
||||
@@ -146,6 +149,7 @@ def test_get_dashboards_empty(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboards_superset_failure:Function]
|
||||
# @PURPOSE: Validate dashboards listing surfaces a 503 contract when Superset access fails.
|
||||
# @TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}
|
||||
def test_get_dashboards_superset_failure(mock_deps):
|
||||
"""@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
|
||||
@@ -164,6 +168,7 @@ def test_get_dashboards_superset_failure(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboards_env_not_found:Function]
|
||||
# @PURPOSE: Validate dashboards listing returns 404 when the requested environment does not exist.
|
||||
# @TEST: GET /api/dashboards returns 404 if env_id missing
|
||||
# @PRE: env_id does not exist
|
||||
# @POST: Returns 404 error
|
||||
@@ -179,6 +184,7 @@ def test_get_dashboards_env_not_found(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboards_invalid_pagination:Function]
|
||||
# @PURPOSE: Validate dashboards listing rejects invalid pagination parameters with 400 responses.
|
||||
# @TEST: GET /api/dashboards returns 400 for invalid page/page_size
|
||||
# @PRE: page < 1 or page_size > 100
|
||||
# @POST: Returns 400 error
|
||||
@@ -199,6 +205,7 @@ def test_get_dashboards_invalid_pagination(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboard_detail_success:Function]
|
||||
# @PURPOSE: Validate dashboard detail returns charts and datasets for an existing dashboard.
|
||||
# @TEST: GET /api/dashboards/{id} returns dashboard detail with charts and datasets
|
||||
def test_get_dashboard_detail_success(mock_deps):
|
||||
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
|
||||
@@ -251,6 +258,7 @@ def test_get_dashboard_detail_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboard_detail_env_not_found:Function]
|
||||
# @PURPOSE: Validate dashboard detail returns 404 when the requested environment is missing.
|
||||
# @TEST: GET /api/dashboards/{id} returns 404 for missing environment
|
||||
def test_get_dashboard_detail_env_not_found(mock_deps):
|
||||
mock_deps["config"].get_environments.return_value = []
|
||||
@@ -265,6 +273,7 @@ def test_get_dashboard_detail_env_not_found(mock_deps):
|
||||
# [DEF:test_migrate_dashboards_success:Function]
|
||||
# @TEST: POST /api/dashboards/migrate creates migration task
|
||||
# @PRE: Valid source_env_id, target_env_id, dashboard_ids
|
||||
# @PURPOSE: Validate dashboard migration request creates an async task and returns its identifier.
|
||||
# @POST: Returns task_id and create_task was called
|
||||
def test_migrate_dashboards_success(mock_deps):
|
||||
mock_source = MagicMock()
|
||||
@@ -300,6 +309,7 @@ def test_migrate_dashboards_success(mock_deps):
|
||||
# [DEF:test_migrate_dashboards_no_ids:Function]
|
||||
# @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids
|
||||
# @PRE: dashboard_ids is empty
|
||||
# @PURPOSE: Validate dashboard migration rejects empty dashboard identifier lists.
|
||||
# @POST: Returns 400 error
|
||||
def test_migrate_dashboards_no_ids(mock_deps):
|
||||
response = client.post(
|
||||
@@ -319,6 +329,7 @@ def test_migrate_dashboards_no_ids(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_migrate_dashboards_env_not_found:Function]
|
||||
# @PURPOSE: Validate migration creation returns 404 when the source environment cannot be resolved.
|
||||
# @PRE: source_env_id and target_env_id are valid environment IDs
|
||||
def test_migrate_dashboards_env_not_found(mock_deps):
|
||||
"""@PRE: source_env_id and target_env_id are valid environment IDs."""
|
||||
@@ -339,6 +350,7 @@ def test_migrate_dashboards_env_not_found(mock_deps):
|
||||
# [DEF:test_backup_dashboards_success:Function]
|
||||
# @TEST: POST /api/dashboards/backup creates backup task
|
||||
# @PRE: Valid env_id, dashboard_ids
|
||||
# @PURPOSE: Validate dashboard backup request creates an async backup task and returns its identifier.
|
||||
# @POST: Returns task_id and create_task was called
|
||||
def test_backup_dashboards_success(mock_deps):
|
||||
mock_env = MagicMock()
|
||||
@@ -369,6 +381,7 @@ def test_backup_dashboards_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_backup_dashboards_env_not_found:Function]
|
||||
# @PURPOSE: Validate backup task creation returns 404 when the target environment is missing.
|
||||
# @PRE: env_id is a valid environment ID
|
||||
def test_backup_dashboards_env_not_found(mock_deps):
|
||||
"""@PRE: env_id is a valid environment ID."""
|
||||
@@ -388,6 +401,7 @@ def test_backup_dashboards_env_not_found(mock_deps):
|
||||
# [DEF:test_get_database_mappings_success:Function]
|
||||
# @TEST: GET /api/dashboards/db-mappings returns mapping suggestions
|
||||
# @PRE: Valid source_env_id, target_env_id
|
||||
# @PURPOSE: Validate database mapping suggestions are returned for valid source and target environments.
|
||||
# @POST: Returns list of database mappings
|
||||
def test_get_database_mappings_success(mock_deps):
|
||||
mock_source = MagicMock()
|
||||
@@ -419,6 +433,7 @@ def test_get_database_mappings_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_database_mappings_env_not_found:Function]
|
||||
# @PURPOSE: Validate database mapping suggestions return 404 when either environment is missing.
|
||||
# @PRE: source_env_id and target_env_id are valid environment IDs
|
||||
def test_get_database_mappings_env_not_found(mock_deps):
|
||||
"""@PRE: source_env_id must be a valid environment."""
|
||||
@@ -429,6 +444,7 @@ def test_get_database_mappings_env_not_found(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboard_tasks_history_filters_success:Function]
|
||||
# @PURPOSE: Validate dashboard task history returns only related backup and LLM tasks.
|
||||
# @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
|
||||
def test_get_dashboard_tasks_history_filters_success(mock_deps):
|
||||
now = datetime.now(timezone.utc)
|
||||
@@ -473,6 +489,7 @@ def test_get_dashboard_tasks_history_filters_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_dashboard_thumbnail_success:Function]
|
||||
# @PURPOSE: Validate dashboard thumbnail endpoint proxies image bytes and content type from Superset.
|
||||
# @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
|
||||
def test_get_dashboard_thumbnail_success(mock_deps):
|
||||
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
|
||||
@@ -540,6 +557,7 @@ def _matches_actor_case_insensitive(bound_username, owners, modified_by):
|
||||
|
||||
# [DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function]
|
||||
# @TEST: GET /api/dashboards applies profile-default filter with owners OR modified_by trim+case-insensitive semantics.
|
||||
# @PURPOSE: Validate profile-default filtering matches owner and modifier aliases using normalized Superset actor values.
|
||||
# @PRE: Current user has enabled profile-default preference and bound username.
|
||||
# @POST: Response includes only matching dashboards and effective_profile_filter metadata.
|
||||
def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps):
|
||||
@@ -599,6 +617,7 @@ def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps)
|
||||
|
||||
# [DEF:test_get_dashboards_override_show_all_contract:Function]
|
||||
# @TEST: GET /api/dashboards honors override_show_all and disables profile-default filter for current page.
|
||||
# @PURPOSE: Validate override_show_all bypasses profile-default filtering without changing dashboard list semantics.
|
||||
# @PRE: Profile-default preference exists but override_show_all=true query is provided.
|
||||
# @POST: Response remains unfiltered and effective_profile_filter.applied is false.
|
||||
def test_get_dashboards_override_show_all_contract(mock_deps):
|
||||
@@ -640,6 +659,7 @@ def test_get_dashboards_override_show_all_contract(mock_deps):
|
||||
|
||||
# [DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function]
|
||||
# @TEST: GET /api/dashboards returns empty result set when profile-default filter is active and no dashboard actors match.
|
||||
# @PURPOSE: Validate profile-default filtering returns an empty dashboard page when no actor aliases match the bound user.
|
||||
# @PRE: Profile-default preference is enabled with bound username and all dashboards are non-matching.
|
||||
# @POST: Response total is 0 with deterministic pagination and active effective_profile_filter metadata.
|
||||
def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
|
||||
@@ -695,6 +715,7 @@ def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
|
||||
|
||||
# [DEF:test_get_dashboards_page_context_other_disables_profile_default:Function]
|
||||
# @TEST: GET /api/dashboards does not auto-apply profile-default filter outside dashboards_main page context.
|
||||
# @PURPOSE: Validate non-dashboard page contexts suppress profile-default filtering and preserve unfiltered results.
|
||||
# @PRE: Profile-default preference exists but page_context=other query is provided.
|
||||
# @POST: Response remains unfiltered and metadata reflects source_page=other.
|
||||
def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
|
||||
@@ -736,6 +757,7 @@ def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
|
||||
|
||||
# [DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function]
|
||||
# @TEST: GET /api/dashboards resolves Superset display-name alias once and filters without per-dashboard detail calls.
|
||||
# @PURPOSE: Validate profile-default filtering reuses resolved Superset display aliases without triggering per-dashboard detail fanout.
|
||||
# @PRE: Profile-default filter is active, bound username is `admin`, dashboard actors contain display labels.
|
||||
# @POST: Route matches by alias (`Superset Admin`) and does not call `SupersetClient.get_dashboard` in list filter path.
|
||||
def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout(mock_deps):
|
||||
@@ -809,6 +831,7 @@ def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fano
|
||||
|
||||
# [DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function]
|
||||
# @TEST: GET /api/dashboards profile-default filter matches Superset owner object payloads.
|
||||
# @PURPOSE: Validate profile-default filtering accepts owner object payloads once aliases resolve to the bound Superset username.
|
||||
# @PRE: Profile-default preference is enabled and owners list contains dict payloads.
|
||||
# @POST: Response keeps dashboards where owner object resolves to bound username alias.
|
||||
def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(mock_deps):
|
||||
@@ -853,11 +876,16 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
|
||||
"src.api.routes.dashboards._resolve_profile_actor_aliases",
|
||||
return_value=["user_1"],
|
||||
):
|
||||
profile_service = DomainProfileService(db=MagicMock(), config_manager=MagicMock())
|
||||
profile_service.get_my_preference = MagicMock(
|
||||
return_value=_build_profile_preference_stub(
|
||||
username="user_1",
|
||||
enabled=True,
|
||||
profile_service = MagicMock(spec=DomainProfileService)
|
||||
profile_service.get_my_preference.return_value = _build_profile_preference_stub(
|
||||
username="user_1",
|
||||
enabled=True,
|
||||
)
|
||||
profile_service.matches_dashboard_actor.side_effect = (
|
||||
lambda bound_username, owners, modified_by: any(
|
||||
str(owner.get("email", "")).split("@", 1)[0].strip().lower() == str(bound_username).strip().lower()
|
||||
for owner in (owners or [])
|
||||
if isinstance(owner, dict)
|
||||
)
|
||||
)
|
||||
profile_service_cls.return_value = profile_service
|
||||
@@ -874,4 +902,4 @@ def test_get_dashboards_profile_filter_matches_owner_object_payload_contract(moc
|
||||
# [/DEF:test_get_dashboards_profile_filter_matches_owner_object_payload_contract:Function]
|
||||
|
||||
|
||||
# [/DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
|
||||
# [/DEF:DashboardsApiTests:Module]
|
||||
|
||||
@@ -15,24 +15,56 @@ import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
from src.app import app
|
||||
from src.api.routes.dataset_review import _get_orchestrator, _get_repository
|
||||
from src.api.routes.dataset_review import (
|
||||
_get_clarification_engine,
|
||||
_get_orchestrator,
|
||||
_get_repository,
|
||||
)
|
||||
from src.core.config_models import Environment, GlobalSettings, AppConfig
|
||||
from src.core.utils.superset_context_extractor import SupersetContextExtractor
|
||||
from src.dependencies import get_config_manager, get_current_user, get_task_manager
|
||||
from src.models.dataset_review import (
|
||||
AnswerKind,
|
||||
ApprovalState,
|
||||
BusinessSummarySource,
|
||||
CandidateMatchType,
|
||||
CandidateStatus,
|
||||
ClarificationOption,
|
||||
ClarificationQuestion,
|
||||
ClarificationSession,
|
||||
ClarificationStatus,
|
||||
CompiledPreview,
|
||||
ConfidenceState,
|
||||
DatasetReviewSession,
|
||||
LaunchStatus,
|
||||
ExecutionMapping,
|
||||
FieldKind,
|
||||
FieldProvenance,
|
||||
FindingArea,
|
||||
FindingSeverity,
|
||||
MappingMethod,
|
||||
PreviewStatus,
|
||||
QuestionState,
|
||||
ReadinessState,
|
||||
RecommendedAction,
|
||||
ResolutionState,
|
||||
SemanticCandidate,
|
||||
SemanticFieldEntry,
|
||||
SemanticSource,
|
||||
SessionPhase,
|
||||
SessionStatus,
|
||||
SemanticSourceStatus,
|
||||
SemanticSourceType,
|
||||
TrustLevel,
|
||||
)
|
||||
from src.services.dataset_review.orchestrator import (
|
||||
DatasetReviewOrchestrator,
|
||||
LaunchDatasetResult,
|
||||
PreparePreviewResult,
|
||||
StartSessionCommand,
|
||||
)
|
||||
from src.services.dataset_review.orchestrator import DatasetReviewOrchestrator, StartSessionCommand
|
||||
from src.services.dataset_review.semantic_resolver import SemanticSourceResolver
|
||||
from src.services.dataset_review.event_logger import SessionEventLogger
|
||||
|
||||
|
||||
client = TestClient(app)
|
||||
@@ -85,6 +117,183 @@ def _make_session():
|
||||
# [/DEF:_make_session:Function]
|
||||
|
||||
|
||||
# [DEF:_make_us2_session:Function]
|
||||
def _make_us2_session():
|
||||
now = datetime.now(timezone.utc)
|
||||
session = _make_session()
|
||||
session.readiness_state = ReadinessState.CLARIFICATION_NEEDED
|
||||
session.recommended_action = RecommendedAction.START_CLARIFICATION
|
||||
session.current_phase = SessionPhase.CLARIFICATION
|
||||
|
||||
field = SemanticFieldEntry(
|
||||
field_id="field-1",
|
||||
session_id="sess-1",
|
||||
field_name="revenue",
|
||||
field_kind=FieldKind.COLUMN,
|
||||
verbose_name="Revenue",
|
||||
description="AI-generated revenue description",
|
||||
display_format="$,.2f",
|
||||
provenance=FieldProvenance.AI_GENERATED,
|
||||
source_id="source-ai",
|
||||
source_version=None,
|
||||
confidence_rank=1,
|
||||
is_locked=False,
|
||||
has_conflict=True,
|
||||
needs_review=True,
|
||||
last_changed_by="agent",
|
||||
user_feedback=None,
|
||||
created_at=now,
|
||||
updated_at=now,
|
||||
)
|
||||
candidate = SemanticCandidate(
|
||||
candidate_id="cand-1",
|
||||
field_id="field-1",
|
||||
source_id="dict-1",
|
||||
candidate_rank=1,
|
||||
match_type=CandidateMatchType.EXACT,
|
||||
confidence_score=1.0,
|
||||
proposed_verbose_name="Recognized Revenue",
|
||||
proposed_description="Trusted dictionary description",
|
||||
proposed_display_format="$,.2f",
|
||||
status=CandidateStatus.PROPOSED,
|
||||
created_at=now,
|
||||
)
|
||||
field.candidates = [candidate]
|
||||
|
||||
clarification_session = ClarificationSession(
|
||||
clarification_session_id="clar-1",
|
||||
session_id="sess-1",
|
||||
status=ClarificationStatus.PENDING,
|
||||
current_question_id=None,
|
||||
resolved_count=0,
|
||||
remaining_count=1,
|
||||
summary_delta=None,
|
||||
started_at=now,
|
||||
updated_at=now,
|
||||
completed_at=None,
|
||||
)
|
||||
question = ClarificationQuestion(
|
||||
question_id="q-1",
|
||||
clarification_session_id="clar-1",
|
||||
topic_ref="dataset.business_purpose",
|
||||
question_text="Which business concept does this dataset represent?",
|
||||
why_it_matters="This determines how downstream users interpret revenue KPIs.",
|
||||
current_guess="Revenue reporting",
|
||||
priority=100,
|
||||
state=QuestionState.OPEN,
|
||||
created_at=now,
|
||||
updated_at=now,
|
||||
)
|
||||
question.options = [
|
||||
ClarificationOption(
|
||||
option_id="opt-1",
|
||||
question_id="q-1",
|
||||
label="Revenue reporting",
|
||||
value="Revenue reporting",
|
||||
is_recommended=True,
|
||||
display_order=1,
|
||||
),
|
||||
ClarificationOption(
|
||||
option_id="opt-2",
|
||||
question_id="q-1",
|
||||
label="Margin analysis",
|
||||
value="Margin analysis",
|
||||
is_recommended=False,
|
||||
display_order=2,
|
||||
),
|
||||
]
|
||||
question.answer = None
|
||||
clarification_session.questions = [question]
|
||||
|
||||
session.findings = []
|
||||
session.collaborators = []
|
||||
session.semantic_sources = [
|
||||
SemanticSource(
|
||||
source_id="dict-1",
|
||||
session_id="sess-1",
|
||||
source_type=SemanticSourceType.CONNECTED_DICTIONARY,
|
||||
source_ref="dict://finance",
|
||||
source_version="2026.03",
|
||||
display_name="Finance Dictionary",
|
||||
trust_level=TrustLevel.TRUSTED,
|
||||
schema_overlap_score=1.0,
|
||||
status=SemanticSourceStatus.AVAILABLE,
|
||||
created_at=now,
|
||||
)
|
||||
]
|
||||
session.semantic_fields = [field]
|
||||
session.imported_filters = []
|
||||
session.template_variables = []
|
||||
session.execution_mappings = []
|
||||
session.clarification_sessions = [clarification_session]
|
||||
session.previews = []
|
||||
session.run_contexts = []
|
||||
return session
|
||||
# [/DEF:_make_us2_session:Function]
|
||||
|
||||
|
||||
# [DEF:_make_us3_session:Function]
|
||||
def _make_us3_session():
|
||||
now = datetime.now(timezone.utc)
|
||||
session = _make_session()
|
||||
session.readiness_state = ReadinessState.MAPPING_REVIEW_NEEDED
|
||||
session.recommended_action = RecommendedAction.APPROVE_MAPPING
|
||||
session.current_phase = SessionPhase.MAPPING_REVIEW
|
||||
|
||||
imported_filter = MagicMock()
|
||||
imported_filter.filter_id = "filter-1"
|
||||
imported_filter.session_id = "sess-1"
|
||||
imported_filter.filter_name = "country"
|
||||
imported_filter.display_name = "Country"
|
||||
imported_filter.raw_value = "DE"
|
||||
imported_filter.normalized_value = "DE"
|
||||
imported_filter.source = "superset_url"
|
||||
imported_filter.confidence_state = "imported"
|
||||
imported_filter.requires_confirmation = False
|
||||
imported_filter.recovery_status = "recovered"
|
||||
imported_filter.notes = "Recovered from URL state"
|
||||
|
||||
template_variable = MagicMock()
|
||||
template_variable.variable_id = "var-1"
|
||||
template_variable.session_id = "sess-1"
|
||||
template_variable.variable_name = "country"
|
||||
template_variable.expression_source = "{{ filter_values('country') }}"
|
||||
template_variable.variable_kind = "native_filter"
|
||||
template_variable.is_required = True
|
||||
template_variable.default_value = None
|
||||
template_variable.mapping_status = "unmapped"
|
||||
mapping = ExecutionMapping(
|
||||
mapping_id="map-1",
|
||||
session_id="sess-1",
|
||||
filter_id="filter-1",
|
||||
variable_id="var-1",
|
||||
mapping_method="direct_match",
|
||||
raw_input_value="DE",
|
||||
effective_value="DE",
|
||||
transformation_note="Trimmed imported value",
|
||||
warning_level="medium",
|
||||
requires_explicit_approval=True,
|
||||
approval_state=ApprovalState.PENDING,
|
||||
approved_by_user_id=None,
|
||||
approved_at=None,
|
||||
created_at=now,
|
||||
updated_at=now,
|
||||
)
|
||||
|
||||
session.findings = []
|
||||
session.collaborators = []
|
||||
session.semantic_sources = []
|
||||
session.semantic_fields = []
|
||||
session.imported_filters = [imported_filter]
|
||||
session.template_variables = [template_variable]
|
||||
session.execution_mappings = [mapping]
|
||||
session.clarification_sessions = []
|
||||
session.previews = []
|
||||
session.run_contexts = []
|
||||
return session
|
||||
# [/DEF:_make_us3_session:Function]
|
||||
|
||||
|
||||
# [DEF:dataset_review_api_dependencies:Function]
|
||||
@pytest.fixture(autouse=True)
|
||||
def dataset_review_api_dependencies():
|
||||
@@ -319,6 +528,8 @@ def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_de
|
||||
repository.load_session_detail.return_value = session
|
||||
repository.list_sessions_for_user.return_value = [session]
|
||||
repository.db = MagicMock()
|
||||
repository.event_logger = MagicMock(spec=SessionEventLogger)
|
||||
repository.event_logger.log_for_session.return_value = SimpleNamespace(session_event_id="evt-0")
|
||||
|
||||
app.dependency_overrides[_get_repository] = lambda: repository
|
||||
|
||||
@@ -346,4 +557,274 @@ def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_de
|
||||
assert delete_response.status_code == 204
|
||||
# [/DEF:test_get_session_detail_export_and_lifecycle_endpoints:Function]
|
||||
|
||||
|
||||
# [DEF:test_us2_clarification_endpoints_persist_answer_and_feedback:Function]
|
||||
# @PURPOSE: Clarification endpoints should expose one current question, persist the answer before advancement, and store feedback on the answer audit record.
|
||||
def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_api_dependencies):
|
||||
session = _make_us2_session()
|
||||
repository = MagicMock()
|
||||
repository.load_session_detail.return_value = session
|
||||
repository.db = MagicMock()
|
||||
repository.db.commit.side_effect = lambda: None
|
||||
repository.db.refresh.side_effect = lambda obj: None
|
||||
|
||||
def _add_side_effect(obj):
|
||||
if obj.__class__.__name__ == "ClarificationAnswer":
|
||||
session.clarification_sessions[0].questions[0].answer = obj
|
||||
|
||||
repository.db.add.side_effect = _add_side_effect
|
||||
repository.db.flush.side_effect = lambda: None
|
||||
|
||||
app.dependency_overrides[_get_repository] = lambda: repository
|
||||
|
||||
state_response = client.get("/api/dataset-orchestration/sessions/sess-1/clarification")
|
||||
assert state_response.status_code == 200
|
||||
state_payload = state_response.json()
|
||||
assert state_payload["current_question"]["why_it_matters"] == "This determines how downstream users interpret revenue KPIs."
|
||||
assert state_payload["current_question"]["current_guess"] == "Revenue reporting"
|
||||
assert len(state_payload["current_question"]["options"]) == 2
|
||||
|
||||
answer_response = client.post(
|
||||
"/api/dataset-orchestration/sessions/sess-1/clarification/answers",
|
||||
json={
|
||||
"question_id": "q-1",
|
||||
"answer_kind": "selected",
|
||||
"answer_value": "Revenue reporting",
|
||||
},
|
||||
)
|
||||
assert answer_response.status_code == 200
|
||||
answer_payload = answer_response.json()
|
||||
assert answer_payload["session"]["readiness_state"] == "review_ready"
|
||||
assert answer_payload["clarification_state"]["current_question"] is None
|
||||
assert answer_payload["changed_findings"][0]["resolution_state"] == "resolved"
|
||||
assert session.clarification_sessions[0].questions[0].answer.answer_value == "Revenue reporting"
|
||||
|
||||
feedback_response = client.post(
|
||||
"/api/dataset-orchestration/sessions/sess-1/clarification/questions/q-1/feedback",
|
||||
json={"feedback": "up"},
|
||||
)
|
||||
assert feedback_response.status_code == 200
|
||||
assert feedback_response.json() == {"target_id": "q-1", "feedback": "up"}
|
||||
assert session.clarification_sessions[0].questions[0].answer.user_feedback == "up"
|
||||
# [/DEF:test_us2_clarification_endpoints_persist_answer_and_feedback:Function]
|
||||
|
||||
|
||||
# [DEF:test_us2_field_semantic_override_lock_unlock_and_feedback:Function]
|
||||
# @PURPOSE: Semantic field endpoints should apply manual overrides with lock/provenance invariants and persist feedback independently.
|
||||
def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api_dependencies):
|
||||
session = _make_us2_session()
|
||||
repository = MagicMock()
|
||||
repository.load_session_detail.return_value = session
|
||||
repository.db = MagicMock()
|
||||
repository.db.commit.side_effect = lambda: None
|
||||
repository.db.refresh.side_effect = lambda obj: None
|
||||
repository.db.add.side_effect = lambda obj: None
|
||||
repository.db.flush.side_effect = lambda: None
|
||||
repository.event_logger = MagicMock(spec=SessionEventLogger)
|
||||
repository.event_logger.log_for_session.return_value = SimpleNamespace(session_event_id="evt-1")
|
||||
|
||||
app.dependency_overrides[_get_repository] = lambda: repository
|
||||
|
||||
override_response = client.patch(
|
||||
"/api/dataset-orchestration/sessions/sess-1/fields/field-1/semantic",
|
||||
json={
|
||||
"verbose_name": "Confirmed Revenue",
|
||||
"description": "Manual business-approved description",
|
||||
"display_format": "$,.0f",
|
||||
},
|
||||
)
|
||||
assert override_response.status_code == 200
|
||||
override_payload = override_response.json()
|
||||
assert override_payload["provenance"] == "manual_override"
|
||||
assert override_payload["is_locked"] is True
|
||||
|
||||
unlock_response = client.post("/api/dataset-orchestration/sessions/sess-1/fields/field-1/unlock")
|
||||
assert unlock_response.status_code == 200
|
||||
assert unlock_response.json()["is_locked"] is False
|
||||
|
||||
candidate_response = client.patch(
|
||||
"/api/dataset-orchestration/sessions/sess-1/fields/field-1/semantic",
|
||||
json={"candidate_id": "cand-1", "lock_field": True},
|
||||
)
|
||||
assert candidate_response.status_code == 200
|
||||
candidate_payload = candidate_response.json()
|
||||
assert candidate_payload["verbose_name"] == "Recognized Revenue"
|
||||
assert candidate_payload["provenance"] == "dictionary_exact"
|
||||
assert candidate_payload["is_locked"] is True
|
||||
|
||||
batch_response = client.post(
|
||||
"/api/dataset-orchestration/sessions/sess-1/fields/semantic/approve-batch",
|
||||
json={"items": [{"field_id": "field-1", "candidate_id": "cand-1", "lock_field": False}]},
|
||||
)
|
||||
assert batch_response.status_code == 200
|
||||
assert batch_response.json()[0]["field_id"] == "field-1"
|
||||
|
||||
feedback_response = client.post(
|
||||
"/api/dataset-orchestration/sessions/sess-1/fields/field-1/feedback",
|
||||
json={"feedback": "down"},
|
||||
)
|
||||
assert feedback_response.status_code == 200
|
||||
assert feedback_response.json() == {"target_id": "field-1", "feedback": "down"}
|
||||
assert session.semantic_fields[0].user_feedback == "down"
|
||||
# [/DEF:test_us2_field_semantic_override_lock_unlock_and_feedback:Function]
|
||||
|
||||
|
||||
# [DEF:test_us3_mapping_patch_approval_preview_and_launch_endpoints:Function]
# @PURPOSE: US3 execution endpoints should persist manual overrides, preserve explicit approval semantics, return Superset preview truth, and expose audited launch handoff.
def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_api_dependencies):
    """Exercise the US3 mapping flow end to end: PATCH, approve, batch-approve, preview, launch.

    Asserted contracts (each grounded in the endpoint responses below):
    - PATCH on a mapping persists the manual override, records approver identity,
      flips the recommended action to SQL preview generation, and marks the
      previously READY preview as STALE.
    - Explicit approval persists the approval note onto the mapping.
    - Batch approval returns the same item shape as single-item approval.
    - The preview endpoint surfaces Superset-compiled SQL as the source of truth.
    - The launch endpoint returns the audited run context handoff
      (run id, SQL Lab session ref, launch status).
    """
    session = _make_us3_session()
    # Pre-existing READY preview: the mapping PATCH below must invalidate it (-> STALE).
    latest_preview = CompiledPreview(
        preview_id="preview-old",
        session_id="sess-1",
        preview_status=PreviewStatus.READY,
        compiled_sql="SELECT * FROM sales WHERE country = 'FR'",
        preview_fingerprint="fingerprint-old",
        compiled_by="superset",
        error_code=None,
        error_details=None,
        compiled_at=datetime.now(timezone.utc),
        created_at=datetime.now(timezone.utc),
    )
    session.previews = [latest_preview]

    # Repository stub: serves the in-memory session and records audit events
    # through the SessionEventLogger spec so endpoint-side logging calls succeed.
    repository = MagicMock()
    repository.load_session_detail.return_value = session
    repository.db = MagicMock()
    repository.db.commit.side_effect = lambda: None
    repository.db.refresh.side_effect = lambda obj: None
    repository.event_logger = MagicMock(spec=SessionEventLogger)
    repository.event_logger.log_for_session.return_value = SimpleNamespace(session_event_id="evt-2")

    # Fresh preview the orchestrator "compiles" for the /preview endpoint.
    preview = SimpleNamespace(
        preview_id="preview-1",
        session_id="sess-1",
        preview_status=PreviewStatus.READY,
        compiled_sql="SELECT * FROM sales WHERE country = 'DE'",
        preview_fingerprint="fingerprint-1",
        compiled_by="superset",
        error_code=None,
        error_details=None,
        compiled_at=datetime.now(timezone.utc),
        created_at=datetime.now(timezone.utc),
    )
    # Run context returned by the orchestrator for the /launch endpoint.
    run_context = SimpleNamespace(
        run_context_id="run-1",
        session_id="sess-1",
        dataset_ref="public.sales",
        environment_id="env-1",
        preview_id="preview-1",
        sql_lab_session_ref="sql-lab-77",
        effective_filters=[{"mapping_id": "map-1", "effective_value": "EU"}],
        template_params={"country": "EU"},
        approved_mapping_ids=["map-1"],
        semantic_decision_refs=[],
        open_warning_refs=[],
        launch_status=LaunchStatus.STARTED,
        launch_error=None,
        created_at=datetime.now(timezone.utc),
    )
    orchestrator = MagicMock()
    orchestrator.prepare_launch_preview.return_value = PreparePreviewResult(
        session=session,
        preview=preview,
        blocked_reasons=[],
    )
    orchestrator.launch_dataset.return_value = LaunchDatasetResult(
        session=session,
        run_context=run_context,
        blocked_reasons=[],
    )

    app.dependency_overrides[_get_repository] = lambda: repository
    app.dependency_overrides[_get_orchestrator] = lambda: orchestrator

    # --- PATCH: manual override must persist and stale the old preview ---
    patch_response = client.patch(
        "/api/dataset-orchestration/sessions/sess-1/mappings/map-1",
        json={
            "effective_value": "EU",
            "mapping_method": "manual_override",
            "transformation_note": "Manual override for SQL Lab launch",
        },
    )
    assert patch_response.status_code == 200
    patch_payload = patch_response.json()
    assert patch_payload["mapping_id"] == "map-1"
    assert patch_payload["mapping_method"] == "manual_override"
    assert patch_payload["effective_value"] == "EU"
    assert patch_payload["approval_state"] == "approved"
    assert patch_payload["approved_by_user_id"] == "user-1"
    assert session.execution_mappings[0].mapping_method == MappingMethod.MANUAL_OVERRIDE
    assert session.execution_mappings[0].transformation_note == "Manual override for SQL Lab launch"
    assert session.execution_mappings[0].effective_value == "EU"
    assert session.recommended_action == RecommendedAction.GENERATE_SQL_PREVIEW
    assert latest_preview.preview_status == PreviewStatus.STALE

    # --- Explicit approval: approval note is persisted on the mapping ---
    approve_response = client.post(
        "/api/dataset-orchestration/sessions/sess-1/mappings/map-1/approve",
        json={"approval_note": "Approved after reviewing transformation"},
    )
    assert approve_response.status_code == 200
    approve_payload = approve_response.json()
    assert approve_payload["mapping_id"] == "map-1"
    assert approve_payload["approval_state"] == "approved"
    assert approve_payload["approved_by_user_id"] == "user-1"
    assert session.execution_mappings[0].transformation_note == "Approved after reviewing transformation"

    # --- Batch approval: same item contract as single-item approval ---
    batch_response = client.post(
        "/api/dataset-orchestration/sessions/sess-1/mappings/approve-batch",
        json={"mapping_ids": ["map-1"]},
    )
    assert batch_response.status_code == 200
    assert batch_response.json()[0]["mapping_id"] == "map-1"

    # --- Preview: Superset-compiled SQL is the returned truth ---
    preview_response = client.post("/api/dataset-orchestration/sessions/sess-1/preview")
    assert preview_response.status_code == 200
    preview_payload = preview_response.json()
    assert preview_payload["session_id"] == "sess-1"
    assert preview_payload["preview_status"] == "ready"
    assert preview_payload["preview"]["compiled_by"] == "superset"
    assert "SELECT * FROM sales" in preview_payload["preview"]["compiled_sql"]

    # --- Launch: audited handoff with run context and SQL Lab session ref ---
    launch_response = client.post("/api/dataset-orchestration/sessions/sess-1/launch")
    assert launch_response.status_code == 200
    launch_payload = launch_response.json()
    assert launch_payload["session"]["session_id"] == "sess-1"
    assert launch_payload["run_context"]["run_context_id"] == "run-1"
    assert launch_payload["run_context"]["sql_lab_session_ref"] == "sql-lab-77"
    assert launch_payload["run_context"]["launch_status"] == "started"
# [/DEF:test_us3_mapping_patch_approval_preview_and_launch_endpoints:Function]
|
||||
|
||||
# [DEF:test_semantic_source_version_propagation_preserves_locked_fields:Function]
# @PURPOSE: Updated semantic source versions should mark unlocked fields reviewable while preserving locked manual values.
def test_semantic_source_version_propagation_preserves_locked_fields():
    """Propagating a new source version updates unlocked fields but never locked ones.

    Given one unlocked and one locked field on source version 2026.03 and a
    source bumped to 2026.04, the resolver must:
    - update the unlocked field to 2026.04 and flag it ``needs_review``;
    - leave the locked (manual-override) field untouched;
    - report counts of propagated vs. preserved-locked fields.
    """
    resolver = SemanticSourceResolver()
    source = SimpleNamespace(source_id="src-1", source_version="2026.04")

    # Unlocked dictionary match: eligible for propagation.
    unlocked_field = SimpleNamespace(
        source_id="src-1",
        source_version="2026.03",
        is_locked=False,
        provenance=FieldProvenance.DICTIONARY_EXACT,
        needs_review=False,
        has_conflict=False,
    )
    # Locked manual override: the propagation invariant says do not touch it.
    locked_field = SimpleNamespace(
        source_id="src-1",
        source_version="2026.03",
        is_locked=True,
        provenance=FieldProvenance.MANUAL_OVERRIDE,
        needs_review=False,
        has_conflict=False,
    )

    result = resolver.propagate_source_version_update(source, [unlocked_field, locked_field])

    assert result["propagated"] == 1
    assert result["preserved_locked"] == 1
    # Unlocked field follows the source and becomes reviewable.
    assert unlocked_field.source_version == "2026.04"
    assert unlocked_field.needs_review is True
    # Locked field keeps its manual state on the old version.
    assert locked_field.source_version == "2026.03"
    assert locked_field.needs_review is False
# [/DEF:test_semantic_source_version_propagation_preserves_locked_fields:Function]
|
||||
|
||||
# [/DEF:DatasetReviewApiTests:Module]
|
||||
@@ -1,9 +1,9 @@
|
||||
# [DEF:backend.src.api.routes.__tests__.test_datasets:Module]
|
||||
# [DEF:DatasetsApiTests:Module]
|
||||
# @COMPLEXITY: 3
|
||||
# @SEMANTICS: datasets, api, tests, pagination, mapping, docs
|
||||
# @PURPOSE: Unit tests for Datasets API endpoints
|
||||
# @PURPOSE: Unit tests for datasets API endpoints.
|
||||
# @LAYER: API
|
||||
# @RELATION: TESTS -> backend.src.api.routes.datasets
|
||||
# @RELATION: DEPENDS_ON -> backend.src.api.routes.datasets
|
||||
# @INVARIANT: Endpoint contracts remain stable for success and validation failure paths.
|
||||
|
||||
import pytest
|
||||
@@ -89,6 +89,7 @@ def test_get_datasets_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_datasets_env_not_found:Function]
|
||||
# @PURPOSE: Validate datasets listing returns 404 when the requested environment does not exist.
|
||||
# @TEST: GET /api/datasets returns 404 if env_id missing
|
||||
# @PRE: env_id does not exist
|
||||
# @POST: Returns 404 error
|
||||
@@ -105,6 +106,7 @@ def test_get_datasets_env_not_found(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_datasets_invalid_pagination:Function]
|
||||
# @PURPOSE: Validate datasets listing rejects invalid pagination parameters with 400 responses.
|
||||
# @TEST: GET /api/datasets returns 400 for invalid page/page_size
|
||||
# @PRE: page < 1 or page_size > 100
|
||||
# @POST: Returns 400 error
|
||||
@@ -133,6 +135,7 @@ def test_get_datasets_invalid_pagination(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_map_columns_success:Function]
|
||||
# @PURPOSE: Validate map-columns request creates an async mapping task and returns its identifier.
|
||||
# @TEST: POST /api/datasets/map-columns creates mapping task
|
||||
# @PRE: Valid env_id, dataset_ids, source_type
|
||||
# @POST: Returns task_id
|
||||
@@ -167,6 +170,7 @@ def test_map_columns_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_map_columns_invalid_source_type:Function]
|
||||
# @PURPOSE: Validate map-columns rejects unsupported source types with a 400 contract response.
|
||||
# @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type
|
||||
# @PRE: source_type is not 'postgresql' or 'xlsx'
|
||||
# @POST: Returns 400 error
|
||||
@@ -190,6 +194,7 @@ def test_map_columns_invalid_source_type(mock_deps):
|
||||
# [DEF:test_generate_docs_success:Function]
|
||||
# @TEST: POST /api/datasets/generate-docs creates doc generation task
|
||||
# @PRE: Valid env_id, dataset_ids, llm_provider
|
||||
# @PURPOSE: Validate generate-docs request creates an async documentation task and returns its identifier.
|
||||
# @POST: Returns task_id
|
||||
def test_generate_docs_success(mock_deps):
|
||||
# Mock environment
|
||||
@@ -222,6 +227,7 @@ def test_generate_docs_success(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_map_columns_empty_ids:Function]
|
||||
# @PURPOSE: Validate map-columns rejects empty dataset identifier lists.
|
||||
# @TEST: POST /api/datasets/map-columns returns 400 for empty dataset_ids
|
||||
# @PRE: dataset_ids is empty
|
||||
# @POST: Returns 400 error
|
||||
@@ -241,6 +247,7 @@ def test_map_columns_empty_ids(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_generate_docs_empty_ids:Function]
|
||||
# @PURPOSE: Validate generate-docs rejects empty dataset identifier lists.
|
||||
# @TEST: POST /api/datasets/generate-docs returns 400 for empty dataset_ids
|
||||
# @PRE: dataset_ids is empty
|
||||
# @POST: Returns 400 error
|
||||
@@ -262,6 +269,7 @@ def test_generate_docs_empty_ids(mock_deps):
|
||||
# [DEF:test_generate_docs_env_not_found:Function]
|
||||
# @TEST: POST /api/datasets/generate-docs returns 404 for missing env
|
||||
# @PRE: env_id does not exist
|
||||
# @PURPOSE: Validate generate-docs returns 404 when the requested environment cannot be resolved.
|
||||
# @POST: Returns 404 error
|
||||
def test_generate_docs_env_not_found(mock_deps):
|
||||
"""@PRE: env_id must be a valid environment."""
|
||||
@@ -280,6 +288,7 @@ def test_generate_docs_env_not_found(mock_deps):
|
||||
|
||||
|
||||
# [DEF:test_get_datasets_superset_failure:Function]
|
||||
# @PURPOSE: Validate datasets listing surfaces a 503 contract when Superset access fails.
|
||||
# @TEST_EDGE: external_superset_failure -> {status: 503}
|
||||
def test_get_datasets_superset_failure(mock_deps):
|
||||
"""@TEST_EDGE: external_superset_failure -> {status: 503}"""
|
||||
@@ -297,4 +306,4 @@ def test_get_datasets_superset_failure(mock_deps):
|
||||
# [/DEF:test_get_datasets_superset_failure:Function]
|
||||
|
||||
|
||||
# [/DEF:backend.src.api.routes.__tests__.test_datasets:Module]
|
||||
# [/DEF:DatasetsApiTests:Module]
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user