test semantics

This commit is contained in:
2026-03-21 15:23:06 +03:00
parent 9b47b9b667
commit 0b275ed3d1
14 changed files with 467 additions and 101 deletions

View File

@@ -0,0 +1,43 @@
# [DEF:RoutesTestsConftest:Module]
# @COMPLEXITY: 1
# @PURPOSE: Shared low-fidelity test doubles for API route test modules.
class FakeQuery:
    """Shared chainable query stub for route tests.
    Warning: predicate arguments passed to filter() are recorded only for inspection
    and are not evaluated; result sets remain predicate-blind.
    """

    def __init__(self, rows):
        # Defensive copy so callers' row lists are never mutated by the stub.
        self._rows = list(rows)
        # Audit trail of (args, kwargs) tuples seen by filter(); inspection only.
        self._seen_predicates = []

    def filter(self, *args, **kwargs):
        # Predicate-aware bookkeeping only; no predicate evaluation is performed.
        self._seen_predicates.append((args, kwargs))
        return self

    def order_by(self, *args, **kwargs):
        # Ordering is a no-op in this stub; rows keep insertion order.
        return self

    def limit(self, limit):
        # Truncate in place to the first `limit` rows.
        del self._rows[limit:]
        return self

    def offset(self, offset):
        # Drop the first `offset` rows in place.
        del self._rows[:offset]
        return self

    def first(self):
        # None when the (possibly sliced) result set is empty.
        return next(iter(self._rows), None)

    def all(self):
        # Return a copy so callers cannot mutate the stub's internal state.
        return self._rows[:]

    def count(self):
        return len(self._rows)
# [/DEF:RoutesTestsConftest:Module]

View File

@@ -60,6 +60,7 @@ class _FakeTask:
# [/DEF:_FakeTask:Class]
# @DEBT: Divergent _FakeTaskManager definition. Canonical version should be in conftest.py. Authz variant is missing get_all_tasks().
# [DEF:_FakeTaskManager:Class]
# @RELATION: BINDS_TO -> [AssistantApiTests]
# @COMPLEXITY: 2
@@ -167,6 +168,7 @@ class _FakeQuery:
self.items = items
def filter(self, *args, **kwargs):
# @INVARIANT: filter() is predicate-blind; returns all records regardless of user_id scope
return self
def order_by(self, *args, **kwargs):

View File

@@ -61,6 +61,7 @@ class _FakeTask:
# [/DEF:_FakeTask:Class]
# @DEBT: Divergent _FakeTaskManager definition. Canonical version should be in conftest.py. Authz variant is missing get_all_tasks().
# [DEF:_FakeTaskManager:Class]
# @RELATION: BINDS_TO -> [TestAssistantAuthz]
# @COMPLEXITY: 2
@@ -85,8 +86,14 @@ class _FakeTaskManager:
def get_tasks(self, limit=20, offset=0):
return [x[3] for x in self._created][offset : offset + limit]
def get_all_tasks(self):
raise NotImplementedError(
"get_all_tasks not implemented in authz FakeTaskManager"
)
# [/DEF:_FakeTaskManager:Class]
# @CONTRACT: Partial ConfigManager stub for authz tests. Missing: get_config().
# [DEF:_FakeConfigManager:Class]
# @RELATION: BINDS_TO -> [TestAssistantAuthz]
# @COMPLEXITY: 1
@@ -101,6 +108,11 @@ class _FakeConfigManager:
SimpleNamespace(id="prod", name="Production"),
]
def get_config(self):
raise NotImplementedError(
"get_config not implemented in authz fake — add if route under test requires it"
)
# [/DEF:_FakeConfigManager:Class]
# [DEF:_admin_user:Function]
@@ -151,6 +163,7 @@ class _FakeQuery:
self._rows = list(rows)
def filter(self, *args, **kwargs):
# @INVARIANT: filter() is predicate-blind; returns all records regardless of user_id scope
return self
def order_by(self, *args, **kwargs):

View File

@@ -15,9 +15,13 @@ from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
# Force SQLite in-memory for database module imports.
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["DATABASE_URL"] = "sqlite:///:memory:"
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["TASKS_DATABASE_URL"] = "sqlite:///:memory:"
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["AUTH_DATABASE_URL"] = "sqlite:///:memory:"
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["ENVIRONMENT"] = "testing"
backend_dir = str(Path(__file__).parent.parent.parent.parent.resolve())

View File

@@ -78,8 +78,12 @@ def _make_user():
SimpleNamespace(resource="dataset:session", action="MANAGE"),
SimpleNamespace(resource="dataset:execution:launch", action="EXECUTE"),
]
dataset_review_role = SimpleNamespace(name="DatasetReviewOperator", permissions=permissions)
dataset_review_role = SimpleNamespace(
name="DatasetReviewOperator", permissions=permissions
)
return SimpleNamespace(id="user-1", username="tester", roles=[dataset_review_role])
# [/DEF:_make_user:Function]
@@ -95,9 +99,13 @@ def _make_config_manager():
)
config = AppConfig(environments=[env], settings=GlobalSettings())
manager = MagicMock()
manager.get_environment.side_effect = lambda env_id: env if env_id == "env-1" else None
manager.get_environment.side_effect = (
lambda env_id: env if env_id == "env-1" else None
)
manager.get_config.return_value = config
return manager
# [/DEF:_make_config_manager:Function]
@@ -122,6 +130,8 @@ def _make_session():
updated_at=now,
last_activity_at=now,
)
# [/DEF:_make_session:Function]
@@ -238,6 +248,8 @@ def _make_us2_session():
session.previews = []
session.run_contexts = []
return session
# [/DEF:_make_us2_session:Function]
@@ -250,7 +262,7 @@ def _make_us3_session():
session.recommended_action = RecommendedAction.APPROVE_MAPPING
session.current_phase = SessionPhase.MAPPING_REVIEW
imported_filter = MagicMock()
imported_filter = MagicMock() # @RISK: No spec= guard — enum/field contract changes are undetectable at test time.
imported_filter.filter_id = "filter-1"
imported_filter.session_id = "sess-1"
imported_filter.filter_name = "country"
@@ -263,7 +275,7 @@ def _make_us3_session():
imported_filter.recovery_status = "recovered"
imported_filter.notes = "Recovered from URL state"
template_variable = MagicMock()
template_variable = MagicMock() # @RISK: No spec= guard — enum/field contract changes are undetectable at test time.
template_variable.variable_id = "var-1"
template_variable.session_id = "sess-1"
template_variable.variable_name = "country"
@@ -301,6 +313,8 @@ def _make_us3_session():
session.previews = []
session.run_contexts = []
return session
# [/DEF:_make_us3_session:Function]
@@ -312,6 +326,8 @@ def _make_preview_ready_session():
session.recommended_action = RecommendedAction.GENERATE_SQL_PREVIEW
session.current_phase = SessionPhase.PREVIEW
return session
# [/DEF:_make_preview_ready_session:Function]
@@ -333,6 +349,8 @@ def dataset_review_api_dependencies():
"task_manager": task_manager,
}
app.dependency_overrides.clear()
# [/DEF:dataset_review_api_dependencies:Function]
@@ -368,6 +386,8 @@ def test_parse_superset_link_dashboard_partial_recovery():
assert result.partial_recovery is True
assert "multiple_dashboard_datasets" in result.unresolved_references
assert result.imported_filters[0]["filter_name"] == "country"
# [/DEF:test_parse_superset_link_dashboard_partial_recovery:Function]
@@ -403,6 +423,8 @@ def test_parse_superset_link_dashboard_slug_recovery():
assert result.partial_recovery is False
assert result.query_state["native_filters_key"] == "8ZLV4M-UXOM"
fake_client.get_dashboard_detail.assert_called_once_with("slack")
# [/DEF:test_parse_superset_link_dashboard_slug_recovery:Function]
@@ -445,7 +467,9 @@ def test_parse_superset_link_dashboard_permalink_partial_recovery():
assert result.dashboard_id is None
assert result.dataset_ref == "dashboard_permalink:QabXy6wG30Z"
assert result.partial_recovery is True
assert "dashboard_permalink_dataset_binding_unresolved" in result.unresolved_references
assert (
"dashboard_permalink_dataset_binding_unresolved" in result.unresolved_references
)
assert result.imported_filters[0]["filter_name"] == "country"
assert result.imported_filters[0]["raw_value"] == ["DE"]
fake_client.get_dashboard_permalink_state.assert_called_once_with("QabXy6wG30Z")
@@ -475,7 +499,10 @@ def test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_da
}
}
fake_client.get_dashboard_detail.return_value = {"id": 22, "datasets": [{"id": 42}]}
fake_client.get_dataset_detail.return_value = {"table_name": "sales", "schema": "public"}
fake_client.get_dataset_detail.return_value = {
"table_name": "sales",
"schema": "public",
}
extractor = SupersetContextExtractor(environment=env, client=fake_client)
result = extractor.parse_superset_link(
@@ -485,8 +512,13 @@ def test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_da
assert result.dashboard_id == 22
assert result.dataset_id == 42
assert result.dataset_ref == "public.sales"
assert "dashboard_permalink_dataset_binding_unresolved" not in result.unresolved_references
assert (
"dashboard_permalink_dataset_binding_unresolved"
not in result.unresolved_references
)
assert result.imported_filters[0]["filter_name"] == "country"
# [/DEF:test_parse_superset_link_dashboard_permalink_recovers_dataset_from_nested_dashboard_state:Function]
# [/DEF:test_parse_superset_link_dashboard_permalink_partial_recovery:Function]
@@ -519,21 +551,29 @@ def test_resolve_from_dictionary_prefers_exact_match():
],
)
resolved_exact = next(item for item in result.resolved_fields if item["field_name"] == "revenue")
unresolved = next(item for item in result.resolved_fields if item["field_name"] == "margin")
resolved_exact = next(
item for item in result.resolved_fields if item["field_name"] == "revenue"
)
unresolved = next(
item for item in result.resolved_fields if item["field_name"] == "margin"
)
assert resolved_exact["applied_candidate"]["match_type"] == "exact"
assert resolved_exact["provenance"] == "dictionary_exact"
assert unresolved["status"] == "unresolved"
assert "margin" in result.unresolved_fields
assert result.partial_recovery is True
# [/DEF:test_resolve_from_dictionary_prefers_exact_match:Function]
# [DEF:test_orchestrator_start_session_preserves_partial_recovery:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify session start persists usable recovery-required state when Superset intake is partial.
def test_orchestrator_start_session_preserves_partial_recovery(dataset_review_api_dependencies):
def test_orchestrator_start_session_preserves_partial_recovery(
dataset_review_api_dependencies,
):
repository = MagicMock()
created_session = _make_session()
created_session.readiness_state = ReadinessState.RECOVERY_REQUIRED
@@ -589,13 +629,17 @@ def test_orchestrator_start_session_preserves_partial_recovery(dataset_review_ap
assert result.findings[0].severity.value == "warning"
repository.create_session.assert_called_once()
repository.save_profile_and_findings.assert_called_once()
# [/DEF:test_orchestrator_start_session_preserves_partial_recovery:Function]
# [DEF:test_orchestrator_start_session_bootstraps_recovery_state:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify session start persists recovered filters, template variables, and initial execution mappings for review workspace bootstrap.
def test_orchestrator_start_session_bootstraps_recovery_state(dataset_review_api_dependencies):
def test_orchestrator_start_session_bootstraps_recovery_state(
dataset_review_api_dependencies,
):
repository = MagicMock()
created_session = _make_session()
created_session.readiness_state = ReadinessState.RECOVERY_REQUIRED
@@ -631,7 +675,9 @@ def test_orchestrator_start_session_bootstraps_recovery_state(dataset_review_api
"raw_value": ["DE"],
"normalized_value": {
"filter_clauses": [{"col": "country_code", "op": "IN", "val": ["DE"]}],
"extra_form_data": {"filters": [{"col": "country_code", "op": "IN", "val": ["DE"]}]},
"extra_form_data": {
"filters": [{"col": "country_code", "op": "IN", "val": ["DE"]}]
},
"value_origin": "extra_form_data.filters",
},
"source": "superset_url",
@@ -680,23 +726,31 @@ def test_orchestrator_start_session_bootstraps_recovery_state(dataset_review_api
assert saved_filters[0].filter_name == "country"
assert saved_filters[0].normalized_value == {
"filter_clauses": [{"col": "country_code", "op": "IN", "val": ["DE"]}],
"extra_form_data": {"filters": [{"col": "country_code", "op": "IN", "val": ["DE"]}]},
"extra_form_data": {
"filters": [{"col": "country_code", "op": "IN", "val": ["DE"]}]
},
"value_origin": "extra_form_data.filters",
}
assert len(saved_variables) == 1
assert saved_variables[0].variable_name == "country"
assert len(saved_mappings) == 1
assert saved_mappings[0].raw_input_value == ["DE"]
# [/DEF:test_orchestrator_start_session_bootstraps_recovery_state:Function]
# [DEF:test_start_session_endpoint_returns_created_summary:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify POST session lifecycle endpoint returns a persisted ownership-scoped summary.
def test_start_session_endpoint_returns_created_summary(dataset_review_api_dependencies):
def test_start_session_endpoint_returns_created_summary(
dataset_review_api_dependencies,
):
session = _make_session()
orchestrator = MagicMock()
orchestrator.start_session.return_value = SimpleNamespace(session=session, findings=[], parsed_context=None)
orchestrator.start_session.return_value = SimpleNamespace(
session=session, findings=[], parsed_context=None
)
app.dependency_overrides[_get_orchestrator] = lambda: orchestrator
@@ -714,13 +768,17 @@ def test_start_session_endpoint_returns_created_summary(dataset_review_api_depen
assert payload["session_id"] == "sess-1"
assert payload["dataset_ref"] == "public.sales"
assert payload["environment_id"] == "env-1"
# [/DEF:test_start_session_endpoint_returns_created_summary:Function]
# [DEF:test_get_session_detail_export_and_lifecycle_endpoints:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Verify lifecycle get/patch/delete plus documentation and validation exports remain ownership-scoped and usable.
def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_dependencies):
def test_get_session_detail_export_and_lifecycle_endpoints(
dataset_review_api_dependencies,
):
now = datetime.now(timezone.utc)
session = MagicMock(spec=DatasetReviewSession)
session.session_id = "sess-1"
@@ -788,7 +846,9 @@ def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_de
repository.list_sessions_for_user.return_value = [session]
repository.db = MagicMock()
repository.event_logger = MagicMock(spec=SessionEventLogger)
repository.event_logger.log_for_session.return_value = SimpleNamespace(session_event_id="evt-0")
repository.event_logger.log_for_session.return_value = SimpleNamespace(
session_event_id="evt-0"
)
app.dependency_overrides[_get_repository] = lambda: repository
@@ -803,24 +863,32 @@ def test_get_session_detail_export_and_lifecycle_endpoints(dataset_review_api_de
assert patch_response.status_code == 200
assert patch_response.json()["status"] == "paused"
doc_response = client.get("/api/dataset-orchestration/sessions/sess-1/exports/documentation?format=json")
doc_response = client.get(
"/api/dataset-orchestration/sessions/sess-1/exports/documentation?format=json"
)
assert doc_response.status_code == 200
assert doc_response.json()["artifact_type"] == "documentation"
validation_response = client.get("/api/dataset-orchestration/sessions/sess-1/exports/validation?format=markdown")
validation_response = client.get(
"/api/dataset-orchestration/sessions/sess-1/exports/validation?format=markdown"
)
assert validation_response.status_code == 200
assert validation_response.json()["artifact_type"] == "validation_report"
assert "Validation Report" in validation_response.json()["content"]["markdown"]
delete_response = client.delete("/api/dataset-orchestration/sessions/sess-1")
assert delete_response.status_code == 204
# [/DEF:test_get_session_detail_export_and_lifecycle_endpoints:Function]
# [DEF:test_us2_clarification_endpoints_persist_answer_and_feedback:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Clarification endpoints should expose one current question, persist the answer before advancement, and store feedback on the answer audit record.
def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_api_dependencies):
def test_us2_clarification_endpoints_persist_answer_and_feedback(
dataset_review_api_dependencies,
):
session = _make_us2_session()
repository = MagicMock()
repository.load_session_detail.return_value = session
@@ -837,10 +905,15 @@ def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_
app.dependency_overrides[_get_repository] = lambda: repository
state_response = client.get("/api/dataset-orchestration/sessions/sess-1/clarification")
state_response = client.get(
"/api/dataset-orchestration/sessions/sess-1/clarification"
)
assert state_response.status_code == 200
state_payload = state_response.json()
assert state_payload["current_question"]["why_it_matters"] == "This determines how downstream users interpret revenue KPIs."
assert (
state_payload["current_question"]["why_it_matters"]
== "This determines how downstream users interpret revenue KPIs."
)
assert state_payload["current_question"]["current_guess"] == "Revenue reporting"
assert len(state_payload["current_question"]["options"]) == 2
@@ -857,7 +930,10 @@ def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_
assert answer_payload["session"]["readiness_state"] == "review_ready"
assert answer_payload["clarification_state"]["current_question"] is None
assert answer_payload["changed_findings"][0]["resolution_state"] == "resolved"
assert session.clarification_sessions[0].questions[0].answer.answer_value == "Revenue reporting"
assert (
session.clarification_sessions[0].questions[0].answer.answer_value
== "Revenue reporting"
)
feedback_response = client.post(
"/api/dataset-orchestration/sessions/sess-1/clarification/questions/q-1/feedback",
@@ -866,13 +942,17 @@ def test_us2_clarification_endpoints_persist_answer_and_feedback(dataset_review_
assert feedback_response.status_code == 200
assert feedback_response.json() == {"target_id": "q-1", "feedback": "up"}
assert session.clarification_sessions[0].questions[0].answer.user_feedback == "up"
# [/DEF:test_us2_clarification_endpoints_persist_answer_and_feedback:Function]
# [DEF:test_us2_field_semantic_override_lock_unlock_and_feedback:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Semantic field endpoints should apply manual overrides with lock/provenance invariants and persist feedback independently.
def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api_dependencies):
def test_us2_field_semantic_override_lock_unlock_and_feedback(
dataset_review_api_dependencies,
):
session = _make_us2_session()
repository = MagicMock()
repository.load_session_detail.return_value = session
@@ -882,7 +962,9 @@ def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api
repository.db.add.side_effect = lambda obj: None
repository.db.flush.side_effect = lambda: None
repository.event_logger = MagicMock(spec=SessionEventLogger)
repository.event_logger.log_for_session.return_value = SimpleNamespace(session_event_id="evt-1")
repository.event_logger.log_for_session.return_value = SimpleNamespace(
session_event_id="evt-1"
)
app.dependency_overrides[_get_repository] = lambda: repository
@@ -899,7 +981,9 @@ def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api
assert override_payload["provenance"] == "manual_override"
assert override_payload["is_locked"] is True
unlock_response = client.post("/api/dataset-orchestration/sessions/sess-1/fields/field-1/unlock")
unlock_response = client.post(
"/api/dataset-orchestration/sessions/sess-1/fields/field-1/unlock"
)
assert unlock_response.status_code == 200
assert unlock_response.json()["is_locked"] is False
@@ -915,7 +999,11 @@ def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api
batch_response = client.post(
"/api/dataset-orchestration/sessions/sess-1/fields/semantic/approve-batch",
json={"items": [{"field_id": "field-1", "candidate_id": "cand-1", "lock_field": False}]},
json={
"items": [
{"field_id": "field-1", "candidate_id": "cand-1", "lock_field": False}
]
},
)
assert batch_response.status_code == 200
assert batch_response.json()[0]["field_id"] == "field-1"
@@ -927,13 +1015,17 @@ def test_us2_field_semantic_override_lock_unlock_and_feedback(dataset_review_api
assert feedback_response.status_code == 200
assert feedback_response.json() == {"target_id": "field-1", "feedback": "down"}
assert session.semantic_fields[0].user_feedback == "down"
# [/DEF:test_us2_field_semantic_override_lock_unlock_and_feedback:Function]
# [DEF:test_us3_mapping_patch_approval_preview_and_launch_endpoints:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: US3 execution endpoints should persist manual overrides, preserve explicit approval semantics, return contract-shaped preview truth, and expose audited launch handoff.
def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_api_dependencies):
def test_us3_mapping_patch_approval_preview_and_launch_endpoints(
dataset_review_api_dependencies,
):
session = _make_us3_session()
latest_preview = CompiledPreview(
preview_id="preview-old",
@@ -955,7 +1047,9 @@ def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_
repository.db.commit.side_effect = lambda: None
repository.db.refresh.side_effect = lambda obj: None
repository.event_logger = MagicMock(spec=SessionEventLogger)
repository.event_logger.log_for_session.return_value = SimpleNamespace(session_event_id="evt-2")
repository.event_logger.log_for_session.return_value = SimpleNamespace(
session_event_id="evt-2"
)
preview = SimpleNamespace(
preview_id="preview-1",
@@ -1016,7 +1110,10 @@ def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_
assert patch_payload["approval_state"] == "approved"
assert patch_payload["approved_by_user_id"] == "user-1"
assert session.execution_mappings[0].mapping_method == MappingMethod.MANUAL_OVERRIDE
assert session.execution_mappings[0].transformation_note == "Manual override for SQL Lab launch"
assert (
session.execution_mappings[0].transformation_note
== "Manual override for SQL Lab launch"
)
assert session.execution_mappings[0].effective_value == "EU"
assert session.recommended_action == RecommendedAction.GENERATE_SQL_PREVIEW
assert latest_preview.preview_status == PreviewStatus.STALE
@@ -1030,7 +1127,10 @@ def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_
assert approve_payload["mapping_id"] == "map-1"
assert approve_payload["approval_state"] == "approved"
assert approve_payload["approved_by_user_id"] == "user-1"
assert session.execution_mappings[0].transformation_note == "Approved after reviewing transformation"
assert (
session.execution_mappings[0].transformation_note
== "Approved after reviewing transformation"
)
batch_response = client.post(
"/api/dataset-orchestration/sessions/sess-1/mappings/approve-batch",
@@ -1067,7 +1167,9 @@ def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_
),
blocked_reasons=[],
)
preview_enqueue_response = client.post("/api/dataset-orchestration/sessions/sess-1/preview")
preview_enqueue_response = client.post(
"/api/dataset-orchestration/sessions/sess-1/preview"
)
assert preview_enqueue_response.status_code == 202
assert preview_enqueue_response.json() == {
"session_id": "sess-1",
@@ -1081,7 +1183,12 @@ def test_us3_mapping_patch_approval_preview_and_launch_endpoints(dataset_review_
assert launch_payload["run_context"]["run_context_id"] == "run-1"
assert launch_payload["run_context"]["sql_lab_session_ref"] == "sql-lab-77"
assert launch_payload["run_context"]["launch_status"] == "started"
assert launch_payload["redirect_url"] == "http://superset.local/superset/sqllab?queryId=sql-lab-77"
assert (
launch_payload["redirect_url"]
== "http://superset.local/superset/sqllab?queryId=sql-lab-77"
)
# [/DEF:test_us3_mapping_patch_approval_preview_and_launch_endpoints:Function]
@@ -1131,6 +1238,8 @@ def test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not
assert "/chart/data" in payload["error_details"]
assert "API resource not found" in payload["error_details"]
assert "Dashboard not found" not in payload["error_details"]
# [/DEF:test_us3_preview_endpoint_returns_failed_preview_without_false_dashboard_not_found_contract_drift:Function]
@@ -1170,7 +1279,9 @@ def test_execution_snapshot_includes_recovered_imported_filters_without_template
assert snapshot["preview_blockers"] == []
recovered_filter.normalized_value = {
"filter_clauses": [{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}],
"extra_form_data": {"filters": [{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}]},
"extra_form_data": {
"filters": [{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}]
},
"value_origin": "extra_form_data.filters",
}
@@ -1186,12 +1297,20 @@ def test_execution_snapshot_includes_recovered_imported_filters_without_template
"effective_value": ["DE", "FR"],
"raw_input_value": ["DE", "FR"],
"normalized_filter_payload": {
"filter_clauses": [{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}],
"extra_form_data": {"filters": [{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}]},
"filter_clauses": [
{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}
],
"extra_form_data": {
"filters": [
{"col": "country_code", "op": "IN", "val": ["DE", "FR"]}
]
},
"value_origin": "extra_form_data.filters",
},
}
]
# [/DEF:test_execution_snapshot_includes_recovered_imported_filters_without_template_mapping:Function]
@@ -1227,6 +1346,8 @@ def test_execution_snapshot_preserves_mapped_template_variables_and_filter_conte
}
]
assert snapshot["open_warning_refs"] == ["map-1"]
# [/DEF:test_execution_snapshot_preserves_mapped_template_variables_and_filter_context:Function]
@@ -1265,13 +1386,17 @@ def test_execution_snapshot_skips_partial_imported_filters_without_values(
assert snapshot["template_params"] == {}
assert snapshot["effective_filters"] == []
assert snapshot["preview_blockers"] == []
# [/DEF:test_execution_snapshot_skips_partial_imported_filters_without_values:Function]
# [DEF:test_us3_launch_endpoint_requires_launch_permission:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Launch endpoint should enforce the contract RBAC permission instead of the generic session-manage permission.
def test_us3_launch_endpoint_requires_launch_permission(dataset_review_api_dependencies):
def test_us3_launch_endpoint_requires_launch_permission(
dataset_review_api_dependencies,
):
session = _make_us3_session()
repository = MagicMock()
repository.load_session_detail.return_value = session
@@ -1313,9 +1438,15 @@ def test_us3_launch_endpoint_requires_launch_permission(dataset_review_api_depen
response = client.post("/api/dataset-orchestration/sessions/sess-1/launch")
assert response.status_code == 403
assert response.json()["detail"] == "Permission denied for dataset:execution:launch:EXECUTE"
assert (
response.json()["detail"]
== "Permission denied for dataset:execution:launch:EXECUTE"
)
# [/DEF:test_us3_launch_endpoint_requires_launch_permission:Function]
# [DEF:test_semantic_source_version_propagation_preserves_locked_fields:Function]
# @RELATION: BINDS_TO -> DatasetReviewApiTests
# @PURPOSE: Updated semantic source versions should mark unlocked fields reviewable while preserving locked manual values.
@@ -1340,7 +1471,9 @@ def test_semantic_source_version_propagation_preserves_locked_fields():
has_conflict=False,
)
result = resolver.propagate_source_version_update(source, [unlocked_field, locked_field])
result = resolver.propagate_source_version_update(
source, [unlocked_field, locked_field]
)
assert result["propagated"] == 1
assert result["preserved_locked"] == 1
@@ -1348,6 +1481,8 @@ def test_semantic_source_version_propagation_preserves_locked_fields():
assert unlocked_field.needs_review is True
assert locked_field.source_version == "2026.03"
assert locked_field.needs_review is False
# [/DEF:test_semantic_source_version_propagation_preserves_locked_fields:Function]
# [/DEF:DatasetReviewApiTests:Module]
# [/DEF:DatasetReviewApiTests:Module]

View File

@@ -17,10 +17,15 @@ if backend_dir not in sys.path:
sys.path.insert(0, backend_dir)
import os
# Force SQLite in-memory for all database connections BEFORE importing any application code
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["DATABASE_URL"] = "sqlite:///:memory:"
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["TASKS_DATABASE_URL"] = "sqlite:///:memory:"
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["AUTH_DATABASE_URL"] = "sqlite:///:memory:"
# @SIDE_EFFECT_WARNING: os.environ mutation at module import time — no teardown. This bleeds into all subsequently collected tests. Migrate to pytest.fixture(autouse=True) with monkeypatch.setenv.
os.environ["ENVIRONMENT"] = "testing"
@@ -32,18 +37,21 @@ from src.models.mapping import Base, ResourceMapping, ResourceType
# Patch the get_db dependency if `src.api.routes.migration` imports it
from unittest.mock import patch
patch('src.core.database.get_db').start()
patch("src.core.database.get_db").start()
# --- Fixtures ---
@pytest.fixture
def db_session():
"""In-memory SQLite session for testing."""
from sqlalchemy.pool import StaticPool
engine = create_engine(
'sqlite:///:memory:',
connect_args={'check_same_thread': False},
poolclass=StaticPool
"sqlite:///:memory:",
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
@@ -70,6 +78,7 @@ def _make_config_manager(cron="0 2 * * *"):
# [/DEF:_make_config_manager:Function]
@pytest.mark.asyncio
async def test_get_migration_settings_returns_default_cron():
"""Verify the settings endpoint returns the stored cron string."""
@@ -99,6 +108,7 @@ async def test_get_migration_settings_returns_fallback_when_no_cron():
# --- update_migration_settings tests ---
@pytest.mark.asyncio
async def test_update_migration_settings_saves_cron():
"""Verify that a valid cron update saves to config."""
@@ -107,9 +117,7 @@ async def test_update_migration_settings_saves_cron():
cm = _make_config_manager()
result = await update_migration_settings(
payload={"cron": "0 4 * * *"},
config_manager=cm,
_=None
payload={"cron": "0 4 * * *"}, config_manager=cm, _=None
)
assert result["cron"] == "0 4 * * *"
@@ -126,9 +134,7 @@ async def test_update_migration_settings_rejects_missing_cron():
with pytest.raises(HTTPException) as exc_info:
await update_migration_settings(
payload={"interval": "daily"},
config_manager=cm,
_=None
payload={"interval": "daily"}, config_manager=cm, _=None
)
assert exc_info.value.status_code == 400
@@ -137,6 +143,7 @@ async def test_update_migration_settings_rejects_missing_cron():
# --- get_resource_mappings tests ---
@pytest.mark.asyncio
async def test_get_resource_mappings_returns_formatted_list(db_session):
"""Verify mappings are returned as formatted dicts with correct keys."""
@@ -149,12 +156,20 @@ async def test_get_resource_mappings_returns_formatted_list(db_session):
uuid="uuid-1",
remote_integer_id="42",
resource_name="Sales Chart",
last_synced_at=datetime(2026, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
last_synced_at=datetime(2026, 1, 15, 12, 0, 0, tzinfo=timezone.utc),
)
db_session.add(m1)
db_session.commit()
result = await get_resource_mappings(skip=0, limit=50, search=None, env_id=None, resource_type=None, db=db_session, _=None)
result = await get_resource_mappings(
skip=0,
limit=50,
search=None,
env_id=None,
resource_type=None,
db=db_session,
_=None,
)
assert result["total"] == 1
assert len(result["items"]) == 1
@@ -172,15 +187,25 @@ async def test_get_resource_mappings_respects_pagination(db_session):
from src.api.routes.migration import get_resource_mappings
for i in range(5):
db_session.add(ResourceMapping(
environment_id="prod",
resource_type=ResourceType.DATASET,
uuid=f"uuid-{i}",
remote_integer_id=str(i),
))
db_session.add(
ResourceMapping(
environment_id="prod",
resource_type=ResourceType.DATASET,
uuid=f"uuid-{i}",
remote_integer_id=str(i),
)
)
db_session.commit()
result = await get_resource_mappings(skip=2, limit=2, search=None, env_id=None, resource_type=None, db=db_session, _=None)
result = await get_resource_mappings(
skip=2,
limit=2,
search=None,
env_id=None,
resource_type=None,
db=db_session,
_=None,
)
assert result["total"] == 5
assert len(result["items"]) == 2
@@ -191,11 +216,35 @@ async def test_get_resource_mappings_search_by_name(db_session):
"""Verify search filters by resource_name."""
from src.api.routes.migration import get_resource_mappings
db_session.add(ResourceMapping(environment_id="prod", resource_type=ResourceType.CHART, uuid="u1", remote_integer_id="1", resource_name="Sales Chart"))
db_session.add(ResourceMapping(environment_id="prod", resource_type=ResourceType.CHART, uuid="u2", remote_integer_id="2", resource_name="Revenue Dashboard"))
db_session.add(
ResourceMapping(
environment_id="prod",
resource_type=ResourceType.CHART,
uuid="u1",
remote_integer_id="1",
resource_name="Sales Chart",
)
)
db_session.add(
ResourceMapping(
environment_id="prod",
resource_type=ResourceType.CHART,
uuid="u2",
remote_integer_id="2",
resource_name="Revenue Dashboard",
)
)
db_session.commit()
result = await get_resource_mappings(skip=0, limit=50, search="sales", env_id=None, resource_type=None, db=db_session, _=None)
result = await get_resource_mappings(
skip=0,
limit=50,
search="sales",
env_id=None,
resource_type=None,
db=db_session,
_=None,
)
assert result["total"] == 1
assert result["items"][0]["resource_name"] == "Sales Chart"
@@ -205,11 +254,35 @@ async def test_get_resource_mappings_filter_by_env(db_session):
"""Verify env_id filter returns only matching environment."""
from src.api.routes.migration import get_resource_mappings
db_session.add(ResourceMapping(environment_id="ss1", resource_type=ResourceType.CHART, uuid="u1", remote_integer_id="1", resource_name="Chart A"))
db_session.add(ResourceMapping(environment_id="ss2", resource_type=ResourceType.CHART, uuid="u2", remote_integer_id="2", resource_name="Chart B"))
db_session.add(
ResourceMapping(
environment_id="ss1",
resource_type=ResourceType.CHART,
uuid="u1",
remote_integer_id="1",
resource_name="Chart A",
)
)
db_session.add(
ResourceMapping(
environment_id="ss2",
resource_type=ResourceType.CHART,
uuid="u2",
remote_integer_id="2",
resource_name="Chart B",
)
)
db_session.commit()
result = await get_resource_mappings(skip=0, limit=50, search=None, env_id="ss2", resource_type=None, db=db_session, _=None)
result = await get_resource_mappings(
skip=0,
limit=50,
search=None,
env_id="ss2",
resource_type=None,
db=db_session,
_=None,
)
assert result["total"] == 1
assert result["items"][0]["environment_id"] == "ss2"
@@ -219,17 +292,42 @@ async def test_get_resource_mappings_filter_by_type(db_session):
"""Verify resource_type filter returns only matching type."""
from src.api.routes.migration import get_resource_mappings
db_session.add(ResourceMapping(environment_id="prod", resource_type=ResourceType.CHART, uuid="u1", remote_integer_id="1", resource_name="My Chart"))
db_session.add(ResourceMapping(environment_id="prod", resource_type=ResourceType.DATASET, uuid="u2", remote_integer_id="2", resource_name="My Dataset"))
db_session.add(
ResourceMapping(
environment_id="prod",
resource_type=ResourceType.CHART,
uuid="u1",
remote_integer_id="1",
resource_name="My Chart",
)
)
db_session.add(
ResourceMapping(
environment_id="prod",
resource_type=ResourceType.DATASET,
uuid="u2",
remote_integer_id="2",
resource_name="My Dataset",
)
)
db_session.commit()
result = await get_resource_mappings(skip=0, limit=50, search=None, env_id=None, resource_type="dataset", db=db_session, _=None)
result = await get_resource_mappings(
skip=0,
limit=50,
search=None,
env_id=None,
resource_type="dataset",
db=db_session,
_=None,
)
assert result["total"] == 1
assert result["items"][0]["resource_type"] == "dataset"
# --- trigger_sync_now tests ---
@pytest.fixture
# [DEF:_mock_env:Function]
# @RELATION: BINDS_TO -> TestMigrationRoutes
@@ -248,6 +346,7 @@ def _mock_env():
# [/DEF:_mock_env:Function]
# [DEF:_make_sync_config_manager:Function]
# @RELATION: BINDS_TO -> TestMigrationRoutes
def _make_sync_config_manager(environments):
@@ -265,6 +364,7 @@ def _make_sync_config_manager(environments):
# [/DEF:_make_sync_config_manager:Function]
@pytest.mark.asyncio
async def test_trigger_sync_now_creates_env_row_and_syncs(db_session, _mock_env):
"""Verify that trigger_sync_now creates an Environment row in DB before syncing,
@@ -274,8 +374,10 @@ async def test_trigger_sync_now_creates_env_row_and_syncs(db_session, _mock_env)
cm = _make_sync_config_manager([_mock_env])
with patch("src.api.routes.migration.SupersetClient") as MockClient, \
patch("src.api.routes.migration.IdMappingService") as MockService:
with (
patch("src.api.routes.migration.SupersetClient") as MockClient,
patch("src.api.routes.migration.IdMappingService") as MockService,
):
mock_client_instance = MagicMock()
MockClient.return_value = mock_client_instance
mock_service_instance = MagicMock()
@@ -290,7 +392,9 @@ async def test_trigger_sync_now_creates_env_row_and_syncs(db_session, _mock_env)
assert env_row.url == "http://superset.test"
# Sync must have been called
mock_service_instance.sync_environment.assert_called_once_with("test-env-1", mock_client_instance)
mock_service_instance.sync_environment.assert_called_once_with(
"test-env-1", mock_client_instance
)
assert result["synced_count"] == 1
assert result["failed_count"] == 0
@@ -325,10 +429,15 @@ async def test_trigger_sync_now_handles_partial_failure(db_session, _mock_env):
cm = _make_sync_config_manager([_mock_env, env2])
with patch("src.api.routes.migration.SupersetClient") as MockClient, \
patch("src.api.routes.migration.IdMappingService") as MockService:
with (
patch("src.api.routes.migration.SupersetClient") as MockClient,
patch("src.api.routes.migration.IdMappingService") as MockService,
):
mock_service_instance = MagicMock()
mock_service_instance.sync_environment.side_effect = [None, RuntimeError("Connection refused")]
mock_service_instance.sync_environment.side_effect = [
None,
RuntimeError("Connection refused"),
]
MockService.return_value = mock_service_instance
MockClient.return_value = MagicMock()
@@ -347,8 +456,10 @@ async def test_trigger_sync_now_idempotent_env_upsert(db_session, _mock_env):
cm = _make_sync_config_manager([_mock_env])
with patch("src.api.routes.migration.SupersetClient"), \
patch("src.api.routes.migration.IdMappingService"):
with (
patch("src.api.routes.migration.SupersetClient"),
patch("src.api.routes.migration.IdMappingService"),
):
await trigger_sync_now(config_manager=cm, db=db_session, _=None)
await trigger_sync_now(config_manager=cm, db=db_session, _=None)
@@ -358,64 +469,71 @@ async def test_trigger_sync_now_idempotent_env_upsert(db_session, _mock_env):
# --- get_dashboards tests ---
@pytest.mark.asyncio
async def test_get_dashboards_success(_mock_env):
from src.api.routes.migration import get_dashboards
cm = _make_sync_config_manager([_mock_env])
with patch("src.api.routes.migration.SupersetClient") as MockClient:
mock_client = MagicMock()
mock_client.get_dashboards_summary.return_value = [{"id": 1, "title": "Test"}]
MockClient.return_value = mock_client
result = await get_dashboards(env_id="test-env-1", config_manager=cm, _=None)
assert len(result) == 1
assert result[0]["id"] == 1
@pytest.mark.asyncio
async def test_get_dashboards_invalid_env_raises_404(_mock_env):
from src.api.routes.migration import get_dashboards
cm = _make_sync_config_manager([_mock_env])
with pytest.raises(HTTPException) as exc:
await get_dashboards(env_id="wrong-env", config_manager=cm, _=None)
assert exc.value.status_code == 404
# --- execute_migration tests ---
@pytest.mark.asyncio
async def test_execute_migration_success(_mock_env):
from src.api.routes.migration import execute_migration
from src.models.dashboard import DashboardSelection
cm = _make_sync_config_manager([_mock_env, _mock_env]) # Need both source/target
cm = _make_sync_config_manager([_mock_env, _mock_env]) # Need both source/target
tm = MagicMock()
tm.create_task = AsyncMock(return_value=MagicMock(id="task-123"))
selection = DashboardSelection(
source_env_id="test-env-1",
target_env_id="test-env-1",
selected_ids=[1, 2]
source_env_id="test-env-1", target_env_id="test-env-1", selected_ids=[1, 2]
)
result = await execute_migration(
selection=selection, config_manager=cm, task_manager=tm, _=None
)
result = await execute_migration(selection=selection, config_manager=cm, task_manager=tm, _=None)
assert result["task_id"] == "task-123"
tm.create_task.assert_called_once()
@pytest.mark.asyncio
async def test_execute_migration_invalid_env_raises_400(_mock_env):
from src.api.routes.migration import execute_migration
from src.models.dashboard import DashboardSelection
cm = _make_sync_config_manager([_mock_env])
selection = DashboardSelection(
source_env_id="test-env-1",
target_env_id="non-existent",
selected_ids=[1]
source_env_id="test-env-1", target_env_id="non-existent", selected_ids=[1]
)
with pytest.raises(HTTPException) as exc:
await execute_migration(selection=selection, config_manager=cm, task_manager=MagicMock(), _=None)
await execute_migration(
selection=selection, config_manager=cm, task_manager=MagicMock(), _=None
)
assert exc.value.status_code == 400
@@ -453,8 +571,10 @@ async def test_dry_run_migration_returns_diff_and_risk(db_session):
fix_cross_filters=True,
)
with patch("src.api.routes.migration.SupersetClient") as MockClient, \
patch("src.api.routes.migration.MigrationDryRunService") as MockService:
with (
patch("src.api.routes.migration.SupersetClient") as MockClient,
patch("src.api.routes.migration.MigrationDryRunService") as MockService,
):
source_client = MagicMock()
target_client = MagicMock()
MockClient.side_effect = [source_client, target_client]
@@ -465,9 +585,17 @@ async def test_dry_run_migration_returns_diff_and_risk(db_session):
"selection": selection.model_dump(),
"selected_dashboard_titles": ["Sales"],
"diff": {
"dashboards": {"create": [], "update": [{"uuid": "dash-1"}], "delete": []},
"dashboards": {
"create": [],
"update": [{"uuid": "dash-1"}],
"delete": [],
},
"charts": {"create": [{"uuid": "chart-1"}], "update": [], "delete": []},
"datasets": {"create": [{"uuid": "dataset-1"}], "update": [], "delete": []},
"datasets": {
"create": [{"uuid": "dataset-1"}],
"update": [],
"delete": [],
},
},
"summary": {
"dashboards": {"create": 0, "update": 1, "delete": 0},
@@ -487,7 +615,9 @@ async def test_dry_run_migration_returns_diff_and_risk(db_session):
service_instance.run.return_value = service_payload
MockService.return_value = service_instance
result = await dry_run_migration(selection=selection, config_manager=cm, db=db_session, _=None)
result = await dry_run_migration(
selection=selection, config_manager=cm, db=db_session, _=None
)
assert result["summary"]["dashboards"]["update"] == 1
assert result["summary"]["charts"]["create"] == 1
@@ -512,10 +642,14 @@ async def test_dry_run_migration_rejects_same_environment(db_session):
env.timeout = 30
cm = _make_sync_config_manager([env])
selection = DashboardSelection(selected_ids=[1], source_env_id="same", target_env_id="same")
selection = DashboardSelection(
selected_ids=[1], source_env_id="same", target_env_id="same"
)
with pytest.raises(HTTPException) as exc:
await dry_run_migration(selection=selection, config_manager=cm, db=db_session, _=None)
await dry_run_migration(
selection=selection, config_manager=cm, db=db_session, _=None
)
assert exc.value.status_code == 400

View File

@@ -16,6 +16,7 @@ from src.core.task_manager.models import Task, TaskStatus
from src.dependencies import get_current_user, get_task_manager
# @DEBT: Divergent _FakeTaskManager definition. Canonical version should be in conftest.py. Authz variant is missing get_all_tasks().
# [DEF:_FakeTaskManager:Class]
# @RELATION: BINDS_TO -> [TestReportsApi]
# @COMPLEXITY: 1

View File

@@ -16,6 +16,7 @@ from src.core.task_manager.models import Task, TaskStatus
from src.dependencies import get_current_user, get_task_manager
# @DEBT: Divergent _FakeTaskManager definition. Canonical version should be in conftest.py. Authz variant is missing get_all_tasks().
# [DEF:_FakeTaskManager:Class]
# @RELATION: BINDS_TO -> [TestReportsDetailApi]
# @COMPLEXITY: 1

View File

@@ -204,6 +204,7 @@ def test_delete_validation_report_deletes_dashboard_scope_and_linked_tasks():
screenshot_path=None,
)
# @RISK: db.query side_effect chain may not propagate through .filter().first() — verify mock chain setup is correct for this test.
first_query = MagicMock()
first_query.first.return_value = target_record
@@ -270,6 +271,7 @@ def test_delete_validation_report_swallows_linked_task_cleanup_failure():
screenshot_path=None,
)
# @RISK: db.query side_effect chain may not propagate through .filter().first() — verify mock chain setup is correct for this test.
first_query = MagicMock()
first_query.first.return_value = record

View File

@@ -73,6 +73,7 @@ def test_decrypt_invalid_data():
# @INVARIANT: Chained calls beyond Session spec create unconstrained intermediate mocks; only top-level query/add/commit are spec-enforced.
@pytest.fixture
def mock_db():
# @RISK: query() returns unconstrained MagicMock — chain beyond query() has no spec protection. Consider create_autospec(Session) for full chain safety.
return MagicMock(spec=Session)

View File

@@ -39,6 +39,7 @@ class MockMappingService:
self.mappings = mappings
def get_remote_ids_batch(self, env_id, resource_type, uuids):
# @INVARIANT_VIOLATION: resource_type parameter is silently ignored. Lookups are UUID-only; chart and dataset UUIDs share the same namespace. Tests that mix resource types will produce false positives.
result = {}
for uuid in uuids:
if uuid in self.mappings:

View File

@@ -148,6 +148,7 @@ class CleanReleaseCompliancePlugin:
# @PURPOSE: Provide minimal plugin loader contract used by TaskManager in integration tests.
# @INVARIANT: has_plugin/get_plugin only acknowledge the seeded compliance plugin id.
class _PluginLoaderStub:
# @CONTRACT: Partial PluginLoader stub. Implements: has_plugin, get_plugin. Stubs (NotImplementedError): list_plugins, get_all_plugins, get_all_plugin_configs.
def __init__(self, plugin: CleanReleaseCompliancePlugin):
self._plugin = plugin
@@ -159,6 +160,21 @@ class _PluginLoaderStub:
raise ValueError("Plugin not found")
return self._plugin
def list_plugins(self):
raise NotImplementedError(
"list_plugins not implemented in _PluginLoaderStub; add if test path requires plugin enumeration"
)
def get_all_plugins(self):
raise NotImplementedError(
"get_all_plugins not implemented in _PluginLoaderStub; add if test path requires full plugin set"
)
def get_all_plugin_configs(self):
raise NotImplementedError(
"get_all_plugin_configs not implemented in _PluginLoaderStub; add if test path requires plugin configs"
)
# [/DEF:_PluginLoaderStub:Class]

View File

@@ -37,6 +37,8 @@ def make_adapter():
client = MagicMock()
client.network = MagicMock()
return SupersetCompilationAdapter(environment=environment, client=client), client
# [/DEF:make_adapter:Function]
@@ -61,6 +63,8 @@ def test_preview_prefers_supported_client_method_before_network_fallback():
assert preview.compiled_sql == "SELECT 1"
client.compile_preview.assert_called_once()
client.network.request.assert_not_called()
# [/DEF:test_preview_prefers_supported_client_method_before_network_fallback:Function]
@@ -88,10 +92,14 @@ def test_preview_falls_back_across_matrix_until_supported_endpoint_returns_sql()
assert preview.preview_status.value == "ready"
assert preview.compiled_sql == "SELECT * FROM dataset_77"
assert client.network.request.call_count == 2
# @FRAGILE: Positional call assertion — ordering changes will break this test without indicating a real regression. Prefer content-based assertion.
first_call = client.network.request.call_args_list[0].kwargs
# @FRAGILE: Positional call assertion — ordering changes will break this test without indicating a real regression. Prefer content-based assertion.
second_call = client.network.request.call_args_list[1].kwargs
assert first_call["endpoint"] == "/dataset/77/preview"
assert second_call["endpoint"] == "/dataset/77/sql"
# [/DEF:test_preview_falls_back_across_matrix_until_supported_endpoint_returns_sql:Function]
@@ -124,11 +132,15 @@ def test_sql_lab_launch_falls_back_to_legacy_execute_endpoint():
assert sql_lab_ref == "query-123"
assert client.network.request.call_count == 2
# @FRAGILE: Positional call assertion — ordering changes will break this test without indicating a real regression. Prefer content-based assertion.
first_call = client.network.request.call_args_list[0].kwargs
# @FRAGILE: Positional call assertion — ordering changes will break this test without indicating a real regression. Prefer content-based assertion.
second_call = client.network.request.call_args_list[1].kwargs
assert first_call["endpoint"] == "/sqllab/execute/"
assert second_call["endpoint"] == "/sql_lab/execute/"
# [/DEF:test_sql_lab_launch_falls_back_to_legacy_execute_endpoint:Function]
# [/DEF:SupersetCompatibilityMatrixTests:Module]
# [/DEF:SupersetCompatibilityMatrixTests:Module]