The graph becomes useful. Before this commit, entities sat in the DB
as data with no narrative. After: the director can ask "what am I
forgetting?" and get a structured answer in milliseconds.
New module (src/atocore/engineering/queries.py, 360 lines):
Structure queries (Q-001/004/005/008/013):
- system_map(project): full subsystem → component tree + orphans +
materials joined per component
- decisions_affecting(project, subsystem_id?): decisions linked via
AFFECTED_BY_DECISION, scoped to a subsystem or whole project
- requirements_for(component_id): Q-005 forward trace
- recent_changes(project, since, limit): Q-013 via memory_audit join
(reuses the Phase 4 audit infrastructure — entity_kind='entity')
The 3 killer queries (the real value):
- orphan_requirements(project): requirements with NO inbound SATISFIES
edge. "What do I claim the system must do that nothing actually
claims to handle?" Q-006.
- risky_decisions(project): decisions whose BASED_ON_ASSUMPTION edge
points to an assumption with status in ('superseded','invalid') OR
properties.flagged=True. Finds cascading risk from shaky premises. Q-009.
- unsupported_claims(project): ValidationClaim entities with no inbound
SUPPORTS edge — asserted but no Result to back them. Q-011.
- all_gaps(project): runs all three in one call for dashboards.
History + impact (Q-016/017):
- impact_analysis(entity_id, max_depth=3): BFS over outbound edges.
"What's downstream of this if I change it?"
- evidence_chain(entity_id): inbound SUPPORTS/EVIDENCED_BY/DESCRIBED_BY/
VALIDATED_BY/ANALYZED_BY. "How do I know this is true?"
API (src/atocore/api/routes.py) exposes 10 endpoints:
- GET /engineering/projects/{p}/systems
- GET /engineering/decisions?project=&subsystem=
- GET /engineering/components/{id}/requirements
- GET /engineering/changes?project=&since=&limit=
- GET /engineering/gaps/orphan-requirements?project=
- GET /engineering/gaps/risky-decisions?project=
- GET /engineering/gaps/unsupported-claims?project=
- GET /engineering/gaps?project= (combined)
- GET /engineering/impact?entity=&max_depth=
- GET /engineering/evidence?entity=
Mirror integration (src/atocore/engineering/mirror.py):
- New _gaps_section() renders at top of every project page
- If any gap non-empty: shows up-to-10 per category with names + context
- Clean project: "✅ No gaps detected" — signals everything is traced
Triage UI (src/atocore/engineering/triage_ui.py):
- /admin/triage now shows BOTH memory candidates AND entity candidates
- Entity cards: name, type, project, confidence, source provenance,
Promote/Reject buttons, link to wiki entity page
- Entity promote/reject via fetch to /entities/{id}/promote|reject
- One triage UI for the whole pipeline — consistent muscle memory
Tests: 326 → 341 (15 new, all in test_engineering_queries.py):
- System map structure + orphan detection + material joins
- Killer queries: positive + negative cases (empty when clean)
- Decisions query: project-wide and subsystem-scoped
- Impact analysis walks outbound BFS
- Evidence chain walks inbound provenance
No regressions. All 10 daily queries from the plan are now live and
answering real questions against the graph.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
213 lines · 7.3 KiB · Python
"""Phase 5 tests — the 10 canonical engineering queries.
|
|
|
|
Test fixtures seed a small p-test graph and exercise each query. The 3 killer
|
|
queries (Q-006/009/011) get dedicated tests that verify they surface real gaps
|
|
and DON'T false-positive on well-formed data.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import pytest
|
|
|
|
from atocore.engineering.queries import (
|
|
all_gaps,
|
|
decisions_affecting,
|
|
evidence_chain,
|
|
impact_analysis,
|
|
orphan_requirements,
|
|
recent_changes,
|
|
requirements_for,
|
|
risky_decisions,
|
|
system_map,
|
|
unsupported_claims,
|
|
)
|
|
from atocore.engineering.service import (
|
|
create_entity,
|
|
create_relationship,
|
|
init_engineering_schema,
|
|
)
|
|
from atocore.models.database import init_db
|
|
|
|
|
|
@pytest.fixture
|
|
def seeded_graph(tmp_data_dir):
|
|
"""Build a small engineering graph for query tests."""
|
|
init_db()
|
|
init_engineering_schema()
|
|
|
|
# Subsystem + components
|
|
ss = create_entity("subsystem", "Optics", project="p-test")
|
|
c1 = create_entity("component", "Primary Mirror", project="p-test")
|
|
c2 = create_entity("component", "Diverger Lens", project="p-test")
|
|
c_orphan = create_entity("component", "Unparented", project="p-test")
|
|
create_relationship(c1.id, ss.id, "part_of")
|
|
create_relationship(c2.id, ss.id, "part_of")
|
|
|
|
# Requirements — one satisfied, one orphan
|
|
r_ok = create_entity("requirement", "Surface figure < 25nm RMS", project="p-test")
|
|
r_orphan = create_entity("requirement", "Measurement lambda/20", project="p-test")
|
|
create_relationship(c1.id, r_ok.id, "satisfies")
|
|
|
|
# Decisions
|
|
d_ok = create_entity("decision", "Use Zerodur blank", project="p-test")
|
|
d_risky = create_entity("decision", "Use external CGH", project="p-test")
|
|
create_relationship(d_ok.id, ss.id, "affected_by_decision")
|
|
|
|
# Assumption (flagged) — d_risky depends on it
|
|
a_flagged = create_entity(
|
|
"parameter", "Vendor lead time 6 weeks",
|
|
project="p-test",
|
|
properties={"flagged": True},
|
|
)
|
|
create_relationship(d_risky.id, a_flagged.id, "based_on_assumption")
|
|
|
|
# Validation claim — one supported, one not
|
|
v_ok = create_entity("validation_claim", "Margin is adequate", project="p-test")
|
|
v_orphan = create_entity("validation_claim", "Thermal stability OK", project="p-test")
|
|
result = create_entity("result", "FEA thermal sweep 2026-03", project="p-test")
|
|
create_relationship(result.id, v_ok.id, "supports")
|
|
|
|
# Material
|
|
mat = create_entity("material", "Zerodur", project="p-test")
|
|
create_relationship(c1.id, mat.id, "uses_material")
|
|
|
|
return {
|
|
"subsystem": ss, "component_1": c1, "component_2": c2,
|
|
"orphan_component": c_orphan,
|
|
"req_ok": r_ok, "req_orphan": r_orphan,
|
|
"decision_ok": d_ok, "decision_risky": d_risky,
|
|
"assumption_flagged": a_flagged,
|
|
"claim_supported": v_ok, "claim_orphan": v_orphan,
|
|
"result": result, "material": mat,
|
|
}
|
|
|
|
|
|
# --- Structure queries ---
|
|
|
|
|
|
def test_system_map_returns_subsystem_with_components(seeded_graph):
|
|
result = system_map("p-test")
|
|
assert result["project"] == "p-test"
|
|
assert len(result["subsystems"]) == 1
|
|
optics = result["subsystems"][0]
|
|
assert optics["name"] == "Optics"
|
|
comp_names = {c["name"] for c in optics["components"]}
|
|
assert "Primary Mirror" in comp_names
|
|
assert "Diverger Lens" in comp_names
|
|
|
|
|
|
def test_system_map_reports_orphan_components(seeded_graph):
|
|
result = system_map("p-test")
|
|
names = {c["name"] for c in result["orphan_components"]}
|
|
assert "Unparented" in names
|
|
|
|
|
|
def test_system_map_includes_materials(seeded_graph):
|
|
result = system_map("p-test")
|
|
primary = next(
|
|
c for s in result["subsystems"] for c in s["components"] if c["name"] == "Primary Mirror"
|
|
)
|
|
assert "Zerodur" in primary["materials"]
|
|
|
|
|
|
def test_decisions_affecting_whole_project(seeded_graph):
|
|
result = decisions_affecting("p-test")
|
|
names = {d["name"] for d in result["decisions"]}
|
|
assert "Use Zerodur blank" in names
|
|
assert "Use external CGH" in names
|
|
|
|
|
|
def test_decisions_affecting_specific_subsystem(seeded_graph):
|
|
ss_id = seeded_graph["subsystem"].id
|
|
result = decisions_affecting("p-test", subsystem_id=ss_id)
|
|
names = {d["name"] for d in result["decisions"]}
|
|
# d_ok has edge to subsystem directly
|
|
assert "Use Zerodur blank" in names
|
|
|
|
|
|
def test_requirements_for_component(seeded_graph):
|
|
c_id = seeded_graph["component_1"].id
|
|
result = requirements_for(c_id)
|
|
assert result["count"] == 1
|
|
assert result["requirements"][0]["name"] == "Surface figure < 25nm RMS"
|
|
|
|
|
|
def test_recent_changes_includes_created_entities(seeded_graph):
|
|
result = recent_changes("p-test", limit=100)
|
|
actions = [c["action"] for c in result["changes"]]
|
|
assert "created" in actions
|
|
assert result["count"] > 0
|
|
|
|
|
|
# --- Killer queries ---
|
|
|
|
|
|
def test_orphan_requirements_finds_unsatisfied(seeded_graph):
|
|
result = orphan_requirements("p-test")
|
|
names = {r["name"] for r in result["gaps"]}
|
|
assert "Measurement lambda/20" in names # orphan
|
|
assert "Surface figure < 25nm RMS" not in names # has SATISFIES edge
|
|
|
|
|
|
def test_orphan_requirements_empty_when_all_satisfied(tmp_data_dir):
|
|
init_db()
|
|
init_engineering_schema()
|
|
c = create_entity("component", "C", project="p-clean")
|
|
r = create_entity("requirement", "R", project="p-clean")
|
|
create_relationship(c.id, r.id, "satisfies")
|
|
result = orphan_requirements("p-clean")
|
|
assert result["count"] == 0
|
|
|
|
|
|
def test_risky_decisions_finds_flagged_assumptions(seeded_graph):
|
|
result = risky_decisions("p-test")
|
|
names = {d["decision_name"] for d in result["gaps"]}
|
|
assert "Use external CGH" in names
|
|
assert "Use Zerodur blank" not in names # has no flagged assumption
|
|
|
|
|
|
def test_unsupported_claims_finds_orphan_claims(seeded_graph):
|
|
result = unsupported_claims("p-test")
|
|
names = {c["name"] for c in result["gaps"]}
|
|
assert "Thermal stability OK" in names
|
|
assert "Margin is adequate" not in names # has SUPPORTS edge
|
|
|
|
|
|
def test_all_gaps_combines_the_three_killers(seeded_graph):
|
|
result = all_gaps("p-test")
|
|
assert result["orphan_requirements"]["count"] == 1
|
|
assert result["risky_decisions"]["count"] == 1
|
|
assert result["unsupported_claims"]["count"] == 1
|
|
|
|
|
|
def test_all_gaps_clean_project_reports_zero(tmp_data_dir):
|
|
init_db()
|
|
init_engineering_schema()
|
|
create_entity("component", "alone", project="p-empty")
|
|
result = all_gaps("p-empty")
|
|
assert result["orphan_requirements"]["count"] == 0
|
|
assert result["risky_decisions"]["count"] == 0
|
|
assert result["unsupported_claims"]["count"] == 0
|
|
|
|
|
|
# --- Impact + evidence ---
|
|
|
|
|
|
def test_impact_analysis_walks_outbound_edges(seeded_graph):
|
|
c_id = seeded_graph["component_1"].id
|
|
result = impact_analysis(c_id, max_depth=2)
|
|
# Primary Mirror → SATISFIES → Requirement, → USES_MATERIAL → Material
|
|
rel_types = {i["relationship"] for i in result["impacted"]}
|
|
assert "satisfies" in rel_types
|
|
assert "uses_material" in rel_types
|
|
|
|
|
|
def test_evidence_chain_walks_inbound_provenance(seeded_graph):
|
|
v_ok_id = seeded_graph["claim_supported"].id
|
|
result = evidence_chain(v_ok_id)
|
|
# The Result entity supports the claim
|
|
via_types = {e["via"] for e in result["evidence_chain"]}
|
|
assert "supports" in via_types
|
|
source_names = {e["source_name"] for e in result["evidence_chain"]}
|
|
assert "FEA thermal sweep 2026-03" in source_names
|