Codex's formal audit of fb4d55c said GO WITH CONDITIONS. Two P2 findings
to fold in before merge:
1. auto_triage.py:417 still PUT {"content": cand["content"]} — the
suggested-project correction was unreachable even with
MemoryUpdateRequest.project in place. Changed body to
{"project": suggested} so misattribution flags actually retarget the
memory. Added a regression test that asserts the script source
contains the new PUT shape, so a future "optimization" can't silently
undo this.
2. POST /memory/{id}/supersede had no status guard — calling
supersede_memory() delegated to update_memory(status="superseded"),
which would silently flip a candidate to superseded. Mirrored the
invalidate route: get_memory(id) lookup, 404 unknown / 200
already_superseded / 409 wrong-status / 200 superseded.
Plus a P3 from the same audit: covered the "retarget to project=''
when a global active duplicate exists" case via
test_update_memory_to_empty_project_detects_global_duplicate.
Tests: 581 -> 586 (+5: 3 supersede route + 1 project-empty duplicate +
1 auto_triage caller invariant).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
696 lines
25 KiB
Python
"""Tests for Memory Core."""
|
|
|
|
import os
|
|
import tempfile
|
|
|
|
import pytest
|
|
|
|
import atocore.config as _config
|
|
from atocore.models.database import init_db
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def isolated_db():
    """Give each test a completely isolated database.

    Creates a throwaway data directory, points ATOCORE_DATA_DIR at it,
    rebuilds the global Settings object so every module sees the new
    location, and initializes a fresh schema. Yields the temp dir path.
    """
    tmpdir = tempfile.mkdtemp()
    os.environ["ATOCORE_DATA_DIR"] = tmpdir

    # Replace the global settings so all modules see the new data_dir.
    _config.settings = _config.Settings()

    # database.py reads _config.settings dynamically, so no per-module
    # patching is required (the old dead `import atocore.models.database`
    # that used to live here was removed — init_db already loads it).
    init_db()
    yield tmpdir


def test_create_memory(isolated_db):
    """A freshly created memory defaults to active status and confidence 1.0."""
    from atocore.memory.service import create_memory

    created = create_memory("identity", "User is a mechanical engineer specializing in optics")
    assert (created.memory_type, created.status, created.confidence) == (
        "identity",
        "active",
        1.0,
    )


def test_create_memory_invalid_type(isolated_db):
    """Unknown memory types are rejected with a ValueError."""
    from atocore.memory.service import create_memory

    with pytest.raises(ValueError, match="Invalid memory type"):
        create_memory("invalid_type", "some content")


def test_create_memory_dedup(isolated_db):
    """Creating identical content twice returns the same memory row."""
    from atocore.memory.service import create_memory

    first = create_memory("identity", "User is an engineer")
    duplicate = create_memory("identity", "User is an engineer")
    assert first.id == duplicate.id


def test_create_memory_dedup_is_project_scoped(isolated_db):
    """Identical content under different projects yields distinct memories."""
    from atocore.memory.service import create_memory

    in_atocore = create_memory("project", "Uses SQLite for local state", project="atocore")
    in_openclaw = create_memory("project", "Uses SQLite for local state", project="openclaw")
    assert in_atocore.id != in_openclaw.id


def test_project_is_persisted_and_filterable(isolated_db):
    """get_memories(project=...) returns only that project's rows."""
    from atocore.memory.service import create_memory, get_memories

    create_memory("project", "Uses SQLite for local state", project="atocore")
    create_memory("project", "Uses Postgres in production", project="openclaw")

    hits = get_memories(memory_type="project", project="atocore")
    assert [m.project for m in hits] == ["atocore"]


def test_get_memories_all(isolated_db):
    """An unfiltered fetch returns every stored memory."""
    from atocore.memory.service import create_memory, get_memories

    seeds = [
        ("identity", "User is an engineer"),
        ("preference", "Prefers Python with type hints"),
        ("knowledge", "Zerodur has near-zero thermal expansion"),
    ]
    for mem_type, content in seeds:
        create_memory(mem_type, content)

    assert len(get_memories()) == len(seeds)


def test_get_memories_by_type(isolated_db):
    """Filtering by memory_type narrows results to that type only."""
    from atocore.memory.service import create_memory, get_memories

    create_memory("identity", "User is an engineer")
    for pref in ("Prefers concise code", "Uses FastAPI for APIs"):
        create_memory("preference", pref)

    assert len(get_memories(memory_type="preference")) == 2


def test_get_memories_active_only(isolated_db):
    """active_only=True hides invalidated memories; False still shows them."""
    from atocore.memory.service import create_memory, get_memories, invalidate_memory

    fact = create_memory("knowledge", "Fact about optics")
    invalidate_memory(fact.id)

    assert not get_memories(active_only=True)
    assert len(get_memories(active_only=False)) == 1


def test_get_memories_min_confidence(isolated_db):
    """min_confidence drops rows below the threshold."""
    from atocore.memory.service import create_memory, get_memories

    create_memory("knowledge", "High confidence fact", confidence=0.9)
    create_memory("knowledge", "Low confidence fact", confidence=0.3)

    confident = get_memories(min_confidence=0.5)
    assert [m.confidence for m in confident] == [0.9]


def test_update_memory(isolated_db):
    """update_memory rewrites content and confidence in place (no new row)."""
    from atocore.memory.service import create_memory, get_memories, update_memory

    mem = create_memory("knowledge", "Initial fact")
    update_memory(mem.id, content="Updated fact", confidence=0.8)

    rows = get_memories()
    assert len(rows) == 1
    assert (rows[0].content, rows[0].confidence) == ("Updated fact", 0.8)


def test_update_memory_rejects_duplicate_active_memory(isolated_db):
    """Updating content to match another active memory in the same scope raises.

    Uses the module-level pytest import; the old in-function `import pytest`
    was redundant, and the first created memory needs no local binding.
    """
    from atocore.memory.service import create_memory, update_memory

    create_memory("knowledge", "Canonical fact", project="atocore")
    second = create_memory("knowledge", "Different fact", project="atocore")

    with pytest.raises(ValueError, match="duplicate active memory"):
        update_memory(second.id, content="Canonical fact")


def test_create_memory_validates_confidence(isolated_db):
    """Confidence outside [0.0, 1.0] is rejected at creation time.

    Uses the module-level pytest import; the old in-function `import pytest`
    was redundant.
    """
    from atocore.memory.service import create_memory

    with pytest.raises(ValueError, match="Confidence must be between 0.0 and 1.0"):
        create_memory("knowledge", "Out of range", confidence=1.5)


def test_invalidate_memory(isolated_db):
    """Invalidated memories disappear from the active view."""
    from atocore.memory.service import create_memory, get_memories, invalidate_memory

    wrong = create_memory("knowledge", "Wrong fact")
    invalidate_memory(wrong.id)
    assert not get_memories(active_only=True)


def test_supersede_memory(isolated_db):
    """Superseding flips the row's status to 'superseded' without deleting it."""
    from atocore.memory.service import create_memory, get_memories, supersede_memory

    old = create_memory("knowledge", "Old fact")
    supersede_memory(old.id)

    rows = get_memories(active_only=False)
    assert [m.status for m in rows] == ["superseded"]


def test_memories_for_context(isolated_db):
    """The context pack carries the header, type labels, and a positive size."""
    from atocore.memory.service import create_memory, get_memories_for_context

    create_memory("identity", "User is a senior mechanical engineer")
    create_memory("preference", "Prefers Python with type hints")

    text, chars = get_memories_for_context(memory_types=["identity", "preference"], budget=500)
    for marker in ("--- AtoCore Memory ---", "[identity]", "[preference]"):
        assert marker in text
    assert chars > 0


def test_memories_for_context_reserves_room_for_each_type(isolated_db):
    """A long entry of one type must not starve the other requested types."""
    from atocore.memory.service import create_memory, get_memories_for_context

    create_memory("identity", "Identity entry that is intentionally long so it could consume the whole budget on its own")
    create_memory("preference", "Preference entry that should still appear")

    pack, _ = get_memories_for_context(memory_types=["identity", "preference"], budget=120)
    assert "[preference]" in pack


def test_memories_for_context_respects_actual_serialized_budget(isolated_db):
    """The reported char count matches the serialized text and stays in budget."""
    from atocore.memory.service import create_memory, get_memories_for_context

    create_memory("identity", "Identity text that should fit the wrapper-aware memory budget calculation")
    create_memory("preference", "Preference text that should also fit")

    text, chars = get_memories_for_context(memory_types=["identity", "preference"], budget=140)
    # Chained comparison: count is exact AND within the requested budget.
    assert chars == len(text) <= 140


def test_memories_for_context_empty(isolated_db):
    """With no memories stored, the context pack is empty."""
    from atocore.memory.service import get_memories_for_context

    text, chars = get_memories_for_context()
    assert (text, chars) == ("", 0)


# --- Phase 10: auto-promotion + candidate expiry ---


def _get_memory_by_id(memory_id):
    """Helper: fetch a single memory row as a dict, or None when absent."""
    from atocore.models.database import get_connection

    with get_connection() as conn:
        row = conn.execute(
            "SELECT * FROM memories WHERE id = ?", (memory_id,)
        ).fetchone()
    return dict(row) if row else None


def test_auto_promote_reinforced_basic(isolated_db):
    """A candidate with enough references and confidence gets promoted.

    The old unused `reinforce_memory` import is gone: as the comment below
    explains, the test deliberately bypasses it and bumps the counters with
    direct SQL instead.
    """
    from datetime import datetime, timezone

    from atocore.memory.service import auto_promote_reinforced, create_memory
    from atocore.models.database import get_connection

    candidate = create_memory(
        "knowledge", "Zerodur has near-zero CTE", status="candidate", confidence=0.7
    )
    # reinforce_memory only touches active memories, so instead of a
    # promote/reinforce/demote dance we bump reference_count and
    # last_referenced_at directly.
    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    with get_connection() as conn:
        conn.execute(
            "UPDATE memories SET reference_count = 3, last_referenced_at = ? WHERE id = ?",
            (stamp, candidate.id),
        )

    promoted = auto_promote_reinforced(min_reference_count=3, min_confidence=0.7)
    assert candidate.id in promoted
    assert _get_memory_by_id(candidate.id)["status"] == "active"


def test_auto_promote_reinforced_ignores_low_refs(isolated_db):
    """Candidates below the reference threshold are not promoted."""
    from datetime import datetime, timezone

    from atocore.memory.service import auto_promote_reinforced, create_memory
    from atocore.models.database import get_connection

    candidate = create_memory("knowledge", "Some knowledge", status="candidate", confidence=0.7)
    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    with get_connection() as conn:
        conn.execute(
            "UPDATE memories SET reference_count = 1, last_referenced_at = ? WHERE id = ?",
            (stamp, candidate.id),
        )

    promoted = auto_promote_reinforced(min_reference_count=3, min_confidence=0.7)
    assert candidate.id not in promoted
    assert _get_memory_by_id(candidate.id)["status"] == "candidate"


def test_expire_stale_candidates(isolated_db):
    """Old, never-referenced candidates are expired to status 'invalid'."""
    from atocore.memory.service import create_memory, expire_stale_candidates
    from atocore.models.database import get_connection

    stale = create_memory("knowledge", "Old unreferenced fact", status="candidate")
    # Backdate the row well past the expiry window.
    with get_connection() as conn:
        conn.execute(
            "UPDATE memories SET created_at = datetime('now', '-30 days') WHERE id = ?",
            (stale.id,),
        )

    assert stale.id in expire_stale_candidates(max_age_days=14)
    assert _get_memory_by_id(stale.id)["status"] == "invalid"


# --- Phase 4: memory_audit log ---


def test_audit_create_logs_entry(isolated_db):
    """create_memory writes a 'created' audit entry with actor and content."""
    from atocore.memory.service import create_memory, get_memory_audit

    mem = create_memory("knowledge", "test content for audit", actor="test-harness")
    trail = get_memory_audit(mem.id)
    assert trail
    head = trail[0]
    assert head["action"] == "created"
    assert head["actor"] == "test-harness"
    assert head["after"]["content"] == "test content for audit"


def test_audit_promote_logs_entry(isolated_db):
    """Promotion records actor plus before/after status in the audit log."""
    from atocore.memory.service import create_memory, get_memory_audit, promote_memory

    mem = create_memory("knowledge", "candidate for promote", status="candidate")
    promote_memory(mem.id, actor="test-triage")

    promotions = [a for a in get_memory_audit(mem.id) if a["action"] == "promoted"]
    assert promotions
    entry = promotions[0]
    assert entry["actor"] == "test-triage"
    assert entry["before"]["status"] == "candidate"
    assert entry["after"]["status"] == "active"


def test_audit_reject_logs_entry(isolated_db):
    """Rejecting a candidate logs a 'rejected' entry carrying the note."""
    from atocore.memory.service import create_memory, get_memory_audit, reject_candidate_memory

    mem = create_memory("knowledge", "candidate for reject", status="candidate")
    reject_candidate_memory(mem.id, actor="test-triage", note="stale")

    rejections = [a for a in get_memory_audit(mem.id) if a["action"] == "rejected"]
    assert rejections
    assert rejections[0]["note"] == "stale"


def test_audit_update_captures_before_after(isolated_db):
    """'updated' audit rows snapshot both the old and new field values."""
    from atocore.memory.service import create_memory, get_memory_audit, update_memory

    mem = create_memory("knowledge", "original content", confidence=0.5)
    update_memory(mem.id, content="updated content", confidence=0.9, actor="human-edit")

    updates = [a for a in get_memory_audit(mem.id) if a["action"] == "updated"]
    assert updates
    snap = updates[0]
    assert (snap["before"]["content"], snap["after"]["content"]) == (
        "original content",
        "updated content",
    )
    assert (snap["before"]["confidence"], snap["after"]["confidence"]) == (0.5, 0.9)


def test_audit_reinforce_logs_entry(isolated_db):
    """reinforce_memory leaves a 'reinforced' action in the audit trail."""
    from atocore.memory.service import create_memory, get_memory_audit, reinforce_memory

    mem = create_memory("knowledge", "reinforced mem", confidence=0.5)
    reinforce_memory(mem.id, confidence_delta=0.02)

    assert any(a["action"] == "reinforced" for a in get_memory_audit(mem.id))


def test_recent_audit_returns_cross_memory_entries(isolated_db):
    """get_recent_audit spans audit entries from multiple memories."""
    from atocore.memory.service import create_memory, get_recent_audit

    first = create_memory("knowledge", "mem one content", actor="harness")
    second = create_memory("knowledge", "mem two content", actor="harness")

    seen = {entry["memory_id"] for entry in get_recent_audit(limit=10)}
    assert {first.id, second.id} <= seen


# --- Phase 3: domain_tags + valid_until ---


def test_create_memory_with_tags_and_valid_until(isolated_db):
    """domain_tags and valid_until round-trip through create_memory."""
    from atocore.memory.service import create_memory

    mem = create_memory(
        "knowledge",
        "CTE gradient dominates WFE at F/1.2",
        domain_tags=["optics", "thermal", "materials"],
        valid_until="2027-01-01",
    )
    assert mem.domain_tags == ["optics", "thermal", "materials"]
    assert mem.valid_until == "2027-01-01"


def test_create_memory_normalizes_tags(isolated_db):
    """Tags are stripped, lowercased, deduplicated, and cleared of blanks."""
    from atocore.memory.service import create_memory

    mem = create_memory(
        "knowledge",
        "some content here",
        domain_tags=[" Optics ", "OPTICS", "Thermal", ""],
    )
    assert mem.domain_tags == ["optics", "thermal"]


def test_update_memory_sets_tags_and_valid_until(isolated_db):
    """update_memory persists domain_tags (stored as JSON) and valid_until."""
    import json

    from atocore.memory.service import create_memory, update_memory
    from atocore.models.database import get_connection

    mem = create_memory("knowledge", "some content for update test")
    assert update_memory(mem.id, domain_tags=["controls", "firmware"], valid_until="2026-12-31")

    with get_connection() as conn:
        row = conn.execute(
            "SELECT domain_tags, valid_until FROM memories WHERE id = ?", (mem.id,)
        ).fetchone()
    assert json.loads(row["domain_tags"]) == ["controls", "firmware"]
    assert row["valid_until"] == "2026-12-31"


def test_get_memories_for_context_excludes_expired(isolated_db):
    """Expired active memories must not land in context packs."""
    from atocore.memory.service import create_memory, get_memories_for_context

    # Active, but valid_until is already in the past.
    create_memory(
        "knowledge",
        "stale snapshot from long ago period",
        valid_until="2020-01-01",
        confidence=1.0,
    )
    # Active with no expiry.
    create_memory(
        "knowledge",
        "durable engineering insight stays valid forever",
        confidence=1.0,
    )

    pack, _ = get_memories_for_context(memory_types=["knowledge"], budget=600)
    assert "durable engineering" in pack
    assert "stale snapshot" not in pack


def test_context_builder_tag_boost_orders_results(isolated_db):
    """Memories whose tags match the query rank ahead of untagged ones."""
    from atocore.memory.service import create_memory, get_memories_for_context

    create_memory("knowledge", "generic content has no obvious overlap with topic", confidence=0.8, domain_tags=[])
    create_memory("knowledge", "generic content has no obvious overlap topic here", confidence=0.8, domain_tags=["optics"])

    pack, _ = get_memories_for_context(
        memory_types=["knowledge"],
        budget=2000,
        query="tell me about optics",
    )
    # Both entries must be present, tagged one first.
    pos_tagged = pack.find("overlap topic here")
    pos_plain = pack.find("overlap with topic")
    assert pos_tagged != -1 and pos_plain != -1
    assert pos_tagged < pos_plain


def test_project_memory_ranking_ignores_scope_noise(isolated_db):
    """Project words should not crowd out the actual query intent."""
    from atocore.memory.service import create_memory, get_memories_for_context

    seeds = [
        ("Norman is the end operator for p06-polisher and requires an explicit manual mode to operate the machine.", 0.7),
        ("Polisher Control firmware spec document titled 'Fulum Polisher Machine Control Firmware Spec v1' lives in PKM.", 0.7),
        ("Machine design principle: works fully offline and independently; network connection is for remote access only", 0.5),
        ("Use Tailscale mesh for RPi remote access to provide SSH, file transfer, and NAT traversal without port forwarding.", 0.5),
    ]
    for content, confidence in seeds:
        create_memory("project", content, project="p06-polisher", confidence=confidence)

    pack, _ = get_memories_for_context(
        memory_types=["project"],
        project="p06-polisher",
        budget=360,
        query="how do we access the polisher machine remotely",
    )

    assert "Tailscale" in pack
    assert pack.find("remote access only") < pack.find("Tailscale")
    assert "manual mode" not in pack


def test_project_memory_ranking_prefers_multiple_intent_hits(isolated_db):
    """A rich memory with several query hits should beat a terse one-hit memory."""
    from atocore.memory.service import create_memory, get_memories_for_context

    create_memory(
        "project",
        "CGH vendor selected for p05. Active integration coordination with Katie/AOM.",
        project="p05-interferometer",
        confidence=0.7,
    )
    create_memory(
        "knowledge",
        "Vendor-summary current signal: 4D is the strongest technical Twyman-Green candidate; "
        "a certified used Zygo Verifire SV around $55k emerged as a strong value path.",
        project="p05-interferometer",
        confidence=0.9,
    )

    pack, _ = get_memories_for_context(
        memory_types=["project", "knowledge"],
        project="p05-interferometer",
        budget=220,
        query="what is the current vendor signal for the interferometer procurement",
    )

    assert "4D" in pack and "Zygo" in pack


def test_project_memory_query_ranks_beyond_confidence_prefilter(isolated_db):
    """Query-time ranking should see older low-confidence but exact-intent memories."""
    from atocore.memory.service import create_memory, get_memories_for_context

    # Enough high-confidence filler to overflow any confidence-sorted prefilter.
    for idx in range(35):
        create_memory(
            "project",
            f"High confidence p06 filler memory {idx}: Polisher Control planning note.",
            project="p06-polisher",
            confidence=0.9,
        )
    create_memory(
        "project",
        "Use Tailscale mesh for RPi remote access to provide SSH, file transfer, and NAT traversal without port forwarding.",
        project="p06-polisher",
        confidence=0.5,
    )

    pack, _ = get_memories_for_context(
        memory_types=["project"],
        project="p06-polisher",
        budget=360,
        query="how do we access the polisher machine remotely",
    )

    assert "Tailscale" in pack


def test_project_memory_query_prefers_exact_cam_fact(isolated_db):
    """The low-confidence cam/encoder memory should win a cam-amplitude query."""
    from atocore.memory.service import create_memory, get_memories_for_context

    seeds = [
        ("Polisher Control firmware spec document titled 'Fulum Polisher Machine Control Firmware Spec v1' lives in PKM.", 0.9),
        ("Polisher Control doc must cover manual mode for Norman as a required deliverable per the plan.", 0.9),
        ("Cam amplitude and offset are mechanically set by operator and read via encoders; no actuators control them.", 0.5),
    ]
    for content, confidence in seeds:
        create_memory("project", content, project="p06-polisher", confidence=confidence)

    pack, _ = get_memories_for_context(
        memory_types=["project"],
        project="p06-polisher",
        budget=300,
        query="how is cam amplitude controlled on the polisher",
    )

    assert "encoders" in pack


def test_expire_stale_candidates_keeps_reinforced(isolated_db):
    """Candidates with any reference survive the stale-candidate sweep."""
    from atocore.memory.service import create_memory, expire_stale_candidates
    from atocore.models.database import get_connection

    referenced = create_memory("knowledge", "Referenced fact", status="candidate")
    # Backdate the row past the window, but leave one reference on it.
    with get_connection() as conn:
        conn.execute(
            "UPDATE memories SET reference_count = 1, "
            "created_at = datetime('now', '-30 days') WHERE id = ?",
            (referenced.id,),
        )

    assert referenced.id not in expire_stale_candidates(max_age_days=14)
    assert _get_memory_by_id(referenced.id)["status"] == "candidate"


# ---------------------------------------------------------------------------
# Wave 1 (2026-04-29) — counts come from SQL, not from the top-N sample.
# Exposed by Codex audit when prod /admin/dashboard reported 315 active
# while /admin/integrity-check reported 1091. The dashboard was building
# its counts from a confidence-sorted limit=500 fetch.
# ---------------------------------------------------------------------------


def test_get_memory_count_summary_returns_full_table_aggregates(isolated_db):
    """Counts come from SQL aggregates, not a sampled fetch."""
    from atocore.memory.service import (
        create_memory,
        get_memory_count_summary,
        invalidate_memory,
    )

    # Seed more rows than any reasonable sampling LIMIT so a LIMIT-based
    # counter would visibly disagree with reality.
    active_total = 120
    candidate_total = 7
    for i in range(active_total):
        create_memory(
            "knowledge",
            f"fact-{i}",
            project="p04-gigabit",
            confidence=0.9,
            status="active",
        )
    for i in range(candidate_total):
        create_memory("knowledge", f"cand-{i}", status="candidate")
    doomed = create_memory("knowledge", "to-invalidate", status="active")
    invalidate_memory(doomed.id)

    summary = get_memory_count_summary()
    assert summary["total"] == active_total + candidate_total + 1
    assert summary["by_status"]["active"] == active_total
    assert summary["by_status"]["candidate"] == candidate_total
    assert summary["by_status"]["invalid"] == 1
    assert summary["active"]["total"] == active_total
    assert summary["active"]["by_type"] == {"knowledge": active_total}
    assert summary["active"]["by_project"] == {"p04-gigabit": active_total}


def test_get_memory_returns_single_row_or_none(isolated_db):
    """get_memory fetches one row by id and returns None for unknown ids."""
    from atocore.memory.service import create_memory, get_memory

    created = create_memory("knowledge", "single-row test")
    found = get_memory(created.id)
    assert found is not None and found.id == created.id
    assert get_memory("non-existent-id") is None


def test_update_memory_can_change_project_with_canonicalization(
    isolated_db, project_registry
):
    """update_memory(project=...) canonicalizes aliases and writes audit."""
    project_registry(("p04-gigabit", ("p04", "gigabit")))
    from atocore.memory.service import (
        create_memory,
        get_memory,
        get_memory_audit,
        update_memory,
    )

    mem = create_memory("knowledge", "retargetable fact", project="atocore")
    # Pass the alias; the service should store the canonical name.
    assert update_memory(mem.id, project="p04") is True

    assert get_memory(mem.id).project == "p04-gigabit"

    audit_rows = get_memory_audit(mem.id, limit=10)
    update_rows = [r for r in audit_rows if r.get("action") == "updated"]
    assert update_rows, f"expected an updated audit row, got {audit_rows}"
    head = update_rows[0]
    assert (head["before"]["project"], head["after"]["project"]) == (
        "atocore",
        "p04-gigabit",
    )


def test_update_memory_project_unchanged_when_not_passed(isolated_db):
    """Omitting project from update_memory leaves the stored project alone."""
    from atocore.memory.service import create_memory, get_memory, update_memory

    mem = create_memory("knowledge", "untouched project", project="p06-polisher")
    update_memory(mem.id, content="edited content")
    assert get_memory(mem.id).project == "p06-polisher"


def test_update_memory_to_empty_project_detects_global_duplicate(isolated_db):
    """Codex P3: when retargeting to project='' (global), the duplicate
    check must scope to the new project. If a global active memory with
    the same content already exists, the update must raise.

    Uses the module-level pytest import; the old `import pytest as _pytest`
    shadowed it redundantly.
    """
    from atocore.memory.service import create_memory, update_memory

    create_memory("knowledge", "shared global fact", project="")
    scoped = create_memory("knowledge", "shared global fact", project="p04-gigabit")

    with pytest.raises(ValueError, match="duplicate active memory"):
        update_memory(scoped.id, project="")


def test_auto_triage_suggested_project_put_body_uses_project_key():
    """Regression: the auto_triage caller used to PUT {"content": ...}
    which silently dropped the suggested project change. The fix sends
    {"project": suggested}. Inspect the script source so we don't have
    to spin up a live triage run."""
    from pathlib import Path

    script = Path(__file__).resolve().parents[1] / "scripts" / "auto_triage.py"
    source = script.read_text(encoding="utf-8")

    # The block that PUTs to /memory/{mid} for a suggested_project fix.
    assert 'json.dumps({"project": suggested})' in source, (
        "auto_triage.py must PUT {\"project\": suggested} so the "
        "suggested-project correction actually applies. See Wave 1."
    )
    # And the old, broken shape must not have crept back in.
    assert 'json.dumps({"content": cand["content"]})' not in source