feat: /admin/triage web UI + auto-drain loop
Makes human triage sustainable. Before: command-line-only review,
auto-triage stopped after 100 candidates/run. Now:
1. Web UI at /admin/triage
- Lists all pending candidates with inline promote/reject/edit
- Edit content in-place before promoting (PUT /memory/{id})
- Change type via dropdown
- Keyboard shortcuts: Y=promote, N=reject, E=edit, S=scroll-next
- Cards fade out after action, queue count updates live
- Zero JS framework — vanilla fetch + event delegation
2. auto_triage.py drains queue
- Loops up to 20 batches (default) of 100 candidates each
- Tracks seen IDs so needs_human items don't reprocess
- Exits cleanly when queue empty
- Nightly cron naturally drains everything
3. Dashboard + wiki surface triage queue
- Dashboard /admin/dashboard: new "triage" section with pending
count + /admin/triage URL + warning/notice severity levels
- Wiki homepage: prominent callout "N candidates awaiting triage —
review now" linking to /admin/triage, styled with the triage-warning
CSS class when the count exceeds 50, or triage-notice when it exceeds 20
Pattern: human intervenes only when AI can't decide. The UI makes
that intervention fast (20 candidates in 60 seconds). Nightly
auto-triage drains the queue so the human queue stays bounded.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -209,75 +209,81 @@ def main():
|
||||
parser.add_argument("--base-url", default=DEFAULT_BASE_URL)
|
||||
parser.add_argument("--model", default=DEFAULT_MODEL)
|
||||
parser.add_argument("--dry-run", action="store_true", help="preview without executing")
|
||||
parser.add_argument("--max-batches", type=int, default=20,
|
||||
help="Max batches of 100 to process per run (default 20 = 2000 candidates)")
|
||||
args = parser.parse_args()
|
||||
|
||||
# Fetch candidates
|
||||
result = api_get(args.base_url, "/memory?status=candidate&limit=100")
|
||||
candidates = result.get("memories", [])
|
||||
print(f"candidates: {len(candidates)} model: {args.model} dry_run: {args.dry_run}")
|
||||
|
||||
if not candidates:
|
||||
print("queue empty, nothing to triage")
|
||||
return
|
||||
|
||||
# Cache active memories per project for dedup
|
||||
active_cache = {}
|
||||
# Track IDs we've already seen so needs_human items don't re-process
|
||||
# every batch (they stay in the candidate table until a human reviews).
|
||||
seen_ids: set[str] = set()
|
||||
active_cache: dict[str, list] = {}
|
||||
promoted = rejected = needs_human = errors = 0
|
||||
batch_num = 0
|
||||
|
||||
for i, cand in enumerate(candidates, 1):
|
||||
# Light rate-limit pacing: 0.5s between triage calls so a burst
|
||||
# doesn't overwhelm the claude CLI's backend. With ~60s per call
|
||||
# this is negligible overhead but avoids the "all-failed" pattern
|
||||
# we saw on large batches.
|
||||
if i > 1:
|
||||
time.sleep(0.5)
|
||||
while batch_num < args.max_batches:
|
||||
batch_num += 1
|
||||
|
||||
project = cand.get("project") or ""
|
||||
if project not in active_cache:
|
||||
active_cache[project] = fetch_active_memories_for_project(args.base_url, project)
|
||||
result = api_get(args.base_url, "/memory?status=candidate&limit=100")
|
||||
all_candidates = result.get("memories", [])
|
||||
# Filter out already-seen (needs_human from prior batch in same run)
|
||||
candidates = [c for c in all_candidates if c["id"] not in seen_ids]
|
||||
|
||||
verdict_obj = triage_one(cand, active_cache[project], args.model, DEFAULT_TIMEOUT_S)
|
||||
verdict = verdict_obj["verdict"]
|
||||
conf = verdict_obj["confidence"]
|
||||
reason = verdict_obj["reason"]
|
||||
conflicts_with = verdict_obj.get("conflicts_with", "")
|
||||
|
||||
mid = cand["id"]
|
||||
label = f"[{i:2d}/{len(candidates)}] {mid[:8]} [{cand['memory_type']}]"
|
||||
|
||||
if verdict == "promote" and conf >= AUTO_PROMOTE_MIN_CONFIDENCE:
|
||||
if args.dry_run:
|
||||
print(f" WOULD PROMOTE {label} conf={conf:.2f} {reason}")
|
||||
if not candidates:
|
||||
if batch_num == 1:
|
||||
print("queue empty, nothing to triage")
|
||||
else:
|
||||
try:
|
||||
api_post(args.base_url, f"/memory/{mid}/promote")
|
||||
print(f" PROMOTED {label} conf={conf:.2f} {reason}")
|
||||
active_cache[project].append(cand)
|
||||
except Exception:
|
||||
errors += 1
|
||||
promoted += 1
|
||||
elif verdict == "reject":
|
||||
if args.dry_run:
|
||||
print(f" WOULD REJECT {label} conf={conf:.2f} {reason}")
|
||||
else:
|
||||
try:
|
||||
api_post(args.base_url, f"/memory/{mid}/reject")
|
||||
print(f" REJECTED {label} conf={conf:.2f} {reason}")
|
||||
except Exception:
|
||||
errors += 1
|
||||
rejected += 1
|
||||
elif verdict == "contradicts":
|
||||
# Leave candidate in queue but flag the conflict in content
|
||||
# so the wiki/triage shows it. This is conservative: we
|
||||
# don't silently merge or reject when sources disagree.
|
||||
print(f" CONTRADICTS {label} vs {conflicts_with[:8] if conflicts_with else '?'} {reason}")
|
||||
contradicts_count = locals().get('contradicts_count', 0) + 1
|
||||
needs_human += 1
|
||||
else:
|
||||
print(f" NEEDS_HUMAN {label} conf={conf:.2f} {reason}")
|
||||
needs_human += 1
|
||||
print(f"\nQueue drained after batch {batch_num-1}.")
|
||||
break
|
||||
|
||||
print(f"\npromoted={promoted} rejected={rejected} needs_human={needs_human} errors={errors}")
|
||||
print(f"\n=== batch {batch_num}: {len(candidates)} candidates model: {args.model} dry_run: {args.dry_run} ===")
|
||||
|
||||
for i, cand in enumerate(candidates, 1):
|
||||
if i > 1:
|
||||
time.sleep(0.5)
|
||||
|
||||
seen_ids.add(cand["id"])
|
||||
project = cand.get("project") or ""
|
||||
if project not in active_cache:
|
||||
active_cache[project] = fetch_active_memories_for_project(args.base_url, project)
|
||||
|
||||
verdict_obj = triage_one(cand, active_cache[project], args.model, DEFAULT_TIMEOUT_S)
|
||||
verdict = verdict_obj["verdict"]
|
||||
conf = verdict_obj["confidence"]
|
||||
reason = verdict_obj["reason"]
|
||||
conflicts_with = verdict_obj.get("conflicts_with", "")
|
||||
|
||||
mid = cand["id"]
|
||||
label = f"[{i:2d}/{len(candidates)}] {mid[:8]} [{cand['memory_type']}]"
|
||||
|
||||
if verdict == "promote" and conf >= AUTO_PROMOTE_MIN_CONFIDENCE:
|
||||
if args.dry_run:
|
||||
print(f" WOULD PROMOTE {label} conf={conf:.2f} {reason}")
|
||||
else:
|
||||
try:
|
||||
api_post(args.base_url, f"/memory/{mid}/promote")
|
||||
print(f" PROMOTED {label} conf={conf:.2f} {reason}")
|
||||
active_cache[project].append(cand)
|
||||
except Exception:
|
||||
errors += 1
|
||||
promoted += 1
|
||||
elif verdict == "reject":
|
||||
if args.dry_run:
|
||||
print(f" WOULD REJECT {label} conf={conf:.2f} {reason}")
|
||||
else:
|
||||
try:
|
||||
api_post(args.base_url, f"/memory/{mid}/reject")
|
||||
print(f" REJECTED {label} conf={conf:.2f} {reason}")
|
||||
except Exception:
|
||||
errors += 1
|
||||
rejected += 1
|
||||
elif verdict == "contradicts":
|
||||
print(f" CONTRADICTS {label} vs {conflicts_with[:8] if conflicts_with else '?'} {reason}")
|
||||
needs_human += 1
|
||||
else:
|
||||
print(f" NEEDS_HUMAN {label} conf={conf:.2f} {reason}")
|
||||
needs_human += 1
|
||||
|
||||
print(f"\ntotal: promoted={promoted} rejected={rejected} needs_human={needs_human} errors={errors} batches={batch_num}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
Reference in New Issue
Block a user