fix: retry + stderr capture + pacing in triage/extractor

Both scripts now:
- Make up to 3 attempts (i.e. up to 2 retries) with 2s/4s
  exponential backoff on transient failures (rate limits,
  capacity spikes)
- Capture claude CLI stderr in the error message (200 char cap)
  instead of just the exit code — diagnostics actually useful now
- Sleep 0.5s between calls to avoid bursting the backend

Context: last batch run hit 100% failure in triage (every call
exit 1) after 40% failure in extraction. claude CLI worked fine
immediately after, so the failures were capacity/rate-limit
transients. With retry + pacing these batches should complete
cleanly now. 439 candidates are already in the queue waiting
for triage.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-16 16:29:20 -04:00
parent ba36a28453
commit 3011aa77da
2 changed files with 64 additions and 29 deletions

View File

@@ -29,6 +29,7 @@ import os
import shutil
import subprocess
import sys
import time
import tempfile
import urllib.error
import urllib.parse
@@ -131,22 +132,33 @@ def triage_one(candidate, active_memories, model, timeout_s):
user_message,
]
try:
completed = subprocess.run(
args, capture_output=True, text=True,
timeout=timeout_s, cwd=get_sandbox_cwd(),
encoding="utf-8", errors="replace",
)
except subprocess.TimeoutExpired:
return {"verdict": "needs_human", "confidence": 0.0, "reason": "triage model timed out"}
except Exception as exc:
return {"verdict": "needs_human", "confidence": 0.0, "reason": f"subprocess error: {exc}"}
# Retry with exponential backoff on transient failures (rate limits etc)
last_error = ""
for attempt in range(3):
if attempt > 0:
time.sleep(2 ** attempt) # 2s, 4s
try:
completed = subprocess.run(
args, capture_output=True, text=True,
timeout=timeout_s, cwd=get_sandbox_cwd(),
encoding="utf-8", errors="replace",
)
except subprocess.TimeoutExpired:
last_error = "triage model timed out"
continue
except Exception as exc:
last_error = f"subprocess error: {exc}"
continue
if completed.returncode != 0:
return {"verdict": "needs_human", "confidence": 0.0, "reason": f"claude exit {completed.returncode}"}
if completed.returncode == 0:
raw = (completed.stdout or "").strip()
return parse_verdict(raw)
raw = (completed.stdout or "").strip()
return parse_verdict(raw)
# Capture stderr for diagnostics (truncate to 200 chars)
stderr = (completed.stderr or "").strip()[:200]
last_error = f"claude exit {completed.returncode}: {stderr}" if stderr else f"claude exit {completed.returncode}"
return {"verdict": "needs_human", "confidence": 0.0, "reason": last_error}
def parse_verdict(raw):
@@ -213,6 +225,13 @@ def main():
promoted = rejected = needs_human = errors = 0
for i, cand in enumerate(candidates, 1):
# Light rate-limit pacing: 0.5s between triage calls so a burst
# doesn't overwhelm the claude CLI's backend. With ~60s per call
# this is negligible overhead but avoids the "all-failed" pattern
# we saw on large batches.
if i > 1:
time.sleep(0.5)
project = cand.get("project") or ""
if project not in active_cache:
active_cache[project] = fetch_active_memories_for_project(args.base_url, project)