Files
Atomizer/hq/workspaces/shared/skills/orchestrate/challenge-mode.sh

98 lines
3.4 KiB
Bash
Executable File

#!/usr/bin/env bash
# Auditor Challenge Mode
# Triggers auditor to proactively review recent work from target agents
# Usage:
#   bash challenge-mode.sh <agent> [scope]
#   bash challenge-mode.sh tech-lead "Review their material selection approach"
#   bash challenge-mode.sh all "Challenge all recent decisions"
set -euo pipefail

# Fixed paths into the orchestration workspace; readonly so later code
# cannot accidentally clobber them.
readonly ORCHESTRATE="/home/papa/atomizer/workspaces/shared/skills/orchestrate/orchestrate.sh"
readonly HANDOFF_DIR="/home/papa/atomizer/handoffs"

# $1 is required: an agent name, or 'all' to challenge every agent.
# $2 optionally narrows what the auditor should focus on.
TARGET="${1:?Usage: challenge-mode.sh <agent|all> [scope]}"
SCOPE="${2:-Review their most recent completed work for rigor, assumptions, and missed alternatives}"
# Gather recent completed handoffs from target agent(s).
# TARGET/HANDOFF_DIR are passed through the environment rather than being
# interpolated into the Python source, so values containing quotes or other
# shell metacharacters cannot break (or inject into) the embedded script.
# The quoted 'PY' delimiter keeps the heredoc literal.
CONTEXT=$(TARGET="$TARGET" HANDOFF_DIR="$HANDOFF_DIR" python3 <<'PY'
import glob
import json
import os

target = os.environ.get("TARGET", "")
handoff_dir = os.environ.get("HANDOFF_DIR", "")

# Newest handoffs first; scan at most 50 files, keep at most 5 matches.
handoffs = sorted(glob.glob(os.path.join(handoff_dir, "orch-*.json")), reverse=True)
results = []
for path in handoffs[:50]:
    try:
        with open(path) as f:
            data = json.load(f)
        agent = data.get("agent", "")
        status = data.get("status", "").lower()
        if status != "complete":
            continue
        if target != "all" and agent != target:
            continue
        result_preview = (data.get("result", "") or "")[:500]
        deliverable = data.get("deliverable", {}) or {}
        results.append({
            "agent": agent,
            "runId": data.get("runId", ""),
            "result_preview": result_preview,
            "deliverable_summary": deliverable.get("summary", "none"),
            "confidence": data.get("confidence", "unknown"),
            "notes": (data.get("notes", "") or "")[:200],
        })
        if len(results) >= 5:
            break
    except Exception:
        # Unreadable or malformed handoff file: skip it (best-effort scan),
        # but do not swallow SystemExit/KeyboardInterrupt like bare 'except:'.
        continue

print(json.dumps(results, indent=2))
PY
)
# Nothing to review: the scan above found no matching completed handoffs.
if [ "$CONTEXT" = "[]" ]; then
  echo "No recent completed work found for $TARGET"
  exit 0
fi

# Save context for auditor. Clean the temp file up on EVERY exit path:
# under 'set -e' a failing orchestrate call would otherwise skip a trailing
# 'rm -f' and leak the file. (GNU mktemp treats '.json' after the X's as an
# implicit --suffix.)
CONTEXT_FILE=$(mktemp /tmp/challenge-context-XXXXXX.json)
trap 'rm -f -- "$CONTEXT_FILE"' EXIT
printf '%s\n' "$CONTEXT" > "$CONTEXT_FILE"

# Build the challenge task
if [ "$TARGET" = "all" ]; then
  CHALLENGE_TASK="CHALLENGE MODE: Review the recent completed work from ALL agents.
Your task: $SCOPE
For each piece of work, apply your full audit mindset:
1. Challenge assumptions — what did they take for granted?
2. Check for missed alternatives — was this the best approach or just the first?
3. Validate reasoning — is the logic sound? Are there logical gaps?
4. Question confidence levels — is 'high confidence' justified?
5. Look for blind spots — what didn't they consider?
Be constructive but rigorous. Your goal is to make the team's work BETTER, not just find faults.
Produce a Challenge Report with findings per agent and overall recommendations."
else
  CHALLENGE_TASK="CHALLENGE MODE: Review $TARGET's recent completed work.
Your task: $SCOPE
Apply your full audit mindset to $TARGET's output:
1. Challenge assumptions — what did they take for granted?
2. Check for missed alternatives — was this the best approach or just the first?
3. Validate reasoning — is the logic sound? Are there logical gaps?
4. Question confidence levels — is 'high confidence' justified?
5. Look for blind spots — what didn't they consider?
6. Suggest improvements — concrete, actionable next steps
Be constructive but rigorous. Your goal is to make $TARGET's work BETTER.
Produce a Challenge Report with specific findings and recommendations."
fi

# Delegate to auditor via orchestration; the EXIT trap removes the temp file
# whether this succeeds or fails.
bash "$ORCHESTRATE" auditor "$CHALLENGE_TASK" \
  --context "$CONTEXT_FILE" \
  --timeout 300