feat: Implement ACE Context Engineering framework (SYS_17)
Complete implementation of Agentic Context Engineering (ACE) framework:

Core modules (optimization_engine/context/):
- playbook.py: AtomizerPlaybook with helpful/harmful scoring
- reflector.py: AtomizerReflector for insight extraction
- session_state.py: Context isolation (exposed/isolated state)
- feedback_loop.py: Automated learning from trial results
- compaction.py: Long-session context management
- cache_monitor.py: KV-cache optimization tracking
- runner_integration.py: OptimizationRunner integration

Dashboard integration:
- context.py: 12 REST API endpoints for playbook management

Tests:
- test_context_engineering.py: 44 unit tests
- test_context_integration.py: 16 integration tests

Documentation:
- CONTEXT_ENGINEERING_REPORT.md: Comprehensive implementation report
- CONTEXT_ENGINEERING_API.md: Complete API reference
- SYS_17_CONTEXT_ENGINEERING.md: System protocol
- Updated cheatsheet with SYS_17 quick reference
- Enhanced bootstrap (00_BOOTSTRAP_V2.md)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
467
optimization_engine/context/reflector.py
Normal file
467
optimization_engine/context/reflector.py
Normal file
@@ -0,0 +1,467 @@
|
||||
"""
Atomizer Reflector - Optimization Outcome Analysis

Part of the ACE (Agentic Context Engineering) implementation for Atomizer.

The Reflector analyzes optimization outcomes to extract actionable insights:
- Examines successful and failed trials
- Extracts patterns that led to success/failure
- Formats insights for Curator (Playbook) integration

This implements the "Reflector" role from the ACE framework's
Generator -> Reflector -> Curator pipeline.
"""
|
||||
|
||||
import re
from dataclasses import asdict, dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

from .playbook import AtomizerPlaybook, InsightCategory, PlaybookItem
|
||||
|
||||
|
||||
@dataclass
class OptimizationOutcome:
    """
    Captured outcome from an optimization trial.

    Contains all information needed to analyze what happened
    and extract insights for the playbook.
    """
    trial_number: int                  # driver/optimizer trial index
    success: bool                      # True when the trial completed successfully
    objective_value: Optional[float]   # None when no objective was obtained
    constraint_violations: List[str] = field(default_factory=list)
    solver_errors: List[str] = field(default_factory=list)
    design_variables: Dict[str, float] = field(default_factory=dict)
    extractor_used: str = ""
    duration_seconds: float = 0.0
    notes: str = ""
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())

    # Optional metadata
    solver_type: str = ""
    mesh_info: Dict[str, Any] = field(default_factory=dict)
    convergence_info: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a plain dictionary for serialization.

        Uses dataclasses.asdict so the mapping can never drift out of
        sync with the field list (a hand-written field-by-field mapping
        would silently omit any field added later). Nested containers
        are deep-copied, which is the safe behavior for serialization.
        """
        return asdict(self)
|
||||
|
||||
|
||||
@dataclass
class InsightCandidate:
    """
    A candidate insight extracted from trial analysis.

    Not yet committed to playbook - pending review/aggregation.
    """
    category: InsightCategory           # playbook section this insight belongs to
    content: str                        # human-readable insight text
    helpful: bool                       # True = reinforce pattern, False = flag as harmful
    trial_number: Optional[int] = None  # originating trial, when tied to a single trial
    confidence: float = 0.5             # heuristic score compared against the
                                        # min_confidence threshold in commit_insights()
    tags: List[str] = field(default_factory=list)  # free-form labels for filtering
|
||||
|
||||
|
||||
class AtomizerReflector:
|
||||
"""
|
||||
Analyzes optimization outcomes and extracts actionable insights.
|
||||
|
||||
Implements the Reflector role from ACE framework:
|
||||
- Examines successful and failed trials
|
||||
- Extracts patterns that led to success/failure
|
||||
- Formats insights for Curator integration
|
||||
|
||||
Usage:
|
||||
playbook = AtomizerPlaybook.load(path)
|
||||
reflector = AtomizerReflector(playbook)
|
||||
|
||||
# After each trial
|
||||
reflector.analyze_trial(outcome)
|
||||
|
||||
# After study completion
|
||||
reflector.analyze_study_completion(stats)
|
||||
|
||||
# Commit insights to playbook
|
||||
count = reflector.commit_insights()
|
||||
playbook.save(path)
|
||||
"""
|
||||
|
||||
# Error pattern matchers for insight extraction
|
||||
ERROR_PATTERNS = {
|
||||
"convergence": [
|
||||
r"convergence",
|
||||
r"did not converge",
|
||||
r"iteration limit",
|
||||
r"max iterations"
|
||||
],
|
||||
"mesh": [
|
||||
r"mesh",
|
||||
r"element",
|
||||
r"distorted",
|
||||
r"jacobian",
|
||||
r"negative volume"
|
||||
],
|
||||
"singularity": [
|
||||
r"singular",
|
||||
r"matrix",
|
||||
r"ill-conditioned",
|
||||
r"pivot"
|
||||
],
|
||||
"memory": [
|
||||
r"memory",
|
||||
r"allocation",
|
||||
r"out of memory",
|
||||
r"insufficient"
|
||||
],
|
||||
"license": [
|
||||
r"license",
|
||||
r"checkout",
|
||||
r"unavailable"
|
||||
],
|
||||
"boundary": [
|
||||
r"boundary",
|
||||
r"constraint",
|
||||
r"spc",
|
||||
r"load"
|
||||
]
|
||||
}
|
||||
|
||||
def __init__(self, playbook: AtomizerPlaybook):
|
||||
"""
|
||||
Initialize reflector with target playbook.
|
||||
|
||||
Args:
|
||||
playbook: The playbook to add insights to
|
||||
"""
|
||||
self.playbook = playbook
|
||||
self.pending_insights: List[InsightCandidate] = []
|
||||
self.analyzed_trials: List[int] = []
|
||||
|
||||
def analyze_trial(self, outcome: OptimizationOutcome) -> List[InsightCandidate]:
|
||||
"""
|
||||
Analyze a single trial outcome and extract insights.
|
||||
|
||||
Returns list of insight candidates (not yet added to playbook).
|
||||
|
||||
Args:
|
||||
outcome: The trial outcome to analyze
|
||||
|
||||
Returns:
|
||||
List of extracted insight candidates
|
||||
"""
|
||||
insights = []
|
||||
self.analyzed_trials.append(outcome.trial_number)
|
||||
|
||||
# Analyze solver errors
|
||||
for error in outcome.solver_errors:
|
||||
error_insights = self._analyze_error(error, outcome)
|
||||
insights.extend(error_insights)
|
||||
|
||||
# Analyze constraint violations
|
||||
for violation in outcome.constraint_violations:
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.MISTAKE,
|
||||
content=f"Constraint violation: {violation}",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
tags=["constraint", "violation"]
|
||||
))
|
||||
|
||||
# Analyze successful patterns
|
||||
if outcome.success and outcome.objective_value is not None:
|
||||
success_insights = self._analyze_success(outcome)
|
||||
insights.extend(success_insights)
|
||||
|
||||
# Analyze duration (performance insights)
|
||||
if outcome.duration_seconds > 0:
|
||||
perf_insights = self._analyze_performance(outcome)
|
||||
insights.extend(perf_insights)
|
||||
|
||||
self.pending_insights.extend(insights)
|
||||
return insights
|
||||
|
||||
def _analyze_error(
|
||||
self,
|
||||
error: str,
|
||||
outcome: OptimizationOutcome
|
||||
) -> List[InsightCandidate]:
|
||||
"""Analyze a solver error and extract relevant insights."""
|
||||
insights = []
|
||||
error_lower = error.lower()
|
||||
|
||||
# Classify error type
|
||||
error_type = "unknown"
|
||||
for etype, patterns in self.ERROR_PATTERNS.items():
|
||||
if any(re.search(p, error_lower) for p in patterns):
|
||||
error_type = etype
|
||||
break
|
||||
|
||||
# Generate insight based on error type
|
||||
if error_type == "convergence":
|
||||
config_summary = self._summarize_config(outcome)
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.MISTAKE,
|
||||
content=f"Convergence failure with {config_summary}. Consider relaxing solver tolerances or reviewing mesh quality.",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.7,
|
||||
tags=["convergence", "solver", error_type]
|
||||
))
|
||||
|
||||
elif error_type == "mesh":
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.MISTAKE,
|
||||
content=f"Mesh-related error: {error[:100]}. Review element quality and mesh density.",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.8,
|
||||
tags=["mesh", "element", error_type]
|
||||
))
|
||||
|
||||
elif error_type == "singularity":
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.MISTAKE,
|
||||
content=f"Matrix singularity detected. Check boundary conditions and constraints for rigid body modes.",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.9,
|
||||
tags=["singularity", "boundary", error_type]
|
||||
))
|
||||
|
||||
elif error_type == "memory":
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.TOOL,
|
||||
content=f"Memory allocation failure. Consider reducing mesh density or using out-of-core solver.",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.8,
|
||||
tags=["memory", "performance", error_type]
|
||||
))
|
||||
|
||||
else:
|
||||
# Generic error insight
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.MISTAKE,
|
||||
content=f"Solver error: {error[:150]}",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.5,
|
||||
tags=["error", error_type]
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def _analyze_success(self, outcome: OptimizationOutcome) -> List[InsightCandidate]:
|
||||
"""Analyze successful trial and extract winning patterns."""
|
||||
insights = []
|
||||
|
||||
# Record successful design variable ranges
|
||||
design_summary = self._summarize_design(outcome)
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.STRATEGY,
|
||||
content=f"Successful design: {design_summary}",
|
||||
helpful=True,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.6,
|
||||
tags=["success", "design"]
|
||||
))
|
||||
|
||||
# Record extractor performance if fast
|
||||
if outcome.duration_seconds > 0 and outcome.duration_seconds < 60:
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.TOOL,
|
||||
content=f"Fast solve ({outcome.duration_seconds:.1f}s) using {outcome.extractor_used}",
|
||||
helpful=True,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.5,
|
||||
tags=["performance", "extractor"]
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def _analyze_performance(self, outcome: OptimizationOutcome) -> List[InsightCandidate]:
|
||||
"""Analyze performance characteristics."""
|
||||
insights = []
|
||||
|
||||
# Flag very slow trials
|
||||
if outcome.duration_seconds > 300: # > 5 minutes
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.TOOL,
|
||||
content=f"Slow trial ({outcome.duration_seconds/60:.1f} min). Consider mesh refinement or solver settings.",
|
||||
helpful=False,
|
||||
trial_number=outcome.trial_number,
|
||||
confidence=0.6,
|
||||
tags=["performance", "slow"]
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def analyze_study_completion(
|
||||
self,
|
||||
study_name: str,
|
||||
total_trials: int,
|
||||
best_value: float,
|
||||
convergence_rate: float,
|
||||
method: str = ""
|
||||
) -> List[InsightCandidate]:
|
||||
"""
|
||||
Analyze completed study and extract high-level insights.
|
||||
|
||||
Args:
|
||||
study_name: Name of the completed study
|
||||
total_trials: Total number of trials run
|
||||
best_value: Best objective value achieved
|
||||
convergence_rate: Fraction of trials that succeeded (0.0-1.0)
|
||||
method: Optimization method used
|
||||
|
||||
Returns:
|
||||
List of study-level insight candidates
|
||||
"""
|
||||
insights = []
|
||||
|
||||
if convergence_rate > 0.9:
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.STRATEGY,
|
||||
content=f"Study '{study_name}' achieved {convergence_rate:.0%} success rate - configuration is robust for similar problems.",
|
||||
helpful=True,
|
||||
confidence=0.8,
|
||||
tags=["study", "robust", "high_success"]
|
||||
))
|
||||
elif convergence_rate < 0.5:
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.MISTAKE,
|
||||
content=f"Study '{study_name}' had only {convergence_rate:.0%} success rate - review mesh quality and solver settings.",
|
||||
helpful=False,
|
||||
confidence=0.8,
|
||||
tags=["study", "low_success", "needs_review"]
|
||||
))
|
||||
|
||||
# Method-specific insights
|
||||
if method and total_trials > 20:
|
||||
if convergence_rate > 0.8:
|
||||
insights.append(InsightCandidate(
|
||||
category=InsightCategory.STRATEGY,
|
||||
content=f"{method} performed well on '{study_name}' ({convergence_rate:.0%} success, {total_trials} trials).",
|
||||
helpful=True,
|
||||
confidence=0.7,
|
||||
tags=["method", method.lower(), "performance"]
|
||||
))
|
||||
|
||||
self.pending_insights.extend(insights)
|
||||
return insights
|
||||
|
||||
def commit_insights(self, min_confidence: float = 0.0) -> int:
|
||||
"""
|
||||
Commit pending insights to playbook (Curator handoff).
|
||||
|
||||
Aggregates similar insights and adds to playbook with
|
||||
appropriate helpful/harmful counts.
|
||||
|
||||
Args:
|
||||
min_confidence: Minimum confidence threshold to commit
|
||||
|
||||
Returns:
|
||||
Number of insights added to playbook
|
||||
"""
|
||||
count = 0
|
||||
|
||||
for insight in self.pending_insights:
|
||||
if insight.confidence < min_confidence:
|
||||
continue
|
||||
|
||||
item = self.playbook.add_insight(
|
||||
category=insight.category,
|
||||
content=insight.content,
|
||||
source_trial=insight.trial_number,
|
||||
tags=insight.tags
|
||||
)
|
||||
|
||||
# Record initial outcome based on insight nature
|
||||
if not insight.helpful:
|
||||
self.playbook.record_outcome(item.id, helpful=False)
|
||||
|
||||
count += 1
|
||||
|
||||
self.pending_insights = []
|
||||
return count
|
||||
|
||||
def get_pending_count(self) -> int:
|
||||
"""Get number of pending insights."""
|
||||
return len(self.pending_insights)
|
||||
|
||||
def clear_pending(self) -> None:
|
||||
"""Clear pending insights without committing."""
|
||||
self.pending_insights = []
|
||||
|
||||
def _summarize_config(self, outcome: OptimizationOutcome) -> str:
|
||||
"""Create brief config summary for error context."""
|
||||
parts = []
|
||||
if outcome.extractor_used:
|
||||
parts.append(f"extractor={outcome.extractor_used}")
|
||||
parts.append(f"vars={len(outcome.design_variables)}")
|
||||
if outcome.solver_type:
|
||||
parts.append(f"solver={outcome.solver_type}")
|
||||
return ", ".join(parts)
|
||||
|
||||
def _summarize_design(self, outcome: OptimizationOutcome) -> str:
|
||||
"""Create brief design summary."""
|
||||
parts = []
|
||||
if outcome.objective_value is not None:
|
||||
parts.append(f"obj={outcome.objective_value:.4g}")
|
||||
|
||||
# Include up to 3 design variables
|
||||
var_items = list(outcome.design_variables.items())[:3]
|
||||
for k, v in var_items:
|
||||
parts.append(f"{k}={v:.3g}")
|
||||
|
||||
if len(outcome.design_variables) > 3:
|
||||
parts.append(f"(+{len(outcome.design_variables)-3} more)")
|
||||
|
||||
return ", ".join(parts)
|
||||
|
||||
|
||||
class ReflectorFactory:
    """Factory helpers that wire an AtomizerReflector to a playbook."""

    @staticmethod
    def create_for_study(study_dir: Path) -> AtomizerReflector:
        """
        Build a reflector backed by a study-local playbook.

        Args:
            study_dir: Path to the study directory

        Returns:
            Configured AtomizerReflector
        """
        study_playbook = AtomizerPlaybook.load(
            study_dir / "3_results" / "playbook.json"
        )
        return AtomizerReflector(study_playbook)

    @staticmethod
    def create_global() -> AtomizerReflector:
        """
        Build a reflector backed by the shared global playbook.

        Returns:
            AtomizerReflector using global playbook
        """
        # Imported lazily to avoid a circular import at module load time.
        from .playbook import get_playbook
        return AtomizerReflector(get_playbook())
|
||||
Reference in New Issue
Block a user