## Documentation Updates - DASHBOARD.md: Updated to V3.0 with Canvas V3 features, file browser, introspection - DASHBOARD_IMPLEMENTATION_STATUS.md: Marked Canvas V3 features as COMPLETE - CANVAS.md: New comprehensive guide for Canvas Builder V3 with all features - CLAUDE.md: Added dashboard quick reference and Canvas V3 features ## Canvas V3 Features Documented - File Browser: Browse studies directory for model files - Model Introspection: Auto-discover expressions, solver type, dependencies - One-Click Add: Add expressions as design variables instantly - Claude Bug Fixes: WebSocket reconnection, SQL errors resolved - Health Check: /api/health endpoint for monitoring ## Backend Services - NX introspection service with expression discovery - File browser API with type filtering - Claude session management improvements - Context builder enhancements ## Frontend Components - FileBrowser: Modal for file selection with search - IntrospectionPanel: View discovered model information - ExpressionSelector: Dropdown for design variable configuration - Improved chat hooks with reconnection logic ## Plan Documents - Added RALPH_LOOP_CANVAS_V2/V3 implementation records - Added ATOMIZER_DASHBOARD_V2_MASTER_PLAN - Added investigation and sync documentation Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
413 lines
16 KiB
Python
413 lines
16 KiB
Python
"""
|
|
Context Builder
|
|
|
|
Builds rich context prompts for Claude sessions based on mode and study.
|
|
"""
|
|
|
|
import json
|
|
import sqlite3
|
|
from pathlib import Path
|
|
from typing import Any, Dict, List, Literal, Optional
|
|
|
|
# Root of the Atomizer checkout: five directory levels above this module.
ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent


class ContextBuilder:
    """Builds context prompts for Claude sessions"""

    def __init__(self):
        # Resolve the key directories once; every builder method reads these.
        self.atomizer_root = ATOMIZER_ROOT
        # Studies live in a fixed subdirectory of the Atomizer root.
        self.studies_dir = self.atomizer_root / "studies"
|
|
|
|
def build(
|
|
self,
|
|
mode: Literal["user", "power"],
|
|
study_id: Optional[str] = None,
|
|
conversation_history: Optional[List[Dict[str, Any]]] = None,
|
|
canvas_state: Optional[Dict[str, Any]] = None,
|
|
) -> str:
|
|
"""
|
|
Build full system prompt with context.
|
|
|
|
Args:
|
|
mode: "user" for safe operations, "power" for full access
|
|
study_id: Optional study name to provide context for
|
|
conversation_history: Optional recent messages for continuity
|
|
canvas_state: Optional canvas state (nodes, edges) from the UI
|
|
|
|
Returns:
|
|
Complete system prompt string
|
|
"""
|
|
parts = [self._base_context(mode)]
|
|
|
|
# Canvas context takes priority - if user is working on a canvas, include it
|
|
if canvas_state:
|
|
parts.append(self._canvas_context(canvas_state))
|
|
|
|
if study_id:
|
|
parts.append(self._study_context(study_id))
|
|
else:
|
|
parts.append(self._global_context())
|
|
|
|
if conversation_history:
|
|
parts.append(self._conversation_context(conversation_history))
|
|
|
|
parts.append(self._mode_instructions(mode))
|
|
|
|
return "\n\n---\n\n".join(parts)
|
|
|
|
def build_study_context(self, study_id: str) -> str:
|
|
"""Build just the study context (for updates)"""
|
|
return self._study_context(study_id)
|
|
|
|
def _base_context(self, mode: str) -> str:
|
|
"""Base identity and capabilities"""
|
|
return f"""# Atomizer Assistant
|
|
|
|
You are the Atomizer Assistant - an expert system for structural optimization using FEA.
|
|
|
|
**Current Mode**: {mode.upper()}
|
|
|
|
Your role:
|
|
- Help engineers with FEA optimization workflows
|
|
- Create, configure, and run optimization studies
|
|
- Analyze results and provide insights
|
|
- Explain FEA concepts and methodology
|
|
|
|
Important guidelines:
|
|
- Be concise and professional
|
|
- Use technical language appropriate for engineers
|
|
- You are "Atomizer Assistant", not a generic AI
|
|
- Use the available MCP tools to perform actions
|
|
- When asked about studies, use the appropriate tools to get real data
|
|
"""
|
|
|
|
def _study_context(self, study_id: str) -> str:
|
|
"""Context for a specific study"""
|
|
study_dir = self.studies_dir / study_id
|
|
if not study_dir.exists():
|
|
return f"# Current Study: {study_id}\n\n**Status**: Study directory not found."
|
|
|
|
context = f"# Current Study: {study_id}\n\n"
|
|
|
|
# Load configuration
|
|
config_path = study_dir / "1_setup" / "optimization_config.json"
|
|
if not config_path.exists():
|
|
config_path = study_dir / "optimization_config.json"
|
|
|
|
if config_path.exists():
|
|
try:
|
|
with open(config_path) as f:
|
|
config = json.load(f)
|
|
|
|
context += "## Configuration\n\n"
|
|
|
|
# Design variables
|
|
dvs = config.get("design_variables", [])
|
|
if dvs:
|
|
context += "**Design Variables:**\n"
|
|
for dv in dvs[:10]:
|
|
bounds = f"[{dv.get('lower', '?')}, {dv.get('upper', '?')}]"
|
|
context += f"- {dv.get('name', 'unnamed')}: {bounds}\n"
|
|
if len(dvs) > 10:
|
|
context += f"- ... and {len(dvs) - 10} more\n"
|
|
|
|
# Objectives
|
|
objs = config.get("objectives", [])
|
|
if objs:
|
|
context += "\n**Objectives:**\n"
|
|
for obj in objs:
|
|
direction = obj.get("direction", "minimize")
|
|
context += f"- {obj.get('name', 'unnamed')} ({direction})\n"
|
|
|
|
# Constraints
|
|
constraints = config.get("constraints", [])
|
|
if constraints:
|
|
context += "\n**Constraints:**\n"
|
|
for c in constraints:
|
|
context += f"- {c.get('name', 'unnamed')}: {c.get('type', 'unknown')}\n"
|
|
|
|
# Method
|
|
method = config.get("method", "TPE")
|
|
max_trials = config.get("max_trials", "not set")
|
|
context += f"\n**Method**: {method}, max_trials: {max_trials}\n"
|
|
|
|
except (json.JSONDecodeError, IOError) as e:
|
|
context += f"\n*Config file exists but could not be parsed: {e}*\n"
|
|
|
|
# Check for results
|
|
db_path = study_dir / "3_results" / "study.db"
|
|
if db_path.exists():
|
|
try:
|
|
conn = sqlite3.connect(db_path)
|
|
count = conn.execute(
|
|
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
|
|
).fetchone()[0]
|
|
|
|
best = conn.execute("""
|
|
SELECT MIN(tv.value) FROM trial_values tv
|
|
JOIN trials t ON tv.trial_id = t.trial_id
|
|
WHERE t.state = 'COMPLETE'
|
|
""").fetchone()[0]
|
|
|
|
context += f"\n## Results Status\n\n"
|
|
context += f"- **Trials completed**: {count}\n"
|
|
if best is not None:
|
|
context += f"- **Best objective**: {best:.6f}\n"
|
|
|
|
conn.close()
|
|
except Exception:
|
|
pass
|
|
|
|
return context
|
|
|
|
def _global_context(self) -> str:
|
|
"""Context when no study is selected"""
|
|
context = "# Available Studies\n\n"
|
|
|
|
if self.studies_dir.exists():
|
|
studies = [
|
|
d.name
|
|
for d in self.studies_dir.iterdir()
|
|
if d.is_dir() and not d.name.startswith("_")
|
|
]
|
|
|
|
if studies:
|
|
context += "The following studies are available:\n\n"
|
|
for name in sorted(studies)[:20]:
|
|
context += f"- {name}\n"
|
|
if len(studies) > 20:
|
|
context += f"\n... and {len(studies) - 20} more\n"
|
|
else:
|
|
context += "No studies found. Use `create_study` tool to create one.\n"
|
|
else:
|
|
context += "Studies directory not found.\n"
|
|
|
|
context += "\n## Quick Actions\n\n"
|
|
context += "- **Create study**: Describe what you want to optimize\n"
|
|
context += "- **List studies**: Use `list_studies` tool for details\n"
|
|
context += "- **Open study**: Ask about a specific study by name\n"
|
|
|
|
return context
|
|
|
|
def _conversation_context(self, history: List[Dict[str, Any]]) -> str:
|
|
"""Recent conversation for continuity"""
|
|
if not history:
|
|
return ""
|
|
|
|
context = "# Recent Conversation\n\n"
|
|
for msg in history[-10:]:
|
|
role = "User" if msg.get("role") == "user" else "Assistant"
|
|
content = msg.get("content", "")[:500]
|
|
if len(msg.get("content", "")) > 500:
|
|
content += "..."
|
|
context += f"**{role}**: {content}\n\n"
|
|
|
|
return context
|
|
|
|
def _canvas_context(self, canvas_state: Dict[str, Any]) -> str:
|
|
"""
|
|
Build context from canvas state (nodes and edges).
|
|
|
|
This is CRITICAL for Claude to understand the current workflow
|
|
being built in the Canvas UI.
|
|
"""
|
|
context = "# Current Canvas State\n\n"
|
|
context += "**You are assisting the user with a Canvas Builder workflow.**\n"
|
|
context += "The canvas represents an optimization pipeline being configured visually.\n\n"
|
|
|
|
nodes = canvas_state.get("nodes", [])
|
|
edges = canvas_state.get("edges", [])
|
|
study_name = canvas_state.get("studyName", "Untitled")
|
|
study_path = canvas_state.get("studyPath", None)
|
|
|
|
context += f"**Study Name**: {study_name}\n"
|
|
if study_path:
|
|
context += f"**Study Path**: {study_path}\n"
|
|
context += "\n"
|
|
|
|
# Group nodes by type
|
|
node_types = {}
|
|
for node in nodes:
|
|
node_type = node.get("type", "unknown")
|
|
if node_type not in node_types:
|
|
node_types[node_type] = []
|
|
node_types[node_type].append(node)
|
|
|
|
# Model node
|
|
if "model" in node_types:
|
|
model = node_types["model"][0]
|
|
data = model.get("data", {})
|
|
context += "## Model\n"
|
|
context += f"- **Label**: {data.get('label', 'Model')}\n"
|
|
context += f"- **File Path**: {data.get('filePath', 'Not set')}\n"
|
|
context += f"- **File Type**: {data.get('fileType', 'Not set')}\n\n"
|
|
|
|
# Solver node
|
|
if "solver" in node_types:
|
|
solver = node_types["solver"][0]
|
|
data = solver.get("data", {})
|
|
context += "## Solver\n"
|
|
context += f"- **Type**: {data.get('solverType', 'Not set')}\n\n"
|
|
|
|
# Design variables
|
|
if "designVar" in node_types:
|
|
context += "## Design Variables\n\n"
|
|
context += "| Name | Expression | Min | Max | Baseline | Unit | Enabled |\n"
|
|
context += "|------|------------|-----|-----|----------|------|---------|\n"
|
|
for dv in node_types["designVar"]:
|
|
data = dv.get("data", {})
|
|
name = data.get("label", "?")
|
|
expr = data.get("expressionName", data.get("label", "?"))
|
|
min_val = data.get("minValue", "?")
|
|
max_val = data.get("maxValue", "?")
|
|
baseline = data.get("baseline", "-")
|
|
unit = data.get("unit", "-")
|
|
enabled = "✓" if data.get("enabled", True) else "✗"
|
|
context += f"| {name} | {expr} | {min_val} | {max_val} | {baseline} | {unit} | {enabled} |\n"
|
|
context += "\n"
|
|
|
|
# Extractors
|
|
if "extractor" in node_types:
|
|
context += "## Extractors\n\n"
|
|
for ext in node_types["extractor"]:
|
|
data = ext.get("data", {})
|
|
context += f"### {data.get('extractorName', data.get('label', 'Extractor'))}\n"
|
|
context += f"- **ID**: {data.get('extractorId', 'Not set')}\n"
|
|
context += f"- **Type**: {data.get('extractorType', 'Not set')}\n"
|
|
if data.get("extractMethod"):
|
|
context += f"- **Method**: {data.get('extractMethod')}\n"
|
|
if data.get("innerRadius"):
|
|
context += f"- **Inner Radius**: {data.get('innerRadius')}\n"
|
|
if data.get("nModes"):
|
|
context += f"- **Zernike Modes**: {data.get('nModes')}\n"
|
|
if data.get("subcases"):
|
|
context += f"- **Subcases**: {data.get('subcases')}\n"
|
|
if data.get("config"):
|
|
config = data.get("config", {})
|
|
if config.get("subcaseLabels"):
|
|
context += f"- **Subcase Labels**: {config.get('subcaseLabels')}\n"
|
|
if config.get("referenceSubcase"):
|
|
context += f"- **Reference Subcase**: {config.get('referenceSubcase')}\n"
|
|
context += "\n"
|
|
|
|
# Objectives
|
|
if "objective" in node_types:
|
|
context += "## Objectives\n\n"
|
|
context += "| Name | Direction | Weight | Penalty |\n"
|
|
context += "|------|-----------|--------|---------|\n"
|
|
for obj in node_types["objective"]:
|
|
data = obj.get("data", {})
|
|
name = data.get("name", data.get("label", "?"))
|
|
direction = data.get("direction", "minimize")
|
|
weight = data.get("weight", 1)
|
|
penalty = data.get("penaltyWeight", "-")
|
|
context += f"| {name} | {direction} | {weight} | {penalty} |\n"
|
|
context += "\n"
|
|
|
|
# Constraints
|
|
if "constraint" in node_types:
|
|
context += "## Constraints\n\n"
|
|
context += "| Name | Operator | Value |\n"
|
|
context += "|------|----------|-------|\n"
|
|
for con in node_types["constraint"]:
|
|
data = con.get("data", {})
|
|
name = data.get("name", data.get("label", "?"))
|
|
operator = data.get("operator", "?")
|
|
value = data.get("value", "?")
|
|
context += f"| {name} | {operator} | {value} |\n"
|
|
context += "\n"
|
|
|
|
# Algorithm
|
|
if "algorithm" in node_types:
|
|
algo = node_types["algorithm"][0]
|
|
data = algo.get("data", {})
|
|
context += "## Algorithm\n"
|
|
context += f"- **Method**: {data.get('method', 'Not set')}\n"
|
|
context += f"- **Max Trials**: {data.get('maxTrials', 'Not set')}\n"
|
|
if data.get("sigma0"):
|
|
context += f"- **CMA-ES Sigma0**: {data.get('sigma0')}\n"
|
|
if data.get("restartStrategy"):
|
|
context += f"- **Restart Strategy**: {data.get('restartStrategy')}\n"
|
|
context += "\n"
|
|
|
|
# Surrogate
|
|
if "surrogate" in node_types:
|
|
sur = node_types["surrogate"][0]
|
|
data = sur.get("data", {})
|
|
context += "## Surrogate\n"
|
|
context += f"- **Enabled**: {data.get('enabled', False)}\n"
|
|
context += f"- **Type**: {data.get('modelType', 'Not set')}\n"
|
|
context += f"- **Min Trials**: {data.get('minTrials', 'Not set')}\n\n"
|
|
|
|
# Edge connections summary
|
|
context += "## Connections\n\n"
|
|
context += f"Total edges: {len(edges)}\n"
|
|
context += "Flow: Design Variables → Model → Solver → Extractors → Objectives/Constraints → Algorithm\n\n"
|
|
|
|
# Canvas modification instructions
|
|
context += """## Canvas Modification Tools
|
|
|
|
When the user asks to modify the canvas (add/remove nodes, change values), use these MCP tools:
|
|
- `canvas_add_node` - Add a new node (designVar, extractor, objective, constraint)
|
|
- `canvas_update_node` - Update node properties (bounds, weights, names)
|
|
- `canvas_remove_node` - Remove a node from the canvas
|
|
- `canvas_connect_nodes` - Create an edge between nodes
|
|
|
|
**Example user requests you can handle:**
|
|
- "Add a design variable called hole_diameter with range 5-15 mm" → Use canvas_add_node
|
|
- "Change the weight of wfe_40_20 to 8" → Use canvas_update_node
|
|
- "Remove the constraint node" → Use canvas_remove_node
|
|
- "Connect the new extractor to the objective" → Use canvas_connect_nodes
|
|
|
|
Always respond with confirmation of changes made to the canvas.
|
|
"""
|
|
|
|
return context
|
|
|
|
def _mode_instructions(self, mode: str) -> str:
|
|
"""Mode-specific instructions"""
|
|
if mode == "power":
|
|
return """# Power Mode Instructions
|
|
|
|
You have **full access** to Atomizer's codebase. You can:
|
|
- Edit any file using `edit_file` tool
|
|
- Create new files with `create_file` tool
|
|
- Create new extractors with `create_extractor` tool
|
|
- Run shell commands with `run_shell_command` tool
|
|
- Search codebase with `search_codebase` tool
|
|
- Commit and push changes
|
|
|
|
**Use these powers responsibly.** Always explain what you're doing and why.
|
|
|
|
For routine operations (list, status, run, analyze), use the standard tools.
|
|
"""
|
|
else:
|
|
return """# User Mode Instructions
|
|
|
|
You can help with optimization workflows:
|
|
- Create and configure studies
|
|
- Run optimizations
|
|
- Analyze results
|
|
- Generate reports
|
|
- Explain FEA concepts
|
|
|
|
**For code modifications**, suggest switching to Power Mode.
|
|
|
|
Available tools:
|
|
- `list_studies`, `get_study_status`, `create_study`
|
|
- `run_optimization`, `stop_optimization`, `get_optimization_status`
|
|
- `get_trial_data`, `analyze_convergence`, `compare_trials`, `get_best_design`
|
|
- `generate_report`, `export_data`
|
|
- `explain_physics`, `recommend_method`, `query_extractors`
|
|
|
|
**Canvas Tools (for visual workflow builder):**
|
|
- `validate_canvas_intent` - Validate a canvas-generated optimization intent
|
|
- `execute_canvas_intent` - Create a study from a canvas intent
|
|
- `interpret_canvas_intent` - Analyze intent and provide recommendations
|
|
|
|
When you receive a message containing "INTENT:" followed by JSON, this is from the Canvas UI.
|
|
Parse the intent and use the appropriate canvas tool to process it.
|
|
"""
|