Files
Atomizer/atomizer-dashboard/backend/api/services/context_builder.py
Anto01 ba0b9a1fae feat(dashboard): Enhanced chat, spec management, and Claude integration
Backend:
- spec.py: New AtomizerSpec REST API endpoints
- spec_manager.py: SpecManager service for unified config
- interview_engine.py: Study creation interview logic
- claude.py: Enhanced Claude API with context
- optimization.py: Extended optimization endpoints
- context_builder.py, session_manager.py: Improved services

Frontend:
- Chat components: Enhanced message rendering, tool call cards
- Hooks: useClaudeCode, useSpecWebSocket, improved useChat
- Pages: Updated Dashboard, Analysis, Insights, Setup, Home
- Components: ParallelCoordinatesPlot, ParetoPlot improvements
- App.tsx: Route updates for canvas/studio

Infrastructure:
- vite.config.ts: Build configuration updates
- start/stop-dashboard.bat: Script improvements
2026-01-20 13:10:47 -05:00

532 lines
22 KiB
Python

"""
Context Builder
Builds rich context prompts for Claude sessions based on mode and study.
"""
import json
import logging
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional
# Atomizer root directory
ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent
class ContextBuilder:
"""Builds context prompts for Claude sessions"""
def __init__(self):
self.atomizer_root = ATOMIZER_ROOT
self.studies_dir = ATOMIZER_ROOT / "studies"
def build(
self,
mode: Literal["user", "power"],
study_id: Optional[str] = None,
conversation_history: Optional[List[Dict[str, Any]]] = None,
canvas_state: Optional[Dict[str, Any]] = None,
) -> str:
"""
Build full system prompt with context.
Args:
mode: "user" for safe operations, "power" for full access
study_id: Optional study name to provide context for
conversation_history: Optional recent messages for continuity
canvas_state: Optional canvas state (nodes, edges) from the UI
Returns:
Complete system prompt string
"""
parts = [self._base_context(mode)]
# Canvas context takes priority - if user is working on a canvas, include it
if canvas_state:
node_count = len(canvas_state.get("nodes", []))
print(f"[ContextBuilder] Including canvas context with {node_count} nodes")
parts.append(self._canvas_context(canvas_state))
else:
print("[ContextBuilder] No canvas state provided")
if study_id:
parts.append(self._study_context(study_id))
else:
parts.append(self._global_context())
if conversation_history:
parts.append(self._conversation_context(conversation_history))
parts.append(self._mode_instructions(mode))
return "\n\n---\n\n".join(parts)
    def build_study_context(self, study_id: str) -> str:
        """Build just the study context (for updates).

        Thin wrapper around ``_study_context`` so callers refreshing only the
        study section of an existing prompt need not rebuild everything.
        """
        return self._study_context(study_id)
def _base_context(self, mode: str) -> str:
"""Base identity and capabilities"""
return f"""# Atomizer Assistant
You are the Atomizer Assistant - an expert system for structural optimization using FEA.
**Current Mode**: {mode.upper()}
Your role:
- Help engineers with FEA optimization workflows
- Create, configure, and run optimization studies
- Analyze results and provide insights
- Explain FEA concepts and methodology
Important guidelines:
- Be concise and professional
- Use technical language appropriate for engineers
- You are "Atomizer Assistant", not a generic AI
- Use the available MCP tools to perform actions
- When asked about studies, use the appropriate tools to get real data
"""
def _study_context(self, study_id: str) -> str:
"""Context for a specific study"""
study_dir = self.studies_dir / study_id
if not study_dir.exists():
return f"# Current Study: {study_id}\n\n**Status**: Study directory not found."
context = f"# Current Study: {study_id}\n\n"
# Check for AtomizerSpec v2.0 first (preferred)
spec_path = study_dir / "1_setup" / "atomizer_spec.json"
if not spec_path.exists():
spec_path = study_dir / "atomizer_spec.json"
if spec_path.exists():
context += self._spec_context(spec_path)
else:
# Fall back to legacy optimization_config.json
context += self._legacy_config_context(study_dir)
# Check for results
db_path = study_dir / "3_results" / "study.db"
if db_path.exists():
try:
conn = sqlite3.connect(db_path)
count = conn.execute(
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
).fetchone()[0]
best = conn.execute("""
SELECT MIN(tv.value) FROM trial_values tv
JOIN trials t ON tv.trial_id = t.trial_id
WHERE t.state = 'COMPLETE'
""").fetchone()[0]
context += f"\n## Results Status\n\n"
context += f"- **Trials completed**: {count}\n"
if best is not None:
context += f"- **Best objective**: {best:.6f}\n"
conn.close()
except Exception:
pass
return context
def _spec_context(self, spec_path: Path) -> str:
"""Build context from AtomizerSpec v2.0 file"""
context = "**Format**: AtomizerSpec v2.0\n\n"
try:
with open(spec_path) as f:
spec = json.load(f)
context += "## Configuration\n\n"
# Design variables
dvs = spec.get("design_variables", [])
if dvs:
context += "**Design Variables:**\n"
for dv in dvs[:10]:
bounds = dv.get("bounds", {})
bound_str = f"[{bounds.get('min', '?')}, {bounds.get('max', '?')}]"
enabled = "" if dv.get("enabled", True) else ""
context += f"- {dv.get('name', 'unnamed')}: {bound_str} {enabled}\n"
if len(dvs) > 10:
context += f"- ... and {len(dvs) - 10} more\n"
# Extractors
extractors = spec.get("extractors", [])
if extractors:
context += "\n**Extractors:**\n"
for ext in extractors:
ext_type = ext.get("type", "unknown")
outputs = ext.get("outputs", [])
output_names = [o.get("name", "?") for o in outputs[:3]]
builtin = "builtin" if ext.get("builtin", True) else "custom"
context += f"- {ext.get('name', 'unnamed')} ({ext_type}, {builtin}): outputs {output_names}\n"
# Objectives
objs = spec.get("objectives", [])
if objs:
context += "\n**Objectives:**\n"
for obj in objs:
direction = obj.get("direction", "minimize")
weight = obj.get("weight", 1.0)
context += f"- {obj.get('name', 'unnamed')} ({direction}, weight={weight})\n"
# Constraints
constraints = spec.get("constraints", [])
if constraints:
context += "\n**Constraints:**\n"
for c in constraints:
op = c.get("operator", "<=")
thresh = c.get("threshold", "?")
context += f"- {c.get('name', 'unnamed')}: {op} {thresh}\n"
# Optimization settings
opt = spec.get("optimization", {})
algo = opt.get("algorithm", {})
budget = opt.get("budget", {})
method = algo.get("type", "TPE")
max_trials = budget.get("max_trials", "not set")
context += f"\n**Optimization**: {method}, max_trials: {max_trials}\n"
# Surrogate
surrogate = opt.get("surrogate", {})
if surrogate.get("enabled"):
sur_type = surrogate.get("type", "gaussian_process")
context += f"**Surrogate**: {sur_type} enabled\n"
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Spec file exists but could not be parsed: {e}*\n"
return context
def _legacy_config_context(self, study_dir: Path) -> str:
"""Build context from legacy optimization_config.json"""
context = "**Format**: Legacy optimization_config.json\n\n"
config_path = study_dir / "1_setup" / "optimization_config.json"
if not config_path.exists():
config_path = study_dir / "optimization_config.json"
if config_path.exists():
try:
with open(config_path) as f:
config = json.load(f)
context += "## Configuration\n\n"
# Design variables
dvs = config.get("design_variables", [])
if dvs:
context += "**Design Variables:**\n"
for dv in dvs[:10]:
bounds = f"[{dv.get('lower', '?')}, {dv.get('upper', '?')}]"
context += f"- {dv.get('name', 'unnamed')}: {bounds}\n"
if len(dvs) > 10:
context += f"- ... and {len(dvs) - 10} more\n"
# Objectives
objs = config.get("objectives", [])
if objs:
context += "\n**Objectives:**\n"
for obj in objs:
direction = obj.get("direction", "minimize")
context += f"- {obj.get('name', 'unnamed')} ({direction})\n"
# Constraints
constraints = config.get("constraints", [])
if constraints:
context += "\n**Constraints:**\n"
for c in constraints:
context += f"- {c.get('name', 'unnamed')}: {c.get('type', 'unknown')}\n"
# Method
method = config.get("method", "TPE")
max_trials = config.get("max_trials", "not set")
context += f"\n**Method**: {method}, max_trials: {max_trials}\n"
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Config file exists but could not be parsed: {e}*\n"
else:
context += "*No configuration file found.*\n"
return context
def _global_context(self) -> str:
"""Context when no study is selected"""
context = "# Available Studies\n\n"
if self.studies_dir.exists():
studies = [
d.name
for d in self.studies_dir.iterdir()
if d.is_dir() and not d.name.startswith("_")
]
if studies:
context += "The following studies are available:\n\n"
for name in sorted(studies)[:20]:
context += f"- {name}\n"
if len(studies) > 20:
context += f"\n... and {len(studies) - 20} more\n"
else:
context += "No studies found. Use `create_study` tool to create one.\n"
else:
context += "Studies directory not found.\n"
context += "\n## Quick Actions\n\n"
context += "- **Create study**: Describe what you want to optimize\n"
context += "- **List studies**: Use `list_studies` tool for details\n"
context += "- **Open study**: Ask about a specific study by name\n"
return context
def _conversation_context(self, history: List[Dict[str, Any]]) -> str:
"""Recent conversation for continuity"""
if not history:
return ""
context = "# Recent Conversation\n\n"
for msg in history[-10:]:
role = "User" if msg.get("role") == "user" else "Assistant"
content = msg.get("content", "")[:500]
if len(msg.get("content", "")) > 500:
content += "..."
context += f"**{role}**: {content}\n\n"
return context
    def _canvas_context(self, canvas_state: Dict[str, Any]) -> str:
        """
        Build context from canvas state (nodes and edges).
        This is CRITICAL for Claude to understand the current workflow
        being built in the Canvas UI.

        Args:
            canvas_state: Dict from the frontend containing "nodes", "edges",
                "studyName", and optionally "studyPath". Each node carries a
                "type" string and a "data" payload of UI-entered fields.

        Returns:
            Markdown rendering of the canvas, ending with static tool-usage
            instructions for modifying it.
        """
        context = "# Current Canvas State\n\n"
        context += "**You are assisting the user with a Canvas Builder workflow.**\n"
        context += "The canvas represents an optimization pipeline being configured visually.\n\n"
        nodes = canvas_state.get("nodes", [])
        edges = canvas_state.get("edges", [])
        study_name = canvas_state.get("studyName", "Untitled")
        study_path = canvas_state.get("studyPath", None)
        context += f"**Study Name**: {study_name}\n"
        if study_path:
            context += f"**Study Path**: {study_path}\n"
        context += "\n"
        # Group nodes by their "type" field so each section below can render
        # all nodes of one kind together.
        node_types = {}
        for node in nodes:
            node_type = node.get("type", "unknown")
            if node_type not in node_types:
                node_types[node_type] = []
            node_types[node_type].append(node)
        # Model node — only the first model node is rendered ([0])
        if "model" in node_types:
            model = node_types["model"][0]
            data = model.get("data", {})
            context += "## Model\n"
            context += f"- **Label**: {data.get('label', 'Model')}\n"
            context += f"- **File Path**: {data.get('filePath', 'Not set')}\n"
            context += f"- **File Type**: {data.get('fileType', 'Not set')}\n\n"
        # Solver node — only the first solver node is rendered ([0])
        if "solver" in node_types:
            solver = node_types["solver"][0]
            data = solver.get("data", {})
            context += "## Solver\n"
            context += f"- **Type**: {data.get('solverType', 'Not set')}\n\n"
        # Design variables — rendered as one markdown table row per node
        if "designVar" in node_types:
            context += "## Design Variables\n\n"
            context += "| Name | Expression | Min | Max | Baseline | Unit | Enabled |\n"
            context += "|------|------------|-----|-----|----------|------|---------|\n"
            for dv in node_types["designVar"]:
                data = dv.get("data", {})
                name = data.get("label", "?")
                expr = data.get("expressionName", data.get("label", "?"))
                min_val = data.get("minValue", "?")
                max_val = data.get("maxValue", "?")
                baseline = data.get("baseline", "-")
                unit = data.get("unit", "-")
                # NOTE(review): both branches are "" — the enabled/disabled
                # marker glyphs appear to be missing; confirm intended symbols.
                enabled = "" if data.get("enabled", True) else ""
                context += f"| {name} | {expr} | {min_val} | {max_val} | {baseline} | {unit} | {enabled} |\n"
            context += "\n"
        # Extractors — one "###" subsection per node; optional fields are
        # included only when present (truthy) in the node data.
        if "extractor" in node_types:
            context += "## Extractors\n\n"
            for ext in node_types["extractor"]:
                data = ext.get("data", {})
                context += f"### {data.get('extractorName', data.get('label', 'Extractor'))}\n"
                context += f"- **ID**: {data.get('extractorId', 'Not set')}\n"
                context += f"- **Type**: {data.get('extractorType', 'Not set')}\n"
                if data.get("extractMethod"):
                    context += f"- **Method**: {data.get('extractMethod')}\n"
                if data.get("innerRadius"):
                    context += f"- **Inner Radius**: {data.get('innerRadius')}\n"
                if data.get("nModes"):
                    context += f"- **Zernike Modes**: {data.get('nModes')}\n"
                if data.get("subcases"):
                    context += f"- **Subcases**: {data.get('subcases')}\n"
                # Nested config payload with subcase metadata, if supplied
                if data.get("config"):
                    config = data.get("config", {})
                    if config.get("subcaseLabels"):
                        context += f"- **Subcase Labels**: {config.get('subcaseLabels')}\n"
                    if config.get("referenceSubcase"):
                        context += f"- **Reference Subcase**: {config.get('referenceSubcase')}\n"
                context += "\n"
        # Objectives — markdown table
        if "objective" in node_types:
            context += "## Objectives\n\n"
            context += "| Name | Direction | Weight | Penalty |\n"
            context += "|------|-----------|--------|---------|\n"
            for obj in node_types["objective"]:
                data = obj.get("data", {})
                name = data.get("name", data.get("label", "?"))
                direction = data.get("direction", "minimize")
                weight = data.get("weight", 1)
                penalty = data.get("penaltyWeight", "-")
                context += f"| {name} | {direction} | {weight} | {penalty} |\n"
            context += "\n"
        # Constraints — markdown table
        if "constraint" in node_types:
            context += "## Constraints\n\n"
            context += "| Name | Operator | Value |\n"
            context += "|------|----------|-------|\n"
            for con in node_types["constraint"]:
                data = con.get("data", {})
                name = data.get("name", data.get("label", "?"))
                operator = data.get("operator", "?")
                value = data.get("value", "?")
                context += f"| {name} | {operator} | {value} |\n"
            context += "\n"
        # Algorithm node — only the first one is rendered ([0])
        if "algorithm" in node_types:
            algo = node_types["algorithm"][0]
            data = algo.get("data", {})
            context += "## Algorithm\n"
            context += f"- **Method**: {data.get('method', 'Not set')}\n"
            context += f"- **Max Trials**: {data.get('maxTrials', 'Not set')}\n"
            if data.get("sigma0"):
                context += f"- **CMA-ES Sigma0**: {data.get('sigma0')}\n"
            if data.get("restartStrategy"):
                context += f"- **Restart Strategy**: {data.get('restartStrategy')}\n"
            context += "\n"
        # Surrogate node — only the first one is rendered ([0])
        if "surrogate" in node_types:
            sur = node_types["surrogate"][0]
            data = sur.get("data", {})
            context += "## Surrogate\n"
            context += f"- **Enabled**: {data.get('enabled', False)}\n"
            context += f"- **Type**: {data.get('modelType', 'Not set')}\n"
            context += f"- **Min Trials**: {data.get('minTrials', 'Not set')}\n\n"
        # Edge connections summary — only the edge count is derived from the
        # state; the "Flow" line is a fixed description of the canonical pipeline.
        context += "## Connections\n\n"
        context += f"Total edges: {len(edges)}\n"
        context += "Flow: Design Variables → Model → Solver → Extractors → Objectives/Constraints → Algorithm\n\n"
        # Canvas modification instructions — static guidance telling Claude
        # which MCP tools to use for v2.0 specs vs. the legacy canvas format.
        context += """## Canvas Modification Tools
**For AtomizerSpec v2.0 studies (preferred):**
Use spec tools when working with v2.0 studies (check if study uses `atomizer_spec.json`):
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
- `spec_remove_node` - Remove nodes from the spec
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
**For Legacy Canvas (optimization_config.json):**
- `canvas_add_node` - Add a new node (designVar, extractor, objective, constraint)
- `canvas_update_node` - Update node properties (bounds, weights, names)
- `canvas_remove_node` - Remove a node from the canvas
- `canvas_connect_nodes` - Create an edge between nodes
**Example user requests you can handle:**
- "Add a design variable called hole_diameter with range 5-15 mm" → Use spec_add_node or canvas_add_node
- "Change the weight of wfe_40_20 to 8" → Use spec_modify or canvas_update_node
- "Remove the constraint node" → Use spec_remove_node or canvas_remove_node
- "Add a custom extractor that computes stress ratio" → Use spec_add_custom_extractor
Always respond with confirmation of changes made to the canvas/spec.
"""
        return context
def _mode_instructions(self, mode: str) -> str:
"""Mode-specific instructions"""
if mode == "power":
return """# Power Mode Instructions
You have **FULL ACCESS** to modify Atomizer studies. **DO NOT ASK FOR PERMISSION** - just do it.
## Direct Actions (no confirmation needed):
- **Add design variables**: Use `canvas_add_node` or `spec_add_node` with node_type="designVar"
- **Add extractors**: Use `canvas_add_node` with node_type="extractor"
- **Add objectives**: Use `canvas_add_node` with node_type="objective"
- **Add constraints**: Use `canvas_add_node` with node_type="constraint"
- **Update node properties**: Use `canvas_update_node` or `spec_modify`
- **Remove nodes**: Use `canvas_remove_node`
- **Edit atomizer_spec.json directly**: Use the Edit tool
## For custom extractors with Python code:
Use `spec_add_custom_extractor` to add a custom function.
## IMPORTANT:
- You have --dangerously-skip-permissions enabled
- The user has explicitly granted you power mode access
- **ACT IMMEDIATELY** when asked to add/modify/remove things
- Explain what you did AFTER doing it, not before
- Do NOT say "I need permission" - you already have it
Example: If user says "add a volume extractor", immediately use canvas_add_node to add it.
"""
else:
return """# User Mode Instructions
You can help with optimization workflows:
- Create and configure studies
- Run optimizations
- Analyze results
- Generate reports
- Explain FEA concepts
**For code modifications**, suggest switching to Power Mode.
Available tools:
- `list_studies`, `get_study_status`, `create_study`
- `run_optimization`, `stop_optimization`, `get_optimization_status`
- `get_trial_data`, `analyze_convergence`, `compare_trials`, `get_best_design`
- `generate_report`, `export_data`
- `explain_physics`, `recommend_method`, `query_extractors`
**AtomizerSpec v2.0 Tools (preferred for new studies):**
- `spec_get` - Get the full AtomizerSpec for a study
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
- `spec_remove_node` - Remove nodes from the spec
- `spec_validate` - Validate spec against JSON Schema
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
- `spec_create_from_description` - Create a new study from natural language description
**Canvas Tools (for visual workflow builder):**
- `validate_canvas_intent` - Validate a canvas-generated optimization intent
- `execute_canvas_intent` - Create a study from a canvas intent
- `interpret_canvas_intent` - Analyze intent and provide recommendations
When you receive a message containing "INTENT:" followed by JSON, this is from the Canvas UI.
Parse the intent and use the appropriate canvas tool to process it.
"""