diff --git a/atomizer-dashboard/backend/api/main.py b/atomizer-dashboard/backend/api/main.py
index 148089b9..d130613b 100644
--- a/atomizer-dashboard/backend/api/main.py
+++ b/atomizer-dashboard/backend/api/main.py
@@ -13,7 +13,7 @@ import sys
# Add parent directory to path to import optimization_engine
sys.path.append(str(Path(__file__).parent.parent.parent.parent))
-from api.routes import optimization, claude, terminal, insights, context, files, nx
+from api.routes import optimization, claude, terminal, insights, context, files, nx, claude_code, spec
from api.websocket import optimization_stream
@@ -60,6 +60,9 @@ app.include_router(insights.router, prefix="/api/insights", tags=["insights"])
app.include_router(context.router, prefix="/api/context", tags=["context"])
app.include_router(files.router, prefix="/api/files", tags=["files"])
app.include_router(nx.router, prefix="/api/nx", tags=["nx"])
+app.include_router(claude_code.router, prefix="/api", tags=["claude-code"])
+app.include_router(spec.router, prefix="/api", tags=["spec"])
+app.include_router(spec.validate_router, prefix="/api", tags=["spec"])
@app.get("/")
async def root():
diff --git a/atomizer-dashboard/backend/api/routes/claude.py b/atomizer-dashboard/backend/api/routes/claude.py
index 0be8b556..ffe7bbc2 100644
--- a/atomizer-dashboard/backend/api/routes/claude.py
+++ b/atomizer-dashboard/backend/api/routes/claude.py
@@ -187,7 +187,15 @@ async def session_websocket(websocket: WebSocket, session_id: str):
continue
# Get canvas state from message or use stored state
- canvas_state = data.get("canvas_state") or current_canvas_state
+ msg_canvas = data.get("canvas_state")
+ canvas_state = msg_canvas if msg_canvas is not None else current_canvas_state
+
+ # Debug logging
+ if canvas_state:
+ node_count = len(canvas_state.get("nodes", []))
+ print(f"[Claude WS] Sending message with canvas state: {node_count} nodes")
+ else:
+ print("[Claude WS] Sending message WITHOUT canvas state")
async for chunk in manager.send_message(
session_id,
@@ -401,6 +409,175 @@ async def websocket_chat(websocket: WebSocket):
pass
+# ========== POWER MODE: Direct API with Write Tools ==========
+
+@router.websocket("/sessions/{session_id}/ws/power")
+async def power_mode_websocket(websocket: WebSocket, session_id: str):
+ """
+ WebSocket for power mode chat using direct Anthropic API with write tools.
+
+ Unlike the regular /ws endpoint which uses Claude CLI + MCP,
+ this uses AtomizerClaudeAgent directly with built-in write tools.
+ This allows immediate modifications without permission prompts.
+
+ Message formats (client -> server):
+ {"type": "message", "content": "user message"}
+ {"type": "set_study", "study_id": "study_name"}
+ {"type": "ping"}
+
+ Message formats (server -> client):
+ {"type": "text", "content": "..."}
+ {"type": "tool_call", "tool": "...", "input": {...}}
+ {"type": "tool_result", "result": "..."}
+ {"type": "done", "tool_calls": [...]}
+ {"type": "error", "message": "..."}
+ {"type": "spec_modified", "changes": [...]}
+ {"type": "pong"}
+ """
+ await websocket.accept()
+
+ manager = get_session_manager()
+ session = manager.get_session(session_id)
+
+ if not session:
+ await websocket.send_json({"type": "error", "message": "Session not found"})
+ await websocket.close()
+ return
+
+ # Import AtomizerClaudeAgent for direct API access
+ from api.services.claude_agent import AtomizerClaudeAgent
+
+ # Create agent with study context
+ agent = AtomizerClaudeAgent(study_id=session.study_id)
+ conversation_history: List[Dict[str, Any]] = []
+
+ # Load initial spec and set canvas state so Claude sees current canvas
+ initial_spec = agent.load_current_spec()
+ if initial_spec:
+ # Send initial spec to frontend
+ await websocket.send_json({
+ "type": "spec_updated",
+ "spec": initial_spec,
+ "reason": "initial_load"
+ })
+
+ try:
+ while True:
+ data = await websocket.receive_json()
+
+ if data.get("type") == "message":
+ content = data.get("content", "")
+ if not content:
+ continue
+
+ try:
+ # Use streaming API with tool support for real-time response
+ last_tool_calls = []
+ async for event in agent.chat_stream_with_tools(content, conversation_history):
+ event_type = event.get("type")
+
+ if event_type == "text":
+ # Stream text tokens to frontend immediately
+ await websocket.send_json({
+ "type": "text",
+ "content": event.get("content", ""),
+ })
+
+ elif event_type == "tool_call":
+ # Tool is being called
+ tool_info = event.get("tool", {})
+ await websocket.send_json({
+ "type": "tool_call",
+ "tool": tool_info,
+ })
+
+ elif event_type == "tool_result":
+ # Tool finished executing
+ tool_name = event.get("tool", "")
+ await websocket.send_json({
+ "type": "tool_result",
+ "tool": tool_name,
+ "result": event.get("result", ""),
+ })
+
+ # If it was a write tool, send full updated spec
+ if tool_name in ["add_design_variable", "add_extractor",
+ "add_objective", "add_constraint",
+ "update_spec_field", "remove_node",
+ "create_study"]:
+ # Load updated spec and update agent's canvas state
+ updated_spec = agent.load_current_spec()
+ if updated_spec:
+ await websocket.send_json({
+ "type": "spec_updated",
+ "tool": tool_name,
+ "spec": updated_spec, # Full spec for direct canvas update
+ })
+
+ elif event_type == "done":
+ # Streaming complete
+ last_tool_calls = event.get("tool_calls", [])
+ await websocket.send_json({
+ "type": "done",
+ "tool_calls": last_tool_calls,
+ })
+
+ # Update conversation history for next message
+ # Note: For proper history tracking, we'd need to store messages properly
+ # For now, we append the user message and response
+ conversation_history.append({"role": "user", "content": content})
+ conversation_history.append({"role": "assistant", "content": event.get("response", "")})
+
+ except Exception as e:
+ import traceback
+ traceback.print_exc()
+ await websocket.send_json({
+ "type": "error",
+ "message": str(e),
+ })
+
+ elif data.get("type") == "canvas_edit":
+ # User made a manual edit to the canvas - update Claude's context
+ spec = data.get("spec")
+ if spec:
+ agent.set_canvas_state(spec)
+ await websocket.send_json({
+ "type": "canvas_edit_received",
+ "acknowledged": True
+ })
+
+ elif data.get("type") == "set_study":
+ study_id = data.get("study_id")
+ if study_id:
+ await manager.set_study_context(session_id, study_id)
+ # Recreate agent with new study context
+ agent = AtomizerClaudeAgent(study_id=study_id)
+ conversation_history = [] # Clear history on study change
+ # Load spec for new study
+ new_spec = agent.load_current_spec()
+ await websocket.send_json({
+ "type": "context_updated",
+ "study_id": study_id,
+ })
+ if new_spec:
+ await websocket.send_json({
+ "type": "spec_updated",
+ "spec": new_spec,
+ "reason": "study_change"
+ })
+
+ elif data.get("type") == "ping":
+ await websocket.send_json({"type": "pong"})
+
+ except WebSocketDisconnect:
+ pass
+ except Exception as e:
+ try:
+ await websocket.send_json({"type": "error", "message": str(e)})
+        except Exception:
+ pass
+
+
@router.get("/suggestions")
async def get_chat_suggestions(study_id: Optional[str] = None):
"""
diff --git a/atomizer-dashboard/backend/api/routes/optimization.py b/atomizer-dashboard/backend/api/routes/optimization.py
index 647ed2bf..f4d10568 100644
--- a/atomizer-dashboard/backend/api/routes/optimization.py
+++ b/atomizer-dashboard/backend/api/routes/optimization.py
@@ -38,16 +38,30 @@ def resolve_study_path(study_id: str) -> Path:
"""Find study folder by scanning all topic directories.
Supports nested folder structure: studies/Topic/study_name/
- Study ID is the short name (e.g., 'm1_mirror_adaptive_V14')
+ Study ID can be:
+ - Short name (e.g., 'm1_mirror_adaptive_V14') - scans all topic folders
+ - Full nested path (e.g., 'M1_Mirror/m1_mirror_cost_reduction_lateral')
Returns the full path to the study directory.
Raises HTTPException 404 if not found.
"""
+ # Handle nested path format (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
+ if "/" in study_id:
+ # Try with forward slashes
+ nested_path = STUDIES_DIR / study_id
+ if nested_path.exists() and nested_path.is_dir():
+ if _is_valid_study_dir(nested_path):
+ return nested_path
+ # Try with backslashes (Windows path)
+ nested_path = STUDIES_DIR / study_id.replace("/", "\\")
+ if nested_path.exists() and nested_path.is_dir():
+ if _is_valid_study_dir(nested_path):
+ return nested_path
+
# First check direct path (backwards compatibility for flat structure)
direct_path = STUDIES_DIR / study_id
if direct_path.exists() and direct_path.is_dir():
- # Verify it's actually a study (has 1_setup or config)
- if (direct_path / "1_setup").exists() or (direct_path / "optimization_config.json").exists():
+ if _is_valid_study_dir(direct_path):
return direct_path
# Scan topic folders for nested structure
@@ -55,13 +69,21 @@ def resolve_study_path(study_id: str) -> Path:
if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
study_dir = topic_dir / study_id
if study_dir.exists() and study_dir.is_dir():
- # Verify it's actually a study
- if (study_dir / "1_setup").exists() or (study_dir / "optimization_config.json").exists():
+ if _is_valid_study_dir(study_dir):
return study_dir
raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
+def _is_valid_study_dir(study_dir: Path) -> bool:
+ """Check if a directory is a valid study directory."""
+ return (
+ (study_dir / "1_setup").exists() or
+ (study_dir / "optimization_config.json").exists() or
+ (study_dir / "atomizer_spec.json").exists()
+ )
+
+
def get_study_topic(study_dir: Path) -> Optional[str]:
"""Get the topic folder name for a study, or None if in root."""
# Check if parent is a topic folder (not the root studies dir)
@@ -1542,16 +1564,17 @@ async def get_study_image(study_id: str, image_path: str):
raise HTTPException(status_code=500, detail=f"Failed to serve image: {str(e)}")
-@router.get("/studies/{study_id}/config")
+@router.get("/studies/{study_id:path}/config")
async def get_study_config(study_id: str):
"""
- Get the full optimization_config.json for a study
+ Get the study configuration - reads from atomizer_spec.json (v2.0) first,
+ falls back to legacy optimization_config.json if not found.
Args:
study_id: Study identifier
Returns:
- JSON with the complete configuration
+ JSON with the complete configuration in a unified format
"""
try:
study_dir = resolve_study_path(study_id)
@@ -1559,7 +1582,22 @@ async def get_study_config(study_id: str):
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
- # Look for config in various locations
+ # Priority 1: atomizer_spec.json (v2.0 unified format)
+ spec_file = study_dir / "atomizer_spec.json"
+ if spec_file.exists():
+ with open(spec_file) as f:
+ spec = json.load(f)
+
+ # Transform AtomizerSpec to the expected config format
+ config = _transform_spec_to_config(spec, study_id)
+ return {
+ "config": config,
+ "path": str(spec_file),
+ "study_id": study_id,
+ "source": "atomizer_spec"
+ }
+
+ # Priority 2: Legacy optimization_config.json
config_file = study_dir / "1_setup" / "optimization_config.json"
if not config_file.exists():
config_file = study_dir / "optimization_config.json"
@@ -1573,7 +1611,8 @@ async def get_study_config(study_id: str):
return {
"config": config,
"path": str(config_file),
- "study_id": study_id
+ "study_id": study_id,
+ "source": "legacy_config"
}
except HTTPException:
@@ -1582,6 +1621,118 @@ async def get_study_config(study_id: str):
raise HTTPException(status_code=500, detail=f"Failed to read config: {str(e)}")
+def _transform_spec_to_config(spec: dict, study_id: str) -> dict:
+ """Transform AtomizerSpec v2.0 format to legacy config format for backwards compatibility."""
+ meta = spec.get("meta", {})
+ model = spec.get("model", {})
+ optimization = spec.get("optimization", {})
+
+ # Transform design variables
+ design_variables = []
+ for dv in spec.get("design_variables", []):
+ bounds = dv.get("bounds", {})
+ design_variables.append({
+ "name": dv.get("name"),
+ "expression_name": dv.get("expression_name"),
+ "type": "float" if dv.get("type") == "continuous" else dv.get("type", "float"),
+ "min": bounds.get("min"),
+ "max": bounds.get("max"),
+ "low": bounds.get("min"), # Alias for compatibility
+ "high": bounds.get("max"), # Alias for compatibility
+ "baseline": dv.get("baseline"),
+ "unit": dv.get("units"),
+ "units": dv.get("units"),
+ "enabled": dv.get("enabled", True)
+ })
+
+ # Transform objectives
+ objectives = []
+ for obj in spec.get("objectives", []):
+ source = obj.get("source", {})
+ objectives.append({
+ "name": obj.get("name"),
+ "direction": obj.get("direction", "minimize"),
+ "weight": obj.get("weight", 1.0),
+ "target": obj.get("target"),
+ "unit": obj.get("units"),
+ "units": obj.get("units"),
+ "extractor_id": source.get("extractor_id"),
+ "output_key": source.get("output_key")
+ })
+
+ # Transform constraints
+ constraints = []
+ for con in spec.get("constraints", []):
+ constraints.append({
+ "name": con.get("name"),
+ "type": _operator_to_type(con.get("operator", "<=")),
+ "operator": con.get("operator"),
+            "max_value": con.get("threshold") if con.get("operator", "<=") in ["<=", "<"] else None,
+ "min_value": con.get("threshold") if con.get("operator") in [">=", ">"] else None,
+ "bound": con.get("threshold"),
+ "unit": con.get("units"),
+ "units": con.get("units")
+ })
+
+ # Transform extractors
+ extractors = []
+ for ext in spec.get("extractors", []):
+ extractors.append({
+ "name": ext.get("name"),
+ "type": ext.get("type"),
+ "builtin": ext.get("builtin", True),
+ "config": ext.get("config", {}),
+ "outputs": ext.get("outputs", [])
+ })
+
+ # Get algorithm info
+ algorithm = optimization.get("algorithm", {})
+ budget = optimization.get("budget", {})
+
+ # Build the config in legacy format
+ config = {
+ "study_name": meta.get("study_name", study_id),
+ "description": meta.get("description", ""),
+ "version": meta.get("version", "2.0"),
+ "design_variables": design_variables,
+ "objectives": objectives,
+ "constraints": constraints,
+ "extractors": extractors,
+ "optimization": {
+ "algorithm": algorithm.get("type", "TPE"),
+ "n_trials": budget.get("max_trials", 100),
+ "max_time_hours": budget.get("max_time_hours"),
+ "convergence_patience": budget.get("convergence_patience")
+ },
+ "optimization_settings": {
+ "sampler": algorithm.get("type", "TPE"),
+ "n_trials": budget.get("max_trials", 100)
+ },
+ "algorithm": {
+ "name": "Optuna",
+ "sampler": algorithm.get("type", "TPE"),
+ "n_trials": budget.get("max_trials", 100)
+ },
+ "model": model,
+ "sim_file": model.get("sim", {}).get("path") if isinstance(model.get("sim"), dict) else None
+ }
+
+ return config
+
+
+def _operator_to_type(operator: str) -> str:
+ """Convert constraint operator to legacy type string."""
+ mapping = {
+ "<=": "le",
+ "<": "le",
+ ">=": "ge",
+ ">": "ge",
+ "==": "eq",
+ "=": "eq"
+ }
+ return mapping.get(operator, "le")
+
+
# ============================================================================
# Process Control Endpoints
# ============================================================================
@@ -2851,7 +3002,162 @@ async def get_study_runs(study_id: str):
class UpdateConfigRequest(BaseModel):
- config: dict
+ config: Optional[dict] = None
+ intent: Optional[dict] = None
+
+
+def intent_to_config(intent: dict, existing_config: Optional[dict] = None) -> dict:
+ """
+ Convert canvas intent format to optimization_config.json format.
+
+ Preserves existing config fields that aren't in the intent.
+ """
+ # Start with existing config or empty
+ config = existing_config.copy() if existing_config else {}
+
+ # Metadata
+ if intent.get('model', {}).get('path'):
+ model_path = Path(intent['model']['path']).name
+ if 'simulation' not in config:
+ config['simulation'] = {}
+ config['simulation']['model_file'] = model_path
+ # Try to infer other files from model name
+        base_name = model_path.removesuffix('.prt')
+ if not config['simulation'].get('fem_file'):
+ config['simulation']['fem_file'] = f"{base_name}_fem1.fem"
+ if not config['simulation'].get('sim_file'):
+ config['simulation']['sim_file'] = f"{base_name}_sim1.sim"
+
+ # Solver
+ if intent.get('solver', {}).get('type'):
+ solver_type = intent['solver']['type']
+ if 'simulation' not in config:
+ config['simulation'] = {}
+ config['simulation']['solver'] = 'nastran'
+ # Map SOL types to analysis_types
+ sol_to_analysis = {
+ 'SOL101': ['static'],
+ 'SOL103': ['modal'],
+ 'SOL105': ['buckling'],
+ 'SOL106': ['nonlinear'],
+ 'SOL111': ['modal', 'frequency_response'],
+ 'SOL112': ['modal', 'transient'],
+ }
+ config['simulation']['analysis_types'] = sol_to_analysis.get(solver_type, ['static'])
+
+ # Design Variables
+ if intent.get('design_variables'):
+ config['design_variables'] = []
+ for dv in intent['design_variables']:
+ config['design_variables'].append({
+ 'parameter': dv.get('name', dv.get('expression_name', '')),
+ 'bounds': [dv.get('min', 0), dv.get('max', 100)],
+ 'description': dv.get('description', f"Design variable: {dv.get('name', '')}"),
+ })
+
+ # Extractors → used for objectives/constraints extraction
+ extractor_map = {}
+ if intent.get('extractors'):
+ for ext in intent['extractors']:
+            # Map is keyed by name; the extractor 'id' is read via ext.get('id') where needed
+ ext_name = ext.get('name', '')
+ extractor_map[ext_name] = ext
+
+ # Objectives
+ if intent.get('objectives'):
+ config['objectives'] = []
+ for obj in intent['objectives']:
+ obj_config = {
+ 'name': obj.get('name', 'objective'),
+ 'goal': obj.get('direction', 'minimize'),
+ 'weight': obj.get('weight', 1.0),
+ 'description': obj.get('description', f"Objective: {obj.get('name', '')}"),
+ }
+ # Add extraction config if extractor referenced
+ extractor_name = obj.get('extractor')
+ if extractor_name and extractor_name in extractor_map:
+ ext = extractor_map[extractor_name]
+ ext_config = ext.get('config', {})
+ obj_config['extraction'] = {
+ 'action': _extractor_id_to_action(ext.get('id', '')),
+ 'domain': 'result_extraction',
+ 'params': ext_config,
+ }
+ config['objectives'].append(obj_config)
+
+ # Constraints
+ if intent.get('constraints'):
+ config['constraints'] = []
+ for con in intent['constraints']:
+ op = con.get('operator', '<=')
+ con_type = 'less_than' if '<' in op else 'greater_than' if '>' in op else 'equal_to'
+ con_config = {
+ 'name': con.get('name', 'constraint'),
+ 'type': con_type,
+ 'threshold': con.get('value', 0),
+ 'description': con.get('description', f"Constraint: {con.get('name', '')}"),
+ }
+ # Add extraction config if extractor referenced
+ extractor_name = con.get('extractor')
+ if extractor_name and extractor_name in extractor_map:
+ ext = extractor_map[extractor_name]
+ ext_config = ext.get('config', {})
+ con_config['extraction'] = {
+ 'action': _extractor_id_to_action(ext.get('id', '')),
+ 'domain': 'result_extraction',
+ 'params': ext_config,
+ }
+ config['constraints'].append(con_config)
+
+ # Optimization settings
+ if intent.get('optimization'):
+ opt = intent['optimization']
+ if 'optimization_settings' not in config:
+ config['optimization_settings'] = {}
+ if opt.get('max_trials'):
+ config['optimization_settings']['n_trials'] = opt['max_trials']
+ if opt.get('method'):
+ # Map method names to Optuna sampler names
+ method_map = {
+ 'TPE': 'TPESampler',
+ 'CMA-ES': 'CmaEsSampler',
+ 'NSGA-II': 'NSGAIISampler',
+ 'RandomSearch': 'RandomSampler',
+ 'GP-BO': 'GPSampler',
+ }
+ config['optimization_settings']['sampler'] = method_map.get(opt['method'], opt['method'])
+
+ # Surrogate
+ if intent.get('surrogate', {}).get('enabled'):
+ config['surrogate'] = {
+ 'type': intent['surrogate'].get('type', 'MLP'),
+ 'min_trials': intent['surrogate'].get('min_trials', 20),
+ }
+
+ return config
+
+
+def _extractor_id_to_action(ext_id: str) -> str:
+ """Map extractor IDs (E1, E2, etc.) to extraction action names."""
+ action_map = {
+ 'E1': 'extract_displacement',
+ 'E2': 'extract_frequency',
+ 'E3': 'extract_stress',
+ 'E4': 'extract_mass',
+ 'E5': 'extract_mass',
+ 'E8': 'extract_zernike',
+ 'E9': 'extract_zernike',
+ 'E10': 'extract_zernike',
+ 'displacement': 'extract_displacement',
+ 'frequency': 'extract_frequency',
+ 'stress': 'extract_stress',
+ 'mass': 'extract_mass',
+ 'mass_bdf': 'extract_mass',
+ 'mass_cad': 'extract_mass',
+ 'zernike': 'extract_zernike',
+ 'zernike_opd': 'extract_zernike',
+ }
+ return action_map.get(ext_id, 'extract_displacement')
@router.put("/studies/{study_id}/config")
@@ -2859,9 +3165,13 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
"""
Update the optimization_config.json for a study
+ Accepts either:
+ - {"config": {...}} - Direct config object (overwrites)
+ - {"intent": {...}} - Canvas intent (converted and merged with existing)
+
Args:
study_id: Study identifier
- request: New configuration data
+ request: New configuration data (config or intent)
Returns:
JSON with success status
@@ -2891,9 +3201,24 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
backup_file = config_file.with_suffix('.json.backup')
shutil.copy(config_file, backup_file)
+ # Determine which format was provided
+ if request.config is not None:
+ # Direct config update
+ new_config = request.config
+ elif request.intent is not None:
+ # Convert intent to config, merging with existing
+ with open(config_file, 'r') as f:
+ existing_config = json.load(f)
+ new_config = intent_to_config(request.intent, existing_config)
+ else:
+ raise HTTPException(
+ status_code=400,
+ detail="Request must include either 'config' or 'intent' field"
+ )
+
# Write new config
with open(config_file, 'w') as f:
- json.dump(request.config, f, indent=2)
+ json.dump(new_config, f, indent=2)
return {
"success": True,
diff --git a/atomizer-dashboard/backend/api/routes/spec.py b/atomizer-dashboard/backend/api/routes/spec.py
new file mode 100644
index 00000000..47a5307b
--- /dev/null
+++ b/atomizer-dashboard/backend/api/routes/spec.py
@@ -0,0 +1,646 @@
+"""
+AtomizerSpec v2.0 API Endpoints
+
+REST API for managing AtomizerSpec configurations.
+All spec modifications flow through these endpoints.
+
+Endpoints:
+- GET /studies/{study_id}/spec - Get full spec
+- PUT /studies/{study_id}/spec - Replace entire spec
+- PATCH /studies/{study_id}/spec - Partial update
+- POST /studies/{study_id}/spec/validate - Validate spec
+- POST /studies/{study_id}/spec/nodes - Add node
+- PATCH /studies/{study_id}/spec/nodes/{node_id} - Update node
+- DELETE /studies/{study_id}/spec/nodes/{node_id} - Delete node
+- POST /studies/{study_id}/spec/custom-functions - Add custom extractor
+- WebSocket /studies/{study_id}/spec/sync - Real-time sync
+"""
+
+from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect, Query
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Union
+import json
+import sys
+import asyncio
+
+# Add project root to path
+sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
+
+from api.services.spec_manager import (
+ SpecManager,
+ SpecManagerError,
+ SpecNotFoundError,
+ SpecConflictError,
+ get_spec_manager,
+)
+from optimization_engine.config.spec_models import (
+ AtomizerSpec,
+ ValidationReport,
+)
+from optimization_engine.config.spec_validator import SpecValidationError
+
+router = APIRouter(prefix="/studies/{study_id:path}/spec", tags=["spec"])
+
+# Base studies directory
+STUDIES_DIR = Path(__file__).parent.parent.parent.parent.parent / "studies"
+
+
+# ============================================================================
+# Request/Response Models
+# ============================================================================
+
+class SpecPatchRequest(BaseModel):
+ """Request for patching a spec field."""
+ path: str = Field(..., description="JSONPath to the field (e.g., 'objectives[0].weight')")
+ value: Any = Field(..., description="New value")
+ modified_by: str = Field(default="api", description="Who is making the change")
+
+
+class NodeAddRequest(BaseModel):
+ """Request for adding a node."""
+ type: str = Field(..., description="Node type: designVar, extractor, objective, constraint")
+ data: Dict[str, Any] = Field(..., description="Node data")
+ modified_by: str = Field(default="canvas", description="Who is making the change")
+
+
+class NodeUpdateRequest(BaseModel):
+ """Request for updating a node."""
+ updates: Dict[str, Any] = Field(..., description="Fields to update")
+ modified_by: str = Field(default="canvas", description="Who is making the change")
+
+
+class CustomFunctionRequest(BaseModel):
+ """Request for adding a custom extractor function."""
+ name: str = Field(..., description="Function name")
+ code: str = Field(..., description="Python source code")
+ outputs: List[str] = Field(..., description="Output names")
+ description: Optional[str] = Field(default=None, description="Human-readable description")
+ modified_by: str = Field(default="claude", description="Who is making the change")
+
+
+class ExtractorValidationRequest(BaseModel):
+ """Request for validating custom extractor code."""
+ function_name: str = Field(default="extract", description="Expected function name")
+ source: str = Field(..., description="Python source code to validate")
+
+
+class SpecUpdateResponse(BaseModel):
+ """Response for spec modification operations."""
+ success: bool
+ hash: str
+ modified: str
+ modified_by: str
+
+
+class NodeAddResponse(BaseModel):
+ """Response for node add operation."""
+ success: bool
+ node_id: str
+ message: str
+
+
+class ValidationResponse(BaseModel):
+ """Response for validation endpoint."""
+ valid: bool
+ errors: List[Dict[str, Any]]
+ warnings: List[Dict[str, Any]]
+ summary: Dict[str, int]
+
+
+# ============================================================================
+# Helper Functions
+# ============================================================================
+
+def resolve_study_path(study_id: str) -> Path:
+ """Find study folder by scanning all topic directories.
+
+ Supports both formats:
+ - "study_name" - Will scan topic folders to find it
+ - "Topic/study_name" - Direct nested path (e.g., "M1_Mirror/m1_mirror_v1")
+ """
+ # Handle nested paths (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
+    if "/" in study_id:
+        nested_path = STUDIES_DIR / study_id  # pathlib accepts "/" on all platforms
+        if nested_path.exists() and nested_path.is_dir():
+            return nested_path
+        # Fallback: legacy IDs stored with a literal backslash separator
+        nested_path = STUDIES_DIR / study_id.replace("/", "\\")
+        if nested_path.exists() and nested_path.is_dir():
+            return nested_path
+
+ # Direct path (flat structure)
+ direct_path = STUDIES_DIR / study_id
+ if direct_path.exists() and direct_path.is_dir():
+ return direct_path
+
+ # Scan topic folders (nested structure)
+ for topic_dir in STUDIES_DIR.iterdir():
+ if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
+ study_dir = topic_dir / study_id
+ if study_dir.exists() and study_dir.is_dir():
+ return study_dir
+
+ raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
+
+
+def get_manager(study_id: str) -> SpecManager:
+ """Get SpecManager for a study."""
+ study_path = resolve_study_path(study_id)
+ return get_spec_manager(study_path)
+
+
+# ============================================================================
+# REST Endpoints
+# ============================================================================
+
+@router.get("", response_model=None)
+async def get_spec(study_id: str):
+ """
+ Get the full AtomizerSpec for a study.
+
+ Returns the complete spec JSON with all design variables, extractors,
+ objectives, constraints, and canvas state.
+ """
+ manager = get_manager(study_id)
+
+ if not manager.exists():
+ raise HTTPException(
+ status_code=404,
+ detail=f"No AtomizerSpec found for study '{study_id}'. Use migration or create new spec."
+ )
+
+ try:
+ spec = manager.load()
+ return spec.model_dump(mode='json')
+ except SpecValidationError as e:
+ # Return spec even if invalid, but include validation info
+ raw = manager.load_raw()
+ return JSONResponse(
+ status_code=200,
+ content={
+ **raw,
+ "_validation_error": str(e)
+ }
+ )
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get("/raw")
+async def get_spec_raw(study_id: str):
+ """
+ Get the raw spec JSON without validation.
+
+ Useful for debugging or when spec is invalid.
+ """
+ manager = get_manager(study_id)
+
+ if not manager.exists():
+ raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
+
+ try:
+ return manager.load_raw()
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get("/hash")
+async def get_spec_hash(study_id: str):
+ """Get the current spec hash for conflict detection."""
+ manager = get_manager(study_id)
+
+ if not manager.exists():
+ raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
+
+ return {"hash": manager.get_hash()}
+
+
+@router.put("", response_model=SpecUpdateResponse)
+async def replace_spec(
+ study_id: str,
+ spec: Dict[str, Any],
+ modified_by: str = Query(default="api"),
+ expected_hash: Optional[str] = Query(default=None)
+):
+ """
+ Replace the entire spec.
+
+ Validates the new spec before saving. Optionally check for conflicts
+ using expected_hash parameter.
+ """
+ manager = get_manager(study_id)
+
+ try:
+ new_hash = manager.save(spec, modified_by=modified_by, expected_hash=expected_hash)
+ reloaded = manager.load()
+ return SpecUpdateResponse(
+ success=True,
+ hash=new_hash,
+ modified=reloaded.meta.modified or "",
+ modified_by=modified_by
+ )
+ except SpecConflictError as e:
+ raise HTTPException(
+ status_code=409,
+ detail={
+ "message": str(e),
+ "current_hash": e.current_hash
+ }
+ )
+ except SpecValidationError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.patch("", response_model=SpecUpdateResponse)
+async def patch_spec(study_id: str, request: SpecPatchRequest):
+ """
+ Partial update to spec using JSONPath.
+
+ Example paths:
+ - "objectives[0].weight" - Update objective weight
+ - "design_variables[1].bounds.max" - Update DV bound
+ - "meta.description" - Update description
+ """
+ manager = get_manager(study_id)
+
+ if not manager.exists():
+ raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
+
+ try:
+ spec = manager.patch(request.path, request.value, modified_by=request.modified_by)
+ return SpecUpdateResponse(
+ success=True,
+ hash=manager.get_hash(),
+ modified=spec.meta.modified or "",
+ modified_by=request.modified_by
+ )
+ except SpecValidationError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+ except ValueError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.post("/validate", response_model=ValidationResponse)
+async def validate_spec(study_id: str):
+ """
+ Validate the spec and return detailed report.
+
+ Returns errors, warnings, and summary of the spec contents.
+ """
+ manager = get_manager(study_id)
+
+ if not manager.exists():
+ raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
+
+ try:
+ report = manager.validate_and_report()
+ return ValidationResponse(
+ valid=report.valid,
+ errors=[e.model_dump() for e in report.errors],
+ warnings=[w.model_dump() for w in report.warnings],
+ summary=report.summary.model_dump()
+ )
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+# ============================================================================
+# Node CRUD Endpoints
+# ============================================================================
+
@router.post("/nodes", response_model=NodeAddResponse)
async def add_node(study_id: str, request: NodeAddRequest):
    """
    Add a new node to the spec.

    Supported types: designVar, extractor, objective, constraint
    """
    mgr = get_manager(study_id)

    if not mgr.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    # Reject unknown node kinds up front with a helpful message.
    valid_types = ["designVar", "extractor", "objective", "constraint"]
    if request.type not in valid_types:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid node type '{request.type}'. Valid: {valid_types}"
        )

    try:
        created_id = mgr.add_node(request.type, request.data, modified_by=request.modified_by)
        return NodeAddResponse(
            success=True,
            node_id=created_id,
            message=f"Added {request.type} node: {created_id}"
        )
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.patch("/nodes/{node_id}")
async def update_node(study_id: str, node_id: str, request: NodeUpdateRequest):
    """Update an existing node's properties.

    404 when the study/node is unknown, 400 when the update fails validation.
    """
    mgr = get_manager(study_id)

    if not mgr.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        mgr.update_node(node_id, request.updates, modified_by=request.modified_by)
    except SpecManagerError as e:
        # Unknown node id (or similar manager-level failure).
        raise HTTPException(status_code=404, detail=str(e))
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"success": True, "message": f"Updated node {node_id}"}
+
+
@router.delete("/nodes/{node_id}")
async def delete_node(
    study_id: str,
    node_id: str,
    modified_by: str = Query(default="canvas")
):
    """
    Delete a node and all edges referencing it.

    Use with caution - this will also remove any objectives or constraints
    that reference a deleted extractor.
    """
    mgr = get_manager(study_id)

    if not mgr.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        mgr.remove_node(node_id, modified_by=modified_by)
    except SpecManagerError as e:
        # Unknown node id.
        raise HTTPException(status_code=404, detail=str(e))
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"success": True, "message": f"Removed node {node_id}"}
+
+
+# ============================================================================
+# Custom Function Endpoint
+# ============================================================================
+
@router.post("/custom-functions", response_model=NodeAddResponse)
async def add_custom_function(study_id: str, request: CustomFunctionRequest):
    """
    Add a custom Python function as an extractor.

    The function will be available in the optimization workflow.
    Claude can use this to add new physics extraction logic.
    """
    mgr = get_manager(study_id)

    if not mgr.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        created_id = mgr.add_custom_function(
            name=request.name,
            code=request.code,
            outputs=request.outputs,
            description=request.description,
            modified_by=request.modified_by,
        )
        return NodeAddResponse(
            success=True,
            node_id=created_id,
            message=f"Added custom extractor: {request.name}"
        )
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
+
+
# Separate router for non-study-specific endpoints
validate_router = APIRouter(prefix="/spec", tags=["spec"])


@validate_router.post("/validate-extractor")
async def validate_custom_extractor(request: ExtractorValidationRequest):
    """
    Validate custom extractor Python code.

    Checks syntax, security patterns, and function signature.
    Does not require a study - can be used before adding to spec.
    """
    # The loader is imported lazily so this endpoint degrades to a clean 500
    # (rather than breaking the whole module) when the engine is absent.
    try:
        from optimization_engine.extractors.custom_extractor_loader import (
            ExtractorSecurityError,
            validate_extractor_code,
        )

        try:
            ok, problems = validate_extractor_code(request.source, request.function_name)
            return {"valid": ok, "errors": problems}
        except ExtractorSecurityError as e:
            # Security rejection is a normal validation outcome, not an error.
            return {"valid": False, "errors": [str(e)]}

    except ImportError as e:
        raise HTTPException(
            status_code=500,
            detail=f"Custom extractor loader not available: {e}"
        )
+
+
+# ============================================================================
+# Edge Endpoints
+# ============================================================================
+
@router.post("/edges")
async def add_edge(
    study_id: str,
    source: str = Query(..., description="Source node ID"),
    target: str = Query(..., description="Target node ID"),
    modified_by: str = Query(default="canvas")
):
    """Add a canvas edge between two nodes.

    Returns 404 when the study has no spec or a referenced node is unknown,
    400 when the edge fails validation, 500 otherwise.
    """
    manager = get_manager(study_id)

    if not manager.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        manager.add_edge(source, target, modified_by=modified_by)
        return {"success": True, "message": f"Added edge {source} -> {target}"}
    # Consistency fix: map known error types to precise statuses, matching
    # the node endpoints (previously everything surfaced as 500).
    except SpecManagerError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.delete("/edges")
async def delete_edge(
    study_id: str,
    source: str = Query(..., description="Source node ID"),
    target: str = Query(..., description="Target node ID"),
    modified_by: str = Query(default="canvas")
):
    """Remove a canvas edge.

    Returns 404 when the study has no spec or the edge/node is unknown,
    400 when removal fails validation, 500 otherwise.
    """
    manager = get_manager(study_id)

    if not manager.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        manager.remove_edge(source, target, modified_by=modified_by)
        return {"success": True, "message": f"Removed edge {source} -> {target}"}
    # Consistency fix: map known error types to precise statuses, matching
    # the node endpoints (previously everything surfaced as 500).
    except SpecManagerError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
+
+
+# ============================================================================
+# WebSocket Sync Endpoint
+# ============================================================================
+
class WebSocketSubscriber:
    """WebSocket subscriber adapter.

    Wraps a FastAPI WebSocket so it can be handed to ``manager.subscribe``.
    NOTE(review): assumes the manager's subscriber protocol only calls
    ``send_json`` — confirm against the spec manager implementation.
    """

    def __init__(self, websocket: WebSocket):
        # The live client socket; notifications are forwarded verbatim.
        self.websocket = websocket

    async def send_json(self, data: Dict[str, Any]) -> None:
        await self.websocket.send_json(data)
+
+
@router.websocket("/sync")
async def websocket_sync(websocket: WebSocket, study_id: str):
    """
    WebSocket endpoint for real-time spec sync.

    Clients receive notifications when spec changes:
    - spec_updated: Spec was modified
    - node_added: New node added
    - node_removed: Node removed
    - validation_error: Validation failed

    Clients may also send:
    - {"type": "ping"} — answered with {"type": "pong"}
    - {"type": "patch_node", "node_id": ..., "data": ..., "modified_by": ...}
    - {"type": "update_position", "node_id": ..., "position": ..., "modified_by": ...}
    After 30s of client silence a {"type": "heartbeat"} frame is pushed.
    """
    await websocket.accept()

    manager = get_manager(study_id)
    subscriber = WebSocketSubscriber(websocket)

    # Subscribe to updates. The matching unsubscribe lives in the finally
    # block below, so the manager never holds a reference to a dead socket.
    manager.subscribe(subscriber)

    try:
        # Send initial connection ack (hash is None when no spec exists yet).
        await websocket.send_json({
            "type": "connection_ack",
            "study_id": study_id,
            "hash": manager.get_hash() if manager.exists() else None,
            "message": "Connected to spec sync"
        })

        # Keep connection alive and handle client messages
        while True:
            try:
                data = await asyncio.wait_for(
                    websocket.receive_json(),
                    timeout=30.0  # Heartbeat interval
                )

                # Handle client messages
                msg_type = data.get("type")

                if msg_type == "ping":
                    await websocket.send_json({"type": "pong"})

                elif msg_type == "patch_node":
                    # Client requests node update; failures are reported back
                    # to this client only, without tearing down the socket.
                    try:
                        manager.update_node(
                            data["node_id"],
                            data.get("data", {}),
                            modified_by=data.get("modified_by", "canvas")
                        )
                    except Exception as e:
                        await websocket.send_json({
                            "type": "error",
                            "message": str(e)
                        })

                elif msg_type == "update_position":
                    # Client updates node position (canvas layout only).
                    try:
                        manager.update_node_position(
                            data["node_id"],
                            data["position"],
                            modified_by=data.get("modified_by", "canvas")
                        )
                    except Exception as e:
                        await websocket.send_json({
                            "type": "error",
                            "message": str(e)
                        })

            except asyncio.TimeoutError:
                # Send heartbeat so intermediaries keep the connection open.
                await websocket.send_json({"type": "heartbeat"})

    except WebSocketDisconnect:
        pass
    finally:
        # Always detach this subscriber, whatever ended the session.
        manager.unsubscribe(subscriber)
+
+
+# ============================================================================
+# Create/Initialize Spec
+# ============================================================================
+
@router.post("/create")
async def create_spec(
    study_id: str,
    spec: Dict[str, Any],
    modified_by: str = Query(default="api")
):
    """
    Create a new spec for a study.

    Use this when migrating from old config or creating a new study.
    Will fail if spec already exists (use PUT to replace).
    """
    mgr = get_manager(study_id)

    if mgr.exists():
        raise HTTPException(
            status_code=409,
            detail=f"Spec already exists for '{study_id}'. Use PUT to replace."
        )

    try:
        # Stamp the creator into meta before the first save.
        spec.setdefault("meta", {})
        spec["meta"]["created_by"] = modified_by

        saved_hash = mgr.save(spec, modified_by=modified_by)
        return {
            "success": True,
            "hash": saved_hash,
            "message": f"Created spec for {study_id}"
        }
    except SpecValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
diff --git a/atomizer-dashboard/backend/api/services/__init__.py b/atomizer-dashboard/backend/api/services/__init__.py
index 9cc9c25c..ef581348 100644
--- a/atomizer-dashboard/backend/api/services/__init__.py
+++ b/atomizer-dashboard/backend/api/services/__init__.py
@@ -3,5 +3,13 @@ Atomizer Dashboard Services
"""
from .claude_agent import AtomizerClaudeAgent
+from .spec_manager import SpecManager, SpecManagerError, SpecNotFoundError, SpecConflictError, get_spec_manager
-__all__ = ['AtomizerClaudeAgent']
+__all__ = [
+ 'AtomizerClaudeAgent',
+ 'SpecManager',
+ 'SpecManagerError',
+ 'SpecNotFoundError',
+ 'SpecConflictError',
+ 'get_spec_manager',
+]
diff --git a/atomizer-dashboard/backend/api/services/claude_agent.py b/atomizer-dashboard/backend/api/services/claude_agent.py
index b4135679..ae61d67a 100644
--- a/atomizer-dashboard/backend/api/services/claude_agent.py
+++ b/atomizer-dashboard/backend/api/services/claude_agent.py
@@ -29,9 +29,108 @@ class AtomizerClaudeAgent:
self.client = anthropic.Anthropic()
self.study_id = study_id
self.study_dir = STUDIES_DIR / study_id if study_id else None
+ self.canvas_state: Optional[Dict[str, Any]] = None # Current canvas/spec state
+ self.interview = None # Interview engine instance (if active)
self.tools = self._define_tools()
self.system_prompt = self._build_system_prompt()
def set_canvas_state(self, spec: Dict[str, Any]) -> None:
    """Update the current canvas state for context.

    This should be called:
    1. When a study is loaded
    2. When the frontend sends a canvas_edit message
    3. After any spec modification
    """
    self.canvas_state = spec
    # Rebuild system prompt with new canvas state so the next model call
    # sees the freshly edited canvas.
    self.system_prompt = self._build_system_prompt()
+
def load_current_spec(self) -> Optional[Dict[str, Any]]:
    """Load the current atomizer_spec.json and update canvas state.

    Returns None when no study is selected or the spec file is missing;
    otherwise caches the parsed spec as the canvas state and returns it.
    """
    if not self.study_dir:
        return None

    spec_file = self.study_dir / "atomizer_spec.json"
    if not spec_file.exists():
        return None

    spec = json.loads(spec_file.read_text(encoding='utf-8'))
    self.canvas_state = spec
    return spec
+
def _format_canvas_context(self) -> str:
    """Format current canvas state for Claude's system prompt.

    This gives Claude real-time awareness of what's on the canvas,
    enabling bi-directional sync where Claude sees user's edits.
    Returns "" when no canvas state is set; otherwise a markdown summary
    of model, design variables, extractors, objectives, constraints and
    optimization settings, with "(disabled)" flags on inactive items.
    """
    if not self.canvas_state:
        return ""

    spec = self.canvas_state
    lines = ["\n## Current Canvas State"]
    lines.append("*The user can see this canvas. When you modify it, they see changes in real-time.*\n")

    # Model
    model = spec.get('model', {})
    sim_path = model.get('sim', {}).get('path', '')
    if sim_path:
        lines.append(f"**Model**: `{sim_path}`")

    # Design Variables
    dvs = spec.get('design_variables', [])
    if dvs:
        lines.append(f"\n**Design Variables ({len(dvs)}):**")
        for dv in dvs:
            bounds = dv.get('bounds', {})
            units = f" {dv.get('units', '')}" if dv.get('units') else ""
            enabled = "" if dv.get('enabled', True) else " (disabled)"
            lines.append(f" - `{dv.get('id')}`: **{dv.get('name')}** [{bounds.get('min')}, {bounds.get('max')}]{units}{enabled}")

    # Extractors
    exts = spec.get('extractors', [])
    if exts:
        lines.append(f"\n**Extractors ({len(exts)}):**")
        for ext in exts:
            ext_type = ext.get('type', 'unknown')
            enabled = "" if ext.get('enabled', True) else " (disabled)"
            lines.append(f" - `{ext.get('id')}`: **{ext.get('name')}** ({ext_type}){enabled}")

    # Objectives — weight is shown only when it differs from the 1.0 default
    objs = spec.get('objectives', [])
    if objs:
        lines.append(f"\n**Objectives ({len(objs)}):**")
        for obj in objs:
            direction = obj.get('direction', 'minimize')
            weight = obj.get('weight', 1.0)
            enabled = "" if obj.get('enabled', True) else " (disabled)"
            weight_str = f" [weight: {weight}]" if weight != 1.0 else ""
            lines.append(f" - `{obj.get('id')}`: **{direction}** {obj.get('name')}{weight_str}{enabled}")

    # Constraints
    cons = spec.get('constraints', [])
    if cons:
        lines.append(f"\n**Constraints ({len(cons)}):**")
        for con in cons:
            op = con.get('operator', '<=')
            threshold = con.get('threshold', 0)
            units = f" {con.get('units', '')}" if con.get('units') else ""
            enabled = "" if con.get('enabled', True) else " (disabled)"
            lines.append(f" - `{con.get('id')}`: **{con.get('name')}** {op} {threshold}{units}{enabled}")

    # Optimization settings
    opt = spec.get('optimization', {})
    if opt:
        algo = opt.get('algorithm', {}).get('type', 'TPE')
        budget = opt.get('budget', {}).get('max_trials', 100)
        lines.append(f"\n**Optimization**: {algo}, max {budget} trials")

    # Empty state message — nudges the assistant to help populate the canvas
    if not dvs and not exts and not objs:
        lines.append("\n*Canvas is empty. Help the user add design variables, extractors, and objectives.*")

    return "\n".join(lines)
+
def _build_system_prompt(self) -> str:
"""Build context-aware system prompt for Atomizer"""
base_prompt = """You are the Atomizer Assistant - the intelligent assistant built into Atomizer for FEA optimization.
@@ -50,6 +149,28 @@ You help engineers with structural optimization using NX Nastran simulations:
5. **Troubleshoot** - Debug failed trials, identify issues, suggest fixes
6. **Create Studies** - Guide users through setting up new optimization studies
+## POWER MODE - Direct Modification Access
+You have FULL ACCESS to modify the study configuration. When the user asks you to:
+- Add design variables: Use `add_design_variable` tool
+- Add extractors (mass, volume, stress, displacement, custom): Use `add_extractor` tool
+- Add objectives: Use `add_objective` tool
+- Add constraints: Use `add_constraint` tool
+- Update any setting: Use `update_spec_field` tool
+- Remove elements: Use `remove_node` tool
+
+**IMPORTANT**: DO NOT ask for permission. Just DO IT immediately. The user is in Power Mode and expects you to act.
+
+When adding extractors, you can create custom ones with Python code. Example for volume:
+```python
+def extract_volume(op2_path: str) -> Dict[str, float]:
+ # Custom volume calculation from FEA results
+ from pyNastran.op2.op2 import OP2
+ op2 = OP2()
+ op2.read_op2(op2_path)
+ # ... calculation logic
+ return {"volume": calculated_volume}
+```
+
## Atomizer Context
- Atomizer uses Optuna for Bayesian optimization
- Studies can use FEA-only or hybrid FEA/Neural surrogate approaches
@@ -64,6 +185,7 @@ You help engineers with structural optimization using NX Nastran simulations:
- If uncertain, say so and suggest how to verify
- Proactively suggest next steps or insights
- Sound confident and professional - you're a specialized expert system
+- **ACT IMMEDIATELY when asked to add/modify things - don't ask for permission**
"""
@@ -74,6 +196,11 @@ You help engineers with structural optimization using NX Nastran simulations:
else:
base_prompt += "\n## Current Study: None selected\nAsk the user to select a study or help them create a new one.\n"
+ # Add canvas state context (bi-directional sync)
+ canvas_context = self._format_canvas_context()
+ if canvas_context:
+ base_prompt += canvas_context
+
return base_prompt
def _get_study_context(self) -> str:
@@ -265,6 +392,196 @@ You help engineers with structural optimization using NX Nastran simulations:
"properties": {},
"required": []
}
+ },
+ # === WRITE TOOLS (Power Mode) ===
+ {
+ "name": "add_design_variable",
+ "description": "Add a new design variable to the study's atomizer_spec.json. This modifies the spec directly.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_id": {"type": "string", "description": "Study ID. Uses current study if not specified."},
+ "name": {"type": "string", "description": "Variable name (e.g., 'web_thickness')"},
+ "expression_name": {"type": "string", "description": "NX expression name (usually same as name)"},
+ "min_value": {"type": "number", "description": "Minimum bound"},
+ "max_value": {"type": "number", "description": "Maximum bound"},
+ "baseline": {"type": "number", "description": "Initial/baseline value"},
+ "units": {"type": "string", "description": "Units (e.g., 'mm', 'degrees')"}
+ },
+ "required": ["name", "min_value", "max_value"]
+ }
+ },
+ {
+ "name": "add_extractor",
+ "description": "Add a new physics extractor to the study. Can be builtin (mass, displacement, stress, zernike_opd) or custom with Python code.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_id": {"type": "string", "description": "Study ID. Uses current study if not specified."},
+ "name": {"type": "string", "description": "Extractor display name"},
+ "extractor_type": {"type": "string", "description": "Type: mass, displacement, stress, frequency, zernike_opd, or custom"},
+ "config": {"type": "object", "description": "Configuration for the extractor (optional)"},
+ "custom_code": {"type": "string", "description": "For custom extractors: Python function code"},
+ "outputs": {
+ "type": "array",
+ "items": {"type": "object"},
+ "description": "Output definitions: [{name, metric}]"
+ }
+ },
+ "required": ["name", "extractor_type"]
+ }
+ },
+ {
+ "name": "add_objective",
+ "description": "Add a new optimization objective to the study.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_id": {"type": "string", "description": "Study ID. Uses current study if not specified."},
+ "name": {"type": "string", "description": "Objective name/description"},
+ "direction": {"type": "string", "enum": ["minimize", "maximize"], "description": "Optimization direction"},
+ "weight": {"type": "number", "description": "Weight in weighted sum (default: 1.0)"},
+ "target": {"type": "number", "description": "Target value (optional)"},
+ "units": {"type": "string", "description": "Units (e.g., 'nm', 'kg')"},
+ "extractor_id": {"type": "string", "description": "Source extractor ID (e.g., 'ext_001')"},
+ "output_key": {"type": "string", "description": "Output key from extractor"}
+ },
+ "required": ["name", "direction"]
+ }
+ },
+ {
+ "name": "add_constraint",
+ "description": "Add a new constraint to the study.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_id": {"type": "string", "description": "Study ID. Uses current study if not specified."},
+ "name": {"type": "string", "description": "Constraint name/description"},
+ "operator": {"type": "string", "enum": ["<=", ">=", "<", ">", "=="], "description": "Comparison operator"},
+ "threshold": {"type": "number", "description": "Threshold value"},
+ "units": {"type": "string", "description": "Units (optional)"},
+ "extractor_id": {"type": "string", "description": "Source extractor ID"},
+ "output_key": {"type": "string", "description": "Output key from extractor"}
+ },
+ "required": ["name", "operator", "threshold"]
+ }
+ },
+ {
+ "name": "update_spec_field",
+ "description": "Update any field in the atomizer_spec.json using a JSON path.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_id": {"type": "string", "description": "Study ID. Uses current study if not specified."},
+ "path": {"type": "string", "description": "JSON path (e.g., 'design_variables.0.bounds.max', 'objectives.1.weight')"},
+ "value": {"description": "New value to set"}
+ },
+ "required": ["path", "value"]
+ }
+ },
+ {
+ "name": "remove_node",
+ "description": "Remove a design variable, extractor, objective, or constraint by ID.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_id": {"type": "string", "description": "Study ID. Uses current study if not specified."},
+ "node_id": {"type": "string", "description": "Node ID to remove (e.g., 'dv_003', 'ext_002', 'obj_001', 'con_001')"}
+ },
+ "required": ["node_id"]
+ }
+ },
+ {
+ "name": "create_study",
+ "description": "Create a new optimization study with initial configuration. Creates the study folder and atomizer_spec.json.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "study_name": {
+ "type": "string",
+ "description": "Name for the study (snake_case, e.g., 'bracket_mass_optimization')"
+ },
+ "category": {
+ "type": "string",
+ "description": "Parent category folder (e.g., 'Simple_Bracket', 'M1_Mirror'). Created if doesn't exist."
+ },
+ "description": {
+ "type": "string",
+ "description": "Brief description of the optimization goal"
+ },
+ "sim_file": {
+ "type": "string",
+ "description": "Path to the .sim file (relative to study folder or absolute)"
+ },
+ "algorithm": {
+ "type": "string",
+ "enum": ["TPE", "CMA-ES", "NSGA-II", "RandomSearch"],
+ "description": "Optimization algorithm. Default: TPE"
+ },
+ "max_trials": {
+ "type": "integer",
+ "description": "Maximum number of trials. Default: 100"
+ }
+ },
+ "required": ["study_name"]
+ }
+ },
+ # === INTERVIEW TOOLS ===
+ {
+ "name": "start_interview",
+ "description": "Start an interview session to create a new study through guided conversation. Use this when the user wants to create a study but hasn't provided all details upfront.",
+ "input_schema": {
+ "type": "object",
+ "properties": {},
+ "required": []
+ }
+ },
+ {
+ "name": "interview_record",
+ "description": "Record an answer from the user during the interview. Advances the interview state automatically.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "field": {
+ "type": "string",
+ "enum": ["study_name", "category", "description", "sim_file",
+ "design_variable", "extractor", "objective", "constraint",
+ "algorithm", "max_trials", "confirm"],
+ "description": "The field being answered"
+ },
+ "value": {
+ "description": "The value for this field. For multi-value fields (design_variable, etc.), can be a dict or list."
+ }
+ },
+ "required": ["field", "value"]
+ }
+ },
+ {
+ "name": "interview_advance",
+ "description": "Move to the next phase of the interview. Use after gathering all required info for the current phase.",
+ "input_schema": {
+ "type": "object",
+ "properties": {},
+ "required": []
+ }
+ },
+ {
+ "name": "interview_status",
+ "description": "Get the current interview progress and collected data.",
+ "input_schema": {
+ "type": "object",
+ "properties": {},
+ "required": []
+ }
+ },
+ {
+ "name": "interview_finalize",
+ "description": "Finalize the interview and create the study with all collected data.",
+ "input_schema": {
+ "type": "object",
+ "properties": {},
+ "required": []
+ }
}
]
@@ -285,6 +602,32 @@ You help engineers with structural optimization using NX Nastran simulations:
return self._tool_read_readme(tool_input.get('study_id'))
elif tool_name == "list_studies":
return self._tool_list_studies()
+ # === WRITE TOOLS ===
+ elif tool_name == "add_design_variable":
+ return self._tool_add_design_variable(tool_input)
+ elif tool_name == "add_extractor":
+ return self._tool_add_extractor(tool_input)
+ elif tool_name == "add_objective":
+ return self._tool_add_objective(tool_input)
+ elif tool_name == "add_constraint":
+ return self._tool_add_constraint(tool_input)
+ elif tool_name == "update_spec_field":
+ return self._tool_update_spec_field(tool_input)
+ elif tool_name == "remove_node":
+ return self._tool_remove_node(tool_input)
+ elif tool_name == "create_study":
+ return self._tool_create_study(tool_input)
+ # === INTERVIEW TOOLS ===
+ elif tool_name == "start_interview":
+ return self._tool_start_interview()
+ elif tool_name == "interview_record":
+ return self._tool_interview_record(tool_input)
+ elif tool_name == "interview_advance":
+ return self._tool_interview_advance()
+ elif tool_name == "interview_status":
+ return self._tool_interview_status()
+ elif tool_name == "interview_finalize":
+ return self._tool_interview_finalize()
else:
return f"Unknown tool: {tool_name}"
except Exception as e:
@@ -626,6 +969,596 @@ You help engineers with structural optimization using NX Nastran simulations:
return "\n".join(result)
+ # === WRITE TOOL IMPLEMENTATIONS ===
+
def _get_spec_path(self, study_id: Optional[str]) -> Path:
    """Resolve the atomizer_spec.json path inside the study directory."""
    return self._get_study_dir(study_id) / "atomizer_spec.json"
+
def _load_spec(self, study_id: Optional[str]) -> Dict[str, Any]:
    """Read and parse the study's atomizer_spec.json.

    Raises ValueError when the file does not exist.
    """
    path = self._get_spec_path(study_id)
    if not path.exists():
        raise ValueError(f"No atomizer_spec.json found in study. Path: {path}")
    return json.loads(path.read_text(encoding='utf-8'))
+
def _save_spec(self, study_id: Optional[str], spec: Dict[str, Any]) -> None:
    """Serialize *spec* back to the study's atomizer_spec.json.

    Stamps meta.modified_by / meta.modified_at when a meta block exists.
    """
    target = self._get_spec_path(study_id)
    # Record who/when for auditability before persisting.
    if 'meta' in spec:
        spec['meta']['modified_by'] = 'claude_agent'
        spec['meta']['modified_at'] = datetime.now().isoformat()
    target.write_text(json.dumps(spec, indent=2), encoding='utf-8')
+
def _generate_id(self, prefix: str, existing_ids: List[str]) -> str:
    """Generate a unique ID with prefix (e.g. 'dv_' -> 'dv_003').

    Scans existing IDs for '<prefix>NNN' and returns the next number,
    zero-padded to three digits. IDs that don't fit the scheme are ignored.
    """
    highest = 0
    for existing in existing_ids:
        if not existing.startswith(prefix):
            continue
        try:
            highest = max(highest, int(existing.split('_')[1]))
        except (IndexError, ValueError):
            # Non-numeric or malformed suffix — skip it.
            continue
    return f"{prefix}{highest + 1:03d}"
+
def _tool_add_design_variable(self, params: Dict[str, Any]) -> str:
    """Append a new design variable to the spec and persist it."""
    study_id = params.get('study_id')
    spec = self._load_spec(study_id)

    dvs = spec.setdefault('design_variables', [])
    new_id = self._generate_id('dv_', [dv.get('id', '') for dv in dvs])

    entry = {
        "id": new_id,
        "name": params['name'],
        # NX expression defaults to the display name when not given.
        "expression_name": params.get('expression_name', params['name']),
        "type": "continuous",
        "bounds": {
            "min": params['min_value'],
            "max": params['max_value']
        },
        "enabled": True
    }
    if 'baseline' in params:
        entry['baseline'] = params['baseline']
    if 'units' in params:
        entry['units'] = params['units']

    # Auto-layout: design variables stack vertically in the left column.
    entry['canvas_position'] = {"x": 50, "y": 100 + len(dvs) * 80}

    dvs.append(entry)
    self._save_spec(study_id, spec)

    return f"✓ Added design variable '{params['name']}' (ID: {new_id}) with bounds [{params['min_value']}, {params['max_value']}]"
+
def _tool_add_extractor(self, params: Dict[str, Any]) -> str:
    """Append a new extractor (builtin or custom) to the spec and persist it."""
    study_id = params.get('study_id')
    spec = self._load_spec(study_id)

    extractors = spec.setdefault('extractors', [])
    new_id = self._generate_id('ext_', [ext.get('id', '') for ext in extractors])
    ext_type = params['extractor_type']

    entry = {
        "id": new_id,
        "name": params['name'],
        "type": ext_type,
        "enabled": True
    }

    # Optional free-form configuration block.
    if params.get('config'):
        entry['config'] = params['config']

    # Custom extractors carry their own Python source and dependency list.
    if ext_type == 'custom' and 'custom_code' in params:
        entry['custom'] = {
            "function_code": params['custom_code'],
            "inputs": params.get('inputs', ["op2_path"]),
            "dependencies": params.get('dependencies', ["numpy", "pyNastran"])
        }

    if 'outputs' in params:
        entry['outputs'] = params['outputs']
    else:
        # Default to one scalar output derived from the display name.
        entry['outputs'] = [{"name": params['name'].lower().replace(' ', '_'), "metric": "scalar"}]

    # Auto-layout: extractors stack in the middle column.
    entry['canvas_position'] = {"x": 400, "y": 100 + len(extractors) * 80}

    extractors.append(entry)
    self._save_spec(study_id, spec)

    return f"✓ Added extractor '{params['name']}' (ID: {new_id}, type: {params['extractor_type']})"
+
def _tool_add_objective(self, params: Dict[str, Any]) -> str:
    """Append a new optimization objective to the spec and persist it."""
    study_id = params.get('study_id')
    spec = self._load_spec(study_id)

    objectives = spec.setdefault('objectives', [])
    new_id = self._generate_id('obj_', [obj.get('id', '') for obj in objectives])

    entry = {
        "id": new_id,
        "name": params['name'],
        "direction": params['direction'],
        "weight": params.get('weight', 1.0),
        "enabled": True
    }
    if 'target' in params:
        entry['target'] = params['target']
    if 'units' in params:
        entry['units'] = params['units']
    if 'extractor_id' in params:
        # Wire the objective to the extractor output it reads from.
        entry['source'] = {
            "extractor_id": params['extractor_id'],
            "output_key": params.get('output_key', 'value')
        }

    # Auto-layout: objectives stack in the right column.
    entry['canvas_position'] = {"x": 750, "y": 100 + len(objectives) * 80}

    objectives.append(entry)
    self._save_spec(study_id, spec)

    return f"✓ Added objective '{params['name']}' (ID: {new_id}, direction: {params['direction']}, weight: {params.get('weight', 1.0)})"
+
def _tool_add_constraint(self, params: Dict[str, Any]) -> str:
    """Append a new constraint to the spec and persist it."""
    study_id = params.get('study_id')
    spec = self._load_spec(study_id)

    constraints = spec.setdefault('constraints', [])
    new_id = self._generate_id('con_', [con.get('id', '') for con in constraints])

    entry = {
        "id": new_id,
        "name": params['name'],
        "operator": params['operator'],
        "threshold": params['threshold'],
        "enabled": True
    }
    if 'units' in params:
        entry['units'] = params['units']
    if 'extractor_id' in params:
        # Wire the constraint to the extractor output it checks.
        entry['source'] = {
            "extractor_id": params['extractor_id'],
            "output_key": params.get('output_key', 'value')
        }

    # Auto-layout: constraints stack below the objectives column.
    entry['canvas_position'] = {"x": 750, "y": 400 + len(constraints) * 80}

    constraints.append(entry)
    self._save_spec(study_id, spec)

    return f"✓ Added constraint '{params['name']}' (ID: {new_id}, {params['operator']} {params['threshold']})"
+
def _tool_update_spec_field(self, params: Dict[str, Any]) -> str:
    """Update a field in the spec using a dotted JSON path.

    Path segments are dict keys, or list indices when the segment is a
    digit string (e.g. 'design_variables.0.bounds.max'). Returns a
    human-readable success/failure message; nothing is saved on failure.
    """
    study_id = params.get('study_id')
    spec = self._load_spec(study_id)

    path = params['path']
    value = params['value']

    # Parse and navigate the path down to the parent of the target field.
    parts = path.split('.')
    current = spec

    for i, part in enumerate(parts[:-1]):
        # Check if part is an array index
        if part.isdigit():
            idx = int(part)
            if not isinstance(current, list) or idx >= len(current):
                return f"✗ Invalid path: index {idx} out of range at '{'.'.join(parts[:i+1])}'"
            current = current[idx]
        else:
            if not isinstance(current, dict) or part not in current:
                return f"✗ Invalid path: key '{part}' not found at '{'.'.join(parts[:i+1])}'"
            current = current[part]

    # Set the final value
    final_key = parts[-1]
    if final_key.isdigit():
        idx = int(final_key)
        if isinstance(current, list) and idx < len(current):
            old_value = current[idx]
            current[idx] = value
        else:
            return f"✗ Invalid path: cannot set index {idx}"
    else:
        # Bug fix: the navigated container may be a list while the final
        # segment is a non-numeric key — previously this raised
        # AttributeError on list.get. Report it as an invalid path instead.
        if not isinstance(current, dict):
            return f"✗ Invalid path: cannot set key '{final_key}' on non-object"
        old_value = current.get(final_key, '')
        current[final_key] = value

    # Save only after a successful in-memory update.
    self._save_spec(study_id, spec)

    return f"✓ Updated '{path}': {old_value} → {value}"
+
+ def _tool_remove_node(self, params: Dict[str, Any]) -> str:
+ """Remove a node (design variable, extractor, objective, or constraint) by ID"""
+ study_id = params.get('study_id')
+ spec = self._load_spec(study_id)
+
+ node_id = params['node_id']
+
+ # Determine the collection based on prefix
+ if node_id.startswith('dv_'):
+ collection_key = 'design_variables'
+ elif node_id.startswith('ext_'):
+ collection_key = 'extractors'
+ elif node_id.startswith('obj_'):
+ collection_key = 'objectives'
+ elif node_id.startswith('con_'):
+ collection_key = 'constraints'
+ else:
+ return f"✗ Unknown node type for ID: {node_id}. Expected prefix: dv_, ext_, obj_, or con_"
+
+ collection = spec.get(collection_key, [])
+
+ # Find and remove the node
+ original_len = len(collection)
+ spec[collection_key] = [item for item in collection if item.get('id') != node_id]
+
+ if len(spec[collection_key]) == original_len:
+ return f"✗ Node '{node_id}' not found in {collection_key}"
+
+ # Also remove any edges referencing this node
+ if 'canvas' in spec and 'edges' in spec['canvas']:
+ spec['canvas']['edges'] = [
+ edge for edge in spec['canvas']['edges']
+ if edge.get('source') != node_id and edge.get('target') != node_id
+ ]
+
+ # Save
+ self._save_spec(study_id, spec)
+
+ return f"✓ Removed {collection_key.rstrip('s')} '{node_id}'"
+
+ def _tool_create_study(self, params: Dict[str, Any]) -> str:
+ """Create a new optimization study with initial atomizer_spec.json"""
+ study_name = params['study_name']
+ category = params.get('category', '')
+ description = params.get('description', '')
+ sim_file = params.get('sim_file', '')
+ algorithm = params.get('algorithm', 'TPE')
+ max_trials = params.get('max_trials', 100)
+
+ # Validate study name (snake_case)
+ import re
+ if not re.match(r'^[a-z][a-z0-9_]*$', study_name):
+ return f"✗ Invalid study name '{study_name}'. Use snake_case (e.g., 'bracket_mass_opt')"
+
+ # Build study path
+ if category:
+ study_dir = STUDIES_DIR / category / study_name
+ study_id = f"{category}/{study_name}"
+ else:
+ study_dir = STUDIES_DIR / study_name
+ study_id = study_name
+
+ # Check if already exists
+ if study_dir.exists():
+ return f"✗ Study '{study_id}' already exists"
+
+ # Create directory structure
+ study_dir.mkdir(parents=True, exist_ok=True)
+ (study_dir / "1_setup").mkdir(exist_ok=True)
+ (study_dir / "2_iterations").mkdir(exist_ok=True)
+ (study_dir / "3_results").mkdir(exist_ok=True)
+
+ # Create initial atomizer_spec.json
+ spec = {
+ "meta": {
+ "version": "2.0",
+ "study_name": study_name,
+ "description": description,
+ "created_at": datetime.now().isoformat(),
+ "created_by": "claude_agent",
+ "modified_at": datetime.now().isoformat(),
+ "modified_by": "claude_agent"
+ },
+ "model": {
+ "sim": {
+ "path": sim_file,
+ "solver": "nastran"
+ }
+ },
+ "design_variables": [],
+ "extractors": [],
+ "objectives": [],
+ "constraints": [],
+ "optimization": {
+ "algorithm": {
+ "type": algorithm
+ },
+ "budget": {
+ "max_trials": max_trials
+ }
+ },
+ "canvas": {
+ "edges": [],
+ "layout_version": "2.0"
+ }
+ }
+
+ # Write spec
+ spec_path = study_dir / "atomizer_spec.json"
+ with open(spec_path, 'w', encoding='utf-8') as f:
+ json.dump(spec, f, indent=2)
+
+ # Create README.md
+ readme_content = f"""# {study_name.replace('_', ' ').title()}
+
+## Description
+{description if description else 'Add study description here.'}
+
+## Optimization Setup
+- **Algorithm**: {algorithm}
+- **Max Trials**: {max_trials}
+
+## Design Variables
+*Add design variables using the canvas or assistant.*
+
+## Objectives
+*Add objectives using the canvas or assistant.*
+
+---
+*Created by Atomizer Assistant*
+"""
+ readme_path = study_dir / "README.md"
+ with open(readme_path, 'w', encoding='utf-8') as f:
+ f.write(readme_content)
+
+ # Update agent to point to new study
+ self.study_id = study_id
+ self.study_dir = study_dir
+ self.canvas_state = spec
+ self.system_prompt = self._build_system_prompt()
+
+ return f"✓ Created study '{study_id}' at {study_dir}\n\nNext steps:\n1. Copy your NX model files (.prt, .fem, .sim) to 1_setup/\n2. Add design variables (NX expressions)\n3. Add extractors (mass, displacement, etc.)\n4. Add objectives to optimize"
+
+ # === INTERVIEW TOOL IMPLEMENTATIONS ===
+
+ def _tool_start_interview(self) -> str:
+ """Start a new interview session"""
+ from api.services.interview_engine import InterviewEngine
+
+ self.interview = InterviewEngine()
+ result = self.interview.start()
+
+ questions = result.get("next_questions", [])
+ question_text = "\n".join([
+ f"• **{q['field']}**: {q['question']}\n *{q.get('hint', '')}*"
+ for q in questions[:3]
+ ])
+
+ return f"""✓ Interview started!
+
+**Current Phase**: {result['state']}
+
+{result['message']}
+
+**Questions to ask:**
+{question_text}
+
+Ask the user about these items one at a time, then use `interview_record` to save their answers."""
+
+ def _tool_interview_record(self, params: Dict[str, Any]) -> str:
+ """Record an interview answer"""
+ if not self.interview:
+ return "✗ No interview in progress. Use `start_interview` first."
+
+ field = params['field']
+ value = params['value']
+
+ result = self.interview.record_answer(field, value)
+
+ return f"""✓ Recorded: **{field}** = {json.dumps(value) if isinstance(value, (dict, list)) else value}
+
+**Current State**: {result['state']}
+**Progress**: {json.dumps(result['data_so_far'], indent=2)}
+
+Use `interview_advance` when you've gathered enough info for this phase, or continue asking about other fields."""
+
+ def _tool_interview_advance(self) -> str:
+ """Advance to next interview phase"""
+ if not self.interview:
+ return "✗ No interview in progress. Use `start_interview` first."
+
+ result = self.interview.advance_state()
+
+ questions = result.get("next_questions", [])
+ if questions:
+ question_text = "\n".join([
+ f"• **{q['field']}**: {q['question']}\n *{q.get('hint', '')}*"
+ for q in questions[:3]
+ ])
+ else:
+ question_text = "*No more questions for this phase*"
+
+ return f"""✓ Advanced to: **{result['state']}**
+
+**Next Questions:**
+{question_text}
+
+Continue gathering information or use `interview_finalize` when ready to create the study."""
+
+ def _tool_interview_status(self) -> str:
+ """Get current interview status"""
+ if not self.interview:
+ return "No interview in progress. Use `start_interview` to begin."
+
+ progress = self.interview.get_progress()
+
+ return f"""**Interview Progress: {progress['progress_percent']}%**
+
+**Current Phase**: {progress['state']}
+
+**Collected Data**:
+{json.dumps(progress['summary'], indent=2)}
+
+**Validation**:
+{json.dumps(self.interview.validate(), indent=2)}"""
+
+ def _tool_interview_finalize(self) -> str:
+ """Finalize interview and create study"""
+ if not self.interview:
+ return "✗ No interview in progress. Use `start_interview` first."
+
+ result = self.interview.finalize()
+
+ if not result['success']:
+ return f"✗ Cannot finalize - missing required data:\n" + "\n".join(f" - {e}" for e in result['errors'])
+
+ # Create the study using the generated spec
+ spec = result['spec']
+ study_name = spec['meta']['study_name']
+ category = self.interview.data.category
+
+ # Build study path
+ if category:
+ study_dir = STUDIES_DIR / category / study_name
+ study_id = f"{category}/{study_name}"
+ else:
+ study_dir = STUDIES_DIR / study_name
+ study_id = study_name
+
+ # Check if already exists
+ if study_dir.exists():
+ return f"✗ Study '{study_id}' already exists. Choose a different name."
+
+ # Create directory structure
+ study_dir.mkdir(parents=True, exist_ok=True)
+ (study_dir / "1_setup").mkdir(exist_ok=True)
+ (study_dir / "2_iterations").mkdir(exist_ok=True)
+ (study_dir / "3_results").mkdir(exist_ok=True)
+
+ # Write spec
+ spec_path = study_dir / "atomizer_spec.json"
+ with open(spec_path, 'w', encoding='utf-8') as f:
+ json.dump(spec, f, indent=2)
+
+ # Create README
+ readme_content = f"""# {study_name.replace('_', ' ').title()}
+
+## Description
+{spec['meta'].get('description', 'Optimization study created via interview.')}
+
+## Design Variables
+{chr(10).join(f"- **{dv['name']}**: [{dv.get('bounds', {}).get('min', '?')}, {dv.get('bounds', {}).get('max', '?')}]" for dv in spec['design_variables']) or '*None defined*'}
+
+## Extractors
+{chr(10).join(f"- **{ext['name']}** ({ext.get('type', 'custom')})" for ext in spec['extractors']) or '*None defined*'}
+
+## Objectives
+{chr(10).join(f"- **{obj['name']}**: {obj.get('direction', 'minimize')}" for obj in spec['objectives']) or '*None defined*'}
+
+## Constraints
+{chr(10).join(f"- **{con['name']}** {con.get('operator', '<=')} {con.get('threshold', 0)}" for con in spec.get('constraints', [])) or '*None defined*'}
+
+## Optimization Settings
+- **Algorithm**: {spec['optimization']['algorithm']['type']}
+- **Max Trials**: {spec['optimization']['budget']['max_trials']}
+
+---
+*Created by Atomizer Assistant via Interview*
+"""
+ readme_path = study_dir / "README.md"
+ with open(readme_path, 'w', encoding='utf-8') as f:
+ f.write(readme_content)
+
+ # Update agent context
+ self.study_id = study_id
+ self.study_dir = study_dir
+ self.canvas_state = spec
+ self.interview = None # Clear interview
+ self.system_prompt = self._build_system_prompt()
+
+ # Build warnings message
+ warnings_msg = ""
+ if result.get('warnings'):
+ warnings_msg = "\n\n**Warnings:**\n" + "\n".join(f" ⚠️ {w}" for w in result['warnings'])
+
+ return f"""✓ Study created successfully!
+
+**Study ID**: {study_id}
+**Location**: {study_dir}
+
+**Configuration**:
+- Design Variables: {len(spec['design_variables'])}
+- Extractors: {len(spec['extractors'])}
+- Objectives: {len(spec['objectives'])}
+- Constraints: {len(spec.get('constraints', []))}
+- Algorithm: {spec['optimization']['algorithm']['type']}
+- Max Trials: {spec['optimization']['budget']['max_trials']}
+{warnings_msg}
+
+The canvas has been updated with the new study configuration. You can now:
+1. Review and refine the configuration in the canvas
+2. Copy your NX model files to 1_setup/
+3. Start the optimization when ready"""
+
async def chat(self, message: str, conversation_history: Optional[List[Dict]] = None) -> Dict[str, Any]:
"""
Process a chat message with tool use support
@@ -698,7 +1631,7 @@ You help engineers with structural optimization using NX Nastran simulations:
async def chat_stream(self, message: str, conversation_history: Optional[List[Dict]] = None) -> AsyncGenerator[str, None]:
"""
- Stream a chat response token by token
+ Stream a chat response token by token (simple, no tool use)
Args:
message: User's message
@@ -710,8 +1643,7 @@ You help engineers with structural optimization using NX Nastran simulations:
messages = conversation_history.copy() if conversation_history else []
messages.append({"role": "user", "content": message})
- # For streaming, we'll do a simpler approach without tool use for now
- # (Tool use with streaming is more complex)
+ # Simple streaming without tool use
with self.client.messages.stream(
model="claude-sonnet-4-20250514",
max_tokens=4096,
@@ -720,3 +1652,127 @@ You help engineers with structural optimization using NX Nastran simulations:
) as stream:
for text in stream.text_stream:
yield text
+
+ async def chat_stream_with_tools(
+ self,
+ message: str,
+ conversation_history: Optional[List[Dict]] = None
+ ) -> AsyncGenerator[Dict[str, Any], None]:
+ """
+ Stream a chat response with full tool use support.
+
+ Yields events:
+ - {"type": "text", "content": "..."} - Text token
+ - {"type": "tool_call", "tool": {...}} - Tool being called
+ - {"type": "tool_result", "tool": "...", "result": "..."} - Tool result
+ - {"type": "done", "response": "...", "tool_calls": [...]} - Final summary
+
+ Args:
+ message: User's message
+ conversation_history: Previous messages for context
+
+ Yields:
+ Event dicts with type and content
+ """
+ messages = conversation_history.copy() if conversation_history else []
+ messages.append({"role": "user", "content": message})
+
+ tool_calls_made = []
+ accumulated_text = ""
+
+ # Loop to handle multiple rounds of tool use
+ while True:
+ current_text = ""
+ current_tool_uses = []
+
+ # Use streaming for each API call
+ with self.client.messages.stream(
+ model="claude-sonnet-4-20250514",
+ max_tokens=4096,
+ system=self.system_prompt,
+ tools=self.tools,
+ messages=messages
+ ) as stream:
+ for event in stream:
+ # Handle different event types from the stream
+ if hasattr(event, 'type'):
+ if event.type == 'content_block_start':
+ if hasattr(event, 'content_block'):
+ block = event.content_block
+ if hasattr(block, 'type') and block.type == 'tool_use':
+ # Tool use starting
+ current_tool_uses.append({
+ 'id': block.id,
+ 'name': block.name,
+ 'input': {}
+ })
+ yield {"type": "tool_call", "tool": {"name": block.name, "id": block.id}}
+
+ elif event.type == 'content_block_delta':
+ if hasattr(event, 'delta'):
+ delta = event.delta
+ if hasattr(delta, 'type'):
+ if delta.type == 'text_delta' and hasattr(delta, 'text'):
+ # Text token
+ current_text += delta.text
+ yield {"type": "text", "content": delta.text}
+ elif delta.type == 'input_json_delta' and hasattr(delta, 'partial_json'):
+ # Tool input arrives as JSON fragments; ignored here — the complete input is read from stream.get_final_message() below
+ pass
+
+ # Get the final message to check stop reason and get complete content
+ final_message = stream.get_final_message()
+
+ # Check if we need to process tool calls
+ if final_message.stop_reason == "tool_use":
+ # Extract tool uses from final message
+ tool_results_content = []
+
+ for block in final_message.content:
+ if block.type == "tool_use":
+ tool_name = block.name
+ tool_input = block.input
+ tool_id = block.id
+
+ # Execute the tool
+ result = self._execute_tool(tool_name, tool_input)
+
+ tool_calls_made.append({
+ "tool": tool_name,
+ "input": tool_input,
+ "result_preview": result[:200] + "..." if len(result) > 200 else result
+ })
+
+ # Yield tool result event
+ yield {
+ "type": "tool_result",
+ "tool": tool_name,
+ "result": result[:500] + "..." if len(result) > 500 else result
+ }
+
+ tool_results_content.append({
+ "type": "tool_result",
+ "tool_use_id": tool_id,
+ "content": result
+ })
+
+ # Add to messages for next iteration
+ messages.append({"role": "assistant", "content": final_message.content})
+ messages.append({"role": "user", "content": tool_results_content})
+
+ else:
+ # No more tool use - we're done
+ accumulated_text += current_text
+
+ # Extract final text from content
+ for block in final_message.content:
+ if hasattr(block, 'text') and not accumulated_text:
+ accumulated_text = block.text
+
+ # Yield done event
+ yield {
+ "type": "done",
+ "response": accumulated_text,
+ "tool_calls": tool_calls_made
+ }
+ break
diff --git a/atomizer-dashboard/backend/api/services/context_builder.py b/atomizer-dashboard/backend/api/services/context_builder.py
index d0f9f486..da6faf9e 100644
--- a/atomizer-dashboard/backend/api/services/context_builder.py
+++ b/atomizer-dashboard/backend/api/services/context_builder.py
@@ -43,7 +43,11 @@ class ContextBuilder:
# Canvas context takes priority - if user is working on a canvas, include it
if canvas_state:
+ node_count = len(canvas_state.get("nodes", []))
+ print(f"[ContextBuilder] Including canvas context with {node_count} nodes")
parts.append(self._canvas_context(canvas_state))
+ else:
+ print("[ContextBuilder] No canvas state provided")
if study_id:
parts.append(self._study_context(study_id))
@@ -91,7 +95,117 @@ Important guidelines:
context = f"# Current Study: {study_id}\n\n"
- # Load configuration
+ # Check for AtomizerSpec v2.0 first (preferred)
+ spec_path = study_dir / "1_setup" / "atomizer_spec.json"
+ if not spec_path.exists():
+ spec_path = study_dir / "atomizer_spec.json"
+
+ if spec_path.exists():
+ context += self._spec_context(spec_path)
+ else:
+ # Fall back to legacy optimization_config.json
+ context += self._legacy_config_context(study_dir)
+
+ # Check for results
+ db_path = study_dir / "3_results" / "study.db"
+ if db_path.exists():
+ try:
+ conn = sqlite3.connect(db_path)
+ count = conn.execute(
+ "SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
+ ).fetchone()[0]
+
+ best = conn.execute("""
+ SELECT MIN(tv.value) FROM trial_values tv
+ JOIN trials t ON tv.trial_id = t.trial_id
+ WHERE t.state = 'COMPLETE'
+ """).fetchone()[0]
+
+ context += f"\n## Results Status\n\n"
+ context += f"- **Trials completed**: {count}\n"
+ if best is not None:
+ context += f"- **Best objective**: {best:.6f}\n"
+
+ conn.close()
+ except Exception:
+ pass
+
+ return context
+
+ def _spec_context(self, spec_path: Path) -> str:
+ """Build context from AtomizerSpec v2.0 file"""
+ context = "**Format**: AtomizerSpec v2.0\n\n"
+
+ try:
+ with open(spec_path) as f:
+ spec = json.load(f)
+
+ context += "## Configuration\n\n"
+
+ # Design variables
+ dvs = spec.get("design_variables", [])
+ if dvs:
+ context += "**Design Variables:**\n"
+ for dv in dvs[:10]:
+ bounds = dv.get("bounds", {})
+ bound_str = f"[{bounds.get('min', '?')}, {bounds.get('max', '?')}]"
+ enabled = "✓" if dv.get("enabled", True) else "✗"
+ context += f"- {dv.get('name', 'unnamed')}: {bound_str} {enabled}\n"
+ if len(dvs) > 10:
+ context += f"- ... and {len(dvs) - 10} more\n"
+
+ # Extractors
+ extractors = spec.get("extractors", [])
+ if extractors:
+ context += "\n**Extractors:**\n"
+ for ext in extractors:
+ ext_type = ext.get("type", "unknown")
+ outputs = ext.get("outputs", [])
+ output_names = [o.get("name", "?") for o in outputs[:3]]
+ builtin = "builtin" if ext.get("builtin", True) else "custom"
+ context += f"- {ext.get('name', 'unnamed')} ({ext_type}, {builtin}): outputs {output_names}\n"
+
+ # Objectives
+ objs = spec.get("objectives", [])
+ if objs:
+ context += "\n**Objectives:**\n"
+ for obj in objs:
+ direction = obj.get("direction", "minimize")
+ weight = obj.get("weight", 1.0)
+ context += f"- {obj.get('name', 'unnamed')} ({direction}, weight={weight})\n"
+
+ # Constraints
+ constraints = spec.get("constraints", [])
+ if constraints:
+ context += "\n**Constraints:**\n"
+ for c in constraints:
+ op = c.get("operator", "<=")
+ thresh = c.get("threshold", "?")
+ context += f"- {c.get('name', 'unnamed')}: {op} {thresh}\n"
+
+ # Optimization settings
+ opt = spec.get("optimization", {})
+ algo = opt.get("algorithm", {})
+ budget = opt.get("budget", {})
+ method = algo.get("type", "TPE")
+ max_trials = budget.get("max_trials", "not set")
+ context += f"\n**Optimization**: {method}, max_trials: {max_trials}\n"
+
+ # Surrogate
+ surrogate = opt.get("surrogate", {})
+ if surrogate.get("enabled"):
+ sur_type = surrogate.get("type", "gaussian_process")
+ context += f"**Surrogate**: {sur_type} enabled\n"
+
+ except (json.JSONDecodeError, IOError) as e:
+ context += f"\n*Spec file exists but could not be parsed: {e}*\n"
+
+ return context
+
+ def _legacy_config_context(self, study_dir: Path) -> str:
+ """Build context from legacy optimization_config.json"""
+ context = "**Format**: Legacy optimization_config.json\n\n"
+
config_path = study_dir / "1_setup" / "optimization_config.json"
if not config_path.exists():
config_path = study_dir / "optimization_config.json"
@@ -135,30 +249,8 @@ Important guidelines:
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Config file exists but could not be parsed: {e}*\n"
-
- # Check for results
- db_path = study_dir / "3_results" / "study.db"
- if db_path.exists():
- try:
- conn = sqlite3.connect(db_path)
- count = conn.execute(
- "SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
- ).fetchone()[0]
-
- best = conn.execute("""
- SELECT MIN(tv.value) FROM trial_values tv
- JOIN trials t ON tv.trial_id = t.trial_id
- WHERE t.state = 'COMPLETE'
- """).fetchone()[0]
-
- context += f"\n## Results Status\n\n"
- context += f"- **Trials completed**: {count}\n"
- if best is not None:
- context += f"- **Best objective**: {best:.6f}\n"
-
- conn.close()
- except Exception:
- pass
+ else:
+ context += "*No configuration file found.*\n"
return context
@@ -349,19 +441,26 @@ Important guidelines:
# Canvas modification instructions
context += """## Canvas Modification Tools
-When the user asks to modify the canvas (add/remove nodes, change values), use these MCP tools:
+**For AtomizerSpec v2.0 studies (preferred):**
+Use spec tools when working with v2.0 studies (check if study uses `atomizer_spec.json`):
+- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
+- `spec_add_node` - Add design variables, extractors, objectives, or constraints
+- `spec_remove_node` - Remove nodes from the spec
+- `spec_add_custom_extractor` - Add a Python-based custom extractor function
+
+**For Legacy Canvas (optimization_config.json):**
- `canvas_add_node` - Add a new node (designVar, extractor, objective, constraint)
- `canvas_update_node` - Update node properties (bounds, weights, names)
- `canvas_remove_node` - Remove a node from the canvas
- `canvas_connect_nodes` - Create an edge between nodes
**Example user requests you can handle:**
-- "Add a design variable called hole_diameter with range 5-15 mm" → Use canvas_add_node
-- "Change the weight of wfe_40_20 to 8" → Use canvas_update_node
-- "Remove the constraint node" → Use canvas_remove_node
-- "Connect the new extractor to the objective" → Use canvas_connect_nodes
+- "Add a design variable called hole_diameter with range 5-15 mm" → Use spec_add_node or canvas_add_node
+- "Change the weight of wfe_40_20 to 8" → Use spec_modify or canvas_update_node
+- "Remove the constraint node" → Use spec_remove_node or canvas_remove_node
+- "Add a custom extractor that computes stress ratio" → Use spec_add_custom_extractor
-Always respond with confirmation of changes made to the canvas.
+Always respond with confirmation of changes made to the canvas/spec.
"""
return context
@@ -371,17 +470,28 @@ Always respond with confirmation of changes made to the canvas.
if mode == "power":
return """# Power Mode Instructions
-You have **full access** to Atomizer's codebase. You can:
-- Edit any file using `edit_file` tool
-- Create new files with `create_file` tool
-- Create new extractors with `create_extractor` tool
-- Run shell commands with `run_shell_command` tool
-- Search codebase with `search_codebase` tool
-- Commit and push changes
+You have **FULL ACCESS** to modify Atomizer studies. **DO NOT ASK FOR PERMISSION** - just do it.
-**Use these powers responsibly.** Always explain what you're doing and why.
+## Direct Actions (no confirmation needed):
+- **Add design variables**: Use `canvas_add_node` or `spec_add_node` with node_type="designVar"
+- **Add extractors**: Use `canvas_add_node` with node_type="extractor"
+- **Add objectives**: Use `canvas_add_node` with node_type="objective"
+- **Add constraints**: Use `canvas_add_node` with node_type="constraint"
+- **Update node properties**: Use `canvas_update_node` or `spec_modify`
+- **Remove nodes**: Use `canvas_remove_node`
+- **Edit atomizer_spec.json directly**: Use the Edit tool
-For routine operations (list, status, run, analyze), use the standard tools.
+## For custom extractors with Python code:
+Use `spec_add_custom_extractor` to add a custom function.
+
+## IMPORTANT:
+- You have --dangerously-skip-permissions enabled
+- The user has explicitly granted you power mode access
+- **ACT IMMEDIATELY** when asked to add/modify/remove things
+- Explain what you did AFTER doing it, not before
+- Do NOT say "I need permission" - you already have it
+
+Example: If user says "add a volume extractor", immediately use canvas_add_node to add it.
"""
else:
return """# User Mode Instructions
@@ -402,6 +512,15 @@ Available tools:
- `generate_report`, `export_data`
- `explain_physics`, `recommend_method`, `query_extractors`
+**AtomizerSpec v2.0 Tools (preferred for new studies):**
+- `spec_get` - Get the full AtomizerSpec for a study
+- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
+- `spec_add_node` - Add design variables, extractors, objectives, or constraints
+- `spec_remove_node` - Remove nodes from the spec
+- `spec_validate` - Validate spec against JSON Schema
+- `spec_add_custom_extractor` - Add a Python-based custom extractor function
+- `spec_create_from_description` - Create a new study from natural language description
+
**Canvas Tools (for visual workflow builder):**
- `validate_canvas_intent` - Validate a canvas-generated optimization intent
- `execute_canvas_intent` - Create a study from a canvas intent
diff --git a/atomizer-dashboard/backend/api/services/interview_engine.py b/atomizer-dashboard/backend/api/services/interview_engine.py
new file mode 100644
index 00000000..f78baded
--- /dev/null
+++ b/atomizer-dashboard/backend/api/services/interview_engine.py
@@ -0,0 +1,454 @@
+"""
+Interview Engine - Guided Study Creation through Conversation
+
+Provides a structured interview flow for creating optimization studies.
+Claude uses this to gather information step-by-step, building a complete
+atomizer_spec.json through natural conversation.
+"""
+
+from typing import Dict, Any, List, Optional, Literal
+from dataclasses import dataclass, field
+from enum import Enum
+from datetime import datetime
+import json
+
+
+class InterviewState(str, Enum):
+ """Current phase of the interview"""
+ NOT_STARTED = "not_started"
+ GATHERING_BASICS = "gathering_basics" # Name, description, goals
+ GATHERING_MODEL = "gathering_model" # Model file, solver type
+ GATHERING_VARIABLES = "gathering_variables" # Design variables
+ GATHERING_EXTRACTORS = "gathering_extractors" # Physics extractors
+ GATHERING_OBJECTIVES = "gathering_objectives" # Objectives
+ GATHERING_CONSTRAINTS = "gathering_constraints" # Constraints
+ GATHERING_SETTINGS = "gathering_settings" # Algorithm, trials
+ REVIEW = "review" # Review before creation
+ COMPLETED = "completed"
+
+
+@dataclass
+class InterviewData:
+ """Accumulated data from the interview"""
+ # Basics
+ study_name: Optional[str] = None
+ category: Optional[str] = None
+ description: Optional[str] = None
+ goals: List[str] = field(default_factory=list)
+
+ # Model
+ sim_file: Optional[str] = None
+ prt_file: Optional[str] = None
+ solver_type: str = "nastran"
+
+ # Design variables
+ design_variables: List[Dict[str, Any]] = field(default_factory=list)
+
+ # Extractors
+ extractors: List[Dict[str, Any]] = field(default_factory=list)
+
+ # Objectives
+ objectives: List[Dict[str, Any]] = field(default_factory=list)
+
+ # Constraints
+ constraints: List[Dict[str, Any]] = field(default_factory=list)
+
+ # Settings
+ algorithm: str = "TPE"
+ max_trials: int = 100
+
+ def to_spec(self) -> Dict[str, Any]:
+ """Convert interview data to atomizer_spec.json format"""
+ # Generate IDs for each element
+ dvs_with_ids = []
+ for i, dv in enumerate(self.design_variables):
+ dv_copy = dv.copy()
+ dv_copy['id'] = f"dv_{i+1:03d}"
+ dv_copy['canvas_position'] = {'x': 50, 'y': 100 + i * 80}
+ dvs_with_ids.append(dv_copy)
+
+ exts_with_ids = []
+ for i, ext in enumerate(self.extractors):
+ ext_copy = ext.copy()
+ ext_copy['id'] = f"ext_{i+1:03d}"
+ ext_copy['canvas_position'] = {'x': 400, 'y': 100 + i * 80}
+ exts_with_ids.append(ext_copy)
+
+ objs_with_ids = []
+ for i, obj in enumerate(self.objectives):
+ obj_copy = obj.copy()
+ obj_copy['id'] = f"obj_{i+1:03d}"
+ obj_copy['canvas_position'] = {'x': 750, 'y': 100 + i * 80}
+ objs_with_ids.append(obj_copy)
+
+ cons_with_ids = []
+ for i, con in enumerate(self.constraints):
+ con_copy = con.copy()
+ con_copy['id'] = f"con_{i+1:03d}"
+ con_copy['canvas_position'] = {'x': 750, 'y': 400 + i * 80}
+ cons_with_ids.append(con_copy)
+
+ return {
+ "meta": {
+ "version": "2.0",
+ "study_name": self.study_name or "untitled_study",
+ "description": self.description or "",
+ "created_at": datetime.now().isoformat(),
+ "created_by": "interview",
+ "modified_at": datetime.now().isoformat(),
+ "modified_by": "interview"
+ },
+ "model": {
+ "sim": {
+ "path": self.sim_file or "",
+ "solver": self.solver_type
+ }
+ },
+ "design_variables": dvs_with_ids,
+ "extractors": exts_with_ids,
+ "objectives": objs_with_ids,
+ "constraints": cons_with_ids,
+ "optimization": {
+ "algorithm": {
+ "type": self.algorithm
+ },
+ "budget": {
+ "max_trials": self.max_trials
+ }
+ },
+ "canvas": {
+ "edges": [],
+ "layout_version": "2.0"
+ }
+ }
+
+
+class InterviewEngine:
+ """
+ Manages the interview flow for study creation.
+
+ Usage:
+ 1. Create engine: engine = InterviewEngine()
+ 2. Start interview: engine.start()
+ 3. Record answers: engine.record_answer("study_name", "bracket_opt")
+ 4. Check progress: engine.get_progress()
+ 5. Generate spec: engine.finalize()
+ """
+
+ def __init__(self):
+ self.state = InterviewState.NOT_STARTED
+ self.data = InterviewData()
+ self.questions_asked: List[str] = []
+ self.errors: List[str] = []
+
+ def start(self) -> Dict[str, Any]:
+ """Start the interview process"""
+ self.state = InterviewState.GATHERING_BASICS
+ return {
+ "state": self.state.value,
+ "message": "Let's create a new optimization study! I'll guide you through the process.",
+ "next_questions": self.get_current_questions()
+ }
+
+ def get_current_questions(self) -> List[Dict[str, Any]]:
+ """Get the questions for the current interview state"""
+ questions = {
+ InterviewState.GATHERING_BASICS: [
+ {
+ "field": "study_name",
+ "question": "What would you like to name this study?",
+ "hint": "Use snake_case, e.g., 'bracket_mass_optimization'",
+ "required": True
+ },
+ {
+ "field": "category",
+ "question": "What category should this study be in?",
+ "hint": "e.g., 'Simple_Bracket', 'M1_Mirror', or leave blank for root",
+ "required": False
+ },
+ {
+ "field": "description",
+ "question": "Briefly describe what you're trying to optimize",
+ "hint": "e.g., 'Minimize bracket mass while maintaining stiffness'",
+ "required": True
+ }
+ ],
+ InterviewState.GATHERING_MODEL: [
+ {
+ "field": "sim_file",
+ "question": "What is the path to your simulation (.sim) file?",
+ "hint": "Relative path from the study folder, e.g., '1_setup/Model_sim1.sim'",
+ "required": True
+ }
+ ],
+ InterviewState.GATHERING_VARIABLES: [
+ {
+ "field": "design_variable",
+ "question": "What parameters do you want to optimize?",
+ "hint": "Tell me the NX expression names and their bounds",
+ "required": True,
+ "multi": True
+ }
+ ],
+ InterviewState.GATHERING_EXTRACTORS: [
+ {
+ "field": "extractor",
+ "question": "What physics quantities do you want to extract from FEA?",
+ "hint": "e.g., mass, max displacement, max stress, frequency, Zernike WFE",
+ "required": True,
+ "multi": True
+ }
+ ],
+ InterviewState.GATHERING_OBJECTIVES: [
+ {
+ "field": "objective",
+ "question": "What do you want to optimize?",
+ "hint": "Tell me which extracted quantities to minimize or maximize",
+ "required": True,
+ "multi": True
+ }
+ ],
+ InterviewState.GATHERING_CONSTRAINTS: [
+ {
+ "field": "constraint",
+ "question": "Do you have any constraints? (e.g., max stress, min frequency)",
+ "hint": "You can say 'none' if you don't have any",
+ "required": False,
+ "multi": True
+ }
+ ],
+ InterviewState.GATHERING_SETTINGS: [
+ {
+ "field": "algorithm",
+ "question": "Which optimization algorithm would you like to use?",
+ "hint": "Options: TPE (default), CMA-ES, NSGA-II, RandomSearch",
+ "required": False
+ },
+ {
+ "field": "max_trials",
+ "question": "How many trials (FEA evaluations) should we run?",
+ "hint": "Default is 100. More trials = better results but longer runtime",
+ "required": False
+ }
+ ],
+ InterviewState.REVIEW: [
+ {
+ "field": "confirm",
+ "question": "Does this configuration look correct? (yes/no)",
+ "required": True
+ }
+ ]
+ }
+ return questions.get(self.state, [])
+
+ def record_answer(self, field: str, value: Any) -> Dict[str, Any]:
+ """Record an answer and potentially advance the state"""
+ self.questions_asked.append(field)
+
+ # Handle different field types
+ if field == "study_name":
+ self.data.study_name = value
+ elif field == "category":
+ self.data.category = value if value else None
+ elif field == "description":
+ self.data.description = value
+ elif field == "sim_file":
+ self.data.sim_file = value
+ elif field == "design_variable":
+ # Value should be a dict with name, min, max, etc.
+ if isinstance(value, dict):
+ self.data.design_variables.append(value)
+ elif isinstance(value, list):
+ self.data.design_variables.extend(value)
+ elif field == "extractor":
+ if isinstance(value, dict):
+ self.data.extractors.append(value)
+ elif isinstance(value, list):
+ self.data.extractors.extend(value)
+ elif field == "objective":
+ if isinstance(value, dict):
+ self.data.objectives.append(value)
+ elif isinstance(value, list):
+ self.data.objectives.extend(value)
+ elif field == "constraint":
+                if value and (not isinstance(value, str) or value.lower() not in ["none", "no", "skip"]):
+ if isinstance(value, dict):
+ self.data.constraints.append(value)
+ elif isinstance(value, list):
+ self.data.constraints.extend(value)
+ elif field == "algorithm":
+ if value in ["TPE", "CMA-ES", "NSGA-II", "RandomSearch"]:
+ self.data.algorithm = value
+ elif field == "max_trials":
+ try:
+ self.data.max_trials = int(value)
+ except (ValueError, TypeError):
+ pass
+ elif field == "confirm":
+            if isinstance(value, str) and value.lower() in ["yes", "y", "confirm", "ok"]:
+ self.state = InterviewState.COMPLETED
+
+ return {
+ "state": self.state.value,
+ "recorded": {field: value},
+ "data_so_far": self.get_summary()
+ }
+
+ def advance_state(self) -> Dict[str, Any]:
+ """Advance to the next interview state"""
+ state_order = [
+ InterviewState.NOT_STARTED,
+ InterviewState.GATHERING_BASICS,
+ InterviewState.GATHERING_MODEL,
+ InterviewState.GATHERING_VARIABLES,
+ InterviewState.GATHERING_EXTRACTORS,
+ InterviewState.GATHERING_OBJECTIVES,
+ InterviewState.GATHERING_CONSTRAINTS,
+ InterviewState.GATHERING_SETTINGS,
+ InterviewState.REVIEW,
+ InterviewState.COMPLETED
+ ]
+
+ current_idx = state_order.index(self.state)
+ if current_idx < len(state_order) - 1:
+ self.state = state_order[current_idx + 1]
+
+ return {
+ "state": self.state.value,
+ "next_questions": self.get_current_questions()
+ }
+
+ def get_summary(self) -> Dict[str, Any]:
+ """Get a summary of collected data"""
+ return {
+ "study_name": self.data.study_name,
+ "category": self.data.category,
+ "description": self.data.description,
+ "model": self.data.sim_file,
+ "design_variables": len(self.data.design_variables),
+ "extractors": len(self.data.extractors),
+ "objectives": len(self.data.objectives),
+ "constraints": len(self.data.constraints),
+ "algorithm": self.data.algorithm,
+ "max_trials": self.data.max_trials
+ }
+
+ def get_progress(self) -> Dict[str, Any]:
+ """Get interview progress information"""
+ state_progress = {
+ InterviewState.NOT_STARTED: 0,
+ InterviewState.GATHERING_BASICS: 15,
+ InterviewState.GATHERING_MODEL: 25,
+ InterviewState.GATHERING_VARIABLES: 40,
+ InterviewState.GATHERING_EXTRACTORS: 55,
+ InterviewState.GATHERING_OBJECTIVES: 70,
+ InterviewState.GATHERING_CONSTRAINTS: 80,
+ InterviewState.GATHERING_SETTINGS: 90,
+ InterviewState.REVIEW: 95,
+ InterviewState.COMPLETED: 100
+ }
+
+ return {
+ "state": self.state.value,
+ "progress_percent": state_progress.get(self.state, 0),
+ "summary": self.get_summary(),
+ "current_questions": self.get_current_questions()
+ }
+
+ def validate(self) -> Dict[str, Any]:
+ """Validate the collected data before finalizing"""
+ errors = []
+ warnings = []
+
+ # Required fields
+ if not self.data.study_name:
+ errors.append("Study name is required")
+
+ if not self.data.design_variables:
+ errors.append("At least one design variable is required")
+
+ if not self.data.extractors:
+ errors.append("At least one extractor is required")
+
+ if not self.data.objectives:
+ errors.append("At least one objective is required")
+
+ # Warnings
+ if not self.data.sim_file:
+ warnings.append("No simulation file specified - you'll need to add one manually")
+
+ if not self.data.constraints:
+ warnings.append("No constraints defined - optimization will be unconstrained")
+
+ return {
+ "valid": len(errors) == 0,
+ "errors": errors,
+ "warnings": warnings
+ }
+
+ def finalize(self) -> Dict[str, Any]:
+ """Generate the final atomizer_spec.json"""
+ validation = self.validate()
+
+ if not validation["valid"]:
+ return {
+ "success": False,
+ "errors": validation["errors"]
+ }
+
+ spec = self.data.to_spec()
+
+ return {
+ "success": True,
+ "spec": spec,
+ "warnings": validation.get("warnings", [])
+ }
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Serialize engine state for persistence"""
+ return {
+ "state": self.state.value,
+ "data": {
+ "study_name": self.data.study_name,
+ "category": self.data.category,
+ "description": self.data.description,
+ "goals": self.data.goals,
+ "sim_file": self.data.sim_file,
+ "prt_file": self.data.prt_file,
+ "solver_type": self.data.solver_type,
+ "design_variables": self.data.design_variables,
+ "extractors": self.data.extractors,
+ "objectives": self.data.objectives,
+ "constraints": self.data.constraints,
+ "algorithm": self.data.algorithm,
+ "max_trials": self.data.max_trials
+ },
+ "questions_asked": self.questions_asked,
+ "errors": self.errors
+ }
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "InterviewEngine":
+ """Restore engine from serialized state"""
+ engine = cls()
+ engine.state = InterviewState(data.get("state", "not_started"))
+
+ d = data.get("data", {})
+ engine.data.study_name = d.get("study_name")
+ engine.data.category = d.get("category")
+ engine.data.description = d.get("description")
+ engine.data.goals = d.get("goals", [])
+ engine.data.sim_file = d.get("sim_file")
+ engine.data.prt_file = d.get("prt_file")
+ engine.data.solver_type = d.get("solver_type", "nastran")
+ engine.data.design_variables = d.get("design_variables", [])
+ engine.data.extractors = d.get("extractors", [])
+ engine.data.objectives = d.get("objectives", [])
+ engine.data.constraints = d.get("constraints", [])
+ engine.data.algorithm = d.get("algorithm", "TPE")
+ engine.data.max_trials = d.get("max_trials", 100)
+
+ engine.questions_asked = data.get("questions_asked", [])
+ engine.errors = data.get("errors", [])
+
+ return engine
diff --git a/atomizer-dashboard/backend/api/services/session_manager.py b/atomizer-dashboard/backend/api/services/session_manager.py
index 37401cb2..d3a0e072 100644
--- a/atomizer-dashboard/backend/api/services/session_manager.py
+++ b/atomizer-dashboard/backend/api/services/session_manager.py
@@ -219,6 +219,18 @@ class SessionManager:
full_response = result["stdout"] or ""
if full_response:
+ # Check if response contains canvas modifications (from MCP tools)
+ import logging
+ logger = logging.getLogger(__name__)
+
+ modifications = self._extract_canvas_modifications(full_response)
+ logger.info(f"[SEND_MSG] Found {len(modifications)} canvas modifications to send")
+
+ for mod in modifications:
+ logger.info(f"[SEND_MSG] Sending canvas_modification: {mod.get('action')} {mod.get('nodeType')}")
+ yield {"type": "canvas_modification", "modification": mod}
+
+ # Always send the text response
yield {"type": "text", "content": full_response}
if result["returncode"] != 0 and result["stderr"]:
@@ -292,6 +304,90 @@ class SessionManager:
**({} if not db_record else {"db_record": db_record}),
}
+ def _extract_canvas_modifications(self, response: str) -> List[Dict]:
+ """
+ Extract canvas modification objects from Claude's response.
+
+ MCP tools like canvas_add_node return JSON with a 'modification' field.
+ This method finds and extracts those modifications so the frontend can apply them.
+ """
+ import re
+ import logging
+ logger = logging.getLogger(__name__)
+
+ modifications = []
+
+ # Debug: log what we're searching
+ logger.info(f"[CANVAS_MOD] Searching response ({len(response)} chars) for modifications")
+
+ # Check if "modification" even exists in the response
+ if '"modification"' not in response:
+ logger.info("[CANVAS_MOD] No 'modification' key found in response")
+ return modifications
+
+ try:
+ # Method 1: Look for JSON in code fences
+ code_block_pattern = r'```(?:json)?\s*([\s\S]*?)```'
+ for match in re.finditer(code_block_pattern, response):
+ block_content = match.group(1).strip()
+ try:
+ obj = json.loads(block_content)
+ if isinstance(obj, dict) and 'modification' in obj:
+ logger.info(f"[CANVAS_MOD] Found modification in code fence: {obj['modification']}")
+ modifications.append(obj['modification'])
+ except json.JSONDecodeError:
+ continue
+
+ # Method 2: Find JSON objects using proper brace matching
+ # This handles nested objects correctly
+ i = 0
+ while i < len(response):
+ if response[i] == '{':
+ # Found a potential JSON start, find matching close
+ brace_count = 1
+ j = i + 1
+ in_string = False
+ escape_next = False
+
+ while j < len(response) and brace_count > 0:
+ char = response[j]
+
+ if escape_next:
+ escape_next = False
+ elif char == '\\':
+ escape_next = True
+                        elif char == '"':
+ in_string = not in_string
+ elif not in_string:
+ if char == '{':
+ brace_count += 1
+ elif char == '}':
+ brace_count -= 1
+ j += 1
+
+ if brace_count == 0:
+ potential_json = response[i:j]
+ try:
+ obj = json.loads(potential_json)
+ if isinstance(obj, dict) and 'modification' in obj:
+ mod = obj['modification']
+ # Avoid duplicates
+ if mod not in modifications:
+ logger.info(f"[CANVAS_MOD] Found inline modification: action={mod.get('action')}, nodeType={mod.get('nodeType')}")
+ modifications.append(mod)
+                        except json.JSONDecodeError:
+ # Not valid JSON, skip
+ pass
+ i = j
+ else:
+ i += 1
+
+ except Exception as e:
+ logger.error(f"[CANVAS_MOD] Error extracting modifications: {e}")
+
+ logger.info(f"[CANVAS_MOD] Extracted {len(modifications)} modification(s)")
+ return modifications
+
def _build_mcp_config(self, mode: Literal["user", "power"]) -> dict:
"""Build MCP configuration for Claude"""
return {
diff --git a/atomizer-dashboard/backend/api/services/spec_manager.py b/atomizer-dashboard/backend/api/services/spec_manager.py
new file mode 100644
index 00000000..727b28b1
--- /dev/null
+++ b/atomizer-dashboard/backend/api/services/spec_manager.py
@@ -0,0 +1,747 @@
+"""
+SpecManager Service
+
+Central service for managing AtomizerSpec v2.0.
+All spec modifications flow through this service.
+
+Features:
+- Load/save specs with validation
+- Atomic writes with conflict detection
+- Patch operations with JSONPath support
+- Node CRUD operations
+- Custom function support
+- WebSocket broadcast integration
+"""
+
+import hashlib
+import json
+import re
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+# Add optimization_engine to path if needed
+ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent
+if str(ATOMIZER_ROOT) not in sys.path:
+ sys.path.insert(0, str(ATOMIZER_ROOT))
+
+from optimization_engine.config.spec_models import (
+ AtomizerSpec,
+ DesignVariable,
+ Extractor,
+ Objective,
+ Constraint,
+ CanvasPosition,
+ CanvasEdge,
+ ExtractorType,
+ CustomFunction,
+ ExtractorOutput,
+ ValidationReport,
+)
+from optimization_engine.config.spec_validator import (
+ SpecValidator,
+ SpecValidationError,
+)
+
+
+class SpecManagerError(Exception):
+ """Base error for SpecManager operations."""
+ pass
+
+
+class SpecNotFoundError(SpecManagerError):
+ """Raised when spec file doesn't exist."""
+ pass
+
+
+class SpecConflictError(SpecManagerError):
+ """Raised when spec has been modified by another client."""
+
+ def __init__(self, message: str, current_hash: str):
+ super().__init__(message)
+ self.current_hash = current_hash
+
+
+class WebSocketSubscriber:
+ """Protocol for WebSocket subscribers."""
+
+ async def send_json(self, data: Dict[str, Any]) -> None:
+ """Send JSON data to subscriber."""
+ raise NotImplementedError
+
+
+class SpecManager:
+ """
+ Central service for managing AtomizerSpec.
+
+ All modifications go through this service to ensure:
+ - Validation on every change
+ - Atomic file writes
+ - Conflict detection via hashing
+ - WebSocket broadcast to all clients
+ """
+
+ SPEC_FILENAME = "atomizer_spec.json"
+
+ def __init__(self, study_path: Union[str, Path]):
+ """
+ Initialize SpecManager for a study.
+
+ Args:
+ study_path: Path to the study directory
+ """
+ self.study_path = Path(study_path)
+ self.spec_path = self.study_path / self.SPEC_FILENAME
+ self.validator = SpecValidator()
+ self._subscribers: List[WebSocketSubscriber] = []
+ self._last_hash: Optional[str] = None
+
+ # =========================================================================
+ # Core CRUD Operations
+ # =========================================================================
+
+ def load(self, validate: bool = True) -> AtomizerSpec:
+ """
+ Load and optionally validate the spec.
+
+ Args:
+ validate: Whether to validate the spec
+
+ Returns:
+ AtomizerSpec instance
+
+ Raises:
+ SpecNotFoundError: If spec file doesn't exist
+ SpecValidationError: If validation fails
+ """
+ if not self.spec_path.exists():
+ raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
+
+ with open(self.spec_path, 'r', encoding='utf-8') as f:
+ data = json.load(f)
+
+ if validate:
+ self.validator.validate(data, strict=True)
+
+ spec = AtomizerSpec.model_validate(data)
+ self._last_hash = self._compute_hash(data)
+ return spec
+
+ def load_raw(self) -> Dict[str, Any]:
+ """
+ Load spec as raw dict without parsing.
+
+ Returns:
+ Raw spec dict
+
+ Raises:
+ SpecNotFoundError: If spec file doesn't exist
+ """
+ if not self.spec_path.exists():
+ raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
+
+ with open(self.spec_path, 'r', encoding='utf-8') as f:
+ return json.load(f)
+
+ def save(
+ self,
+ spec: Union[AtomizerSpec, Dict[str, Any]],
+ modified_by: str = "api",
+ expected_hash: Optional[str] = None
+ ) -> str:
+ """
+ Save spec with validation and broadcast.
+
+ Args:
+ spec: Spec to save (AtomizerSpec or dict)
+ modified_by: Who/what is making the change
+ expected_hash: If provided, verify current file hash matches
+
+ Returns:
+ New spec hash
+
+ Raises:
+ SpecValidationError: If validation fails
+ SpecConflictError: If expected_hash doesn't match current
+ """
+ # Convert to dict if needed
+ if isinstance(spec, AtomizerSpec):
+ data = spec.model_dump(mode='json')
+ else:
+ data = spec
+
+ # Check for conflicts if expected_hash provided
+ if expected_hash and self.spec_path.exists():
+ current_hash = self.get_hash()
+ if current_hash != expected_hash:
+ raise SpecConflictError(
+ "Spec was modified by another client",
+ current_hash=current_hash
+ )
+
+ # Update metadata
+ now = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
+ data["meta"]["modified"] = now
+ data["meta"]["modified_by"] = modified_by
+
+ # Validate
+ self.validator.validate(data, strict=True)
+
+ # Compute new hash
+ new_hash = self._compute_hash(data)
+
+ # Atomic write (write to temp, then rename)
+ temp_path = self.spec_path.with_suffix('.tmp')
+ with open(temp_path, 'w', encoding='utf-8') as f:
+ json.dump(data, f, indent=2, ensure_ascii=False)
+
+ temp_path.replace(self.spec_path)
+
+ # Update cached hash
+ self._last_hash = new_hash
+
+ # Broadcast to subscribers
+ self._broadcast({
+ "type": "spec_updated",
+ "hash": new_hash,
+ "modified_by": modified_by,
+ "timestamp": now
+ })
+
+ return new_hash
+
+ def exists(self) -> bool:
+ """Check if spec file exists."""
+ return self.spec_path.exists()
+
+ def get_hash(self) -> str:
+ """Get current spec hash."""
+ if not self.spec_path.exists():
+ return ""
+ with open(self.spec_path, 'r', encoding='utf-8') as f:
+ data = json.load(f)
+ return self._compute_hash(data)
+
+ def validate_and_report(self) -> ValidationReport:
+ """
+ Run full validation and return detailed report.
+
+ Returns:
+ ValidationReport with errors, warnings, summary
+ """
+ if not self.spec_path.exists():
+ raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
+
+ data = self.load_raw()
+ return self.validator.validate(data, strict=False)
+
+ # =========================================================================
+ # Patch Operations
+ # =========================================================================
+
+ def patch(
+ self,
+ path: str,
+ value: Any,
+ modified_by: str = "api"
+ ) -> AtomizerSpec:
+ """
+ Apply a JSONPath-style modification.
+
+ Args:
+ path: JSONPath like "design_variables[0].bounds.max"
+ value: New value to set
+ modified_by: Who/what is making the change
+
+ Returns:
+ Updated AtomizerSpec
+ """
+ data = self.load_raw()
+
+ # Validate the partial update
+ spec = AtomizerSpec.model_validate(data)
+ is_valid, errors = self.validator.validate_partial(path, value, spec)
+ if not is_valid:
+ raise SpecValidationError(f"Invalid update: {'; '.join(errors)}")
+
+ # Apply the patch
+ self._apply_patch(data, path, value)
+
+ # Save and return
+ self.save(data, modified_by)
+ return self.load(validate=False)
+
+ def _apply_patch(self, data: Dict, path: str, value: Any) -> None:
+ """
+ Apply a patch to the data dict.
+
+ Supports paths like:
+ - "meta.description"
+ - "design_variables[0].bounds.max"
+ - "objectives[1].weight"
+ """
+ parts = self._parse_path(path)
+ if not parts:
+ raise ValueError(f"Invalid path: {path}")
+
+ # Navigate to parent
+ current = data
+ for part in parts[:-1]:
+ if isinstance(current, list):
+ idx = int(part)
+ current = current[idx]
+ else:
+ current = current[part]
+
+ # Set final value
+ final_key = parts[-1]
+ if isinstance(current, list):
+ idx = int(final_key)
+ current[idx] = value
+ else:
+ current[final_key] = value
+
+ def _parse_path(self, path: str) -> List[str]:
+ """Parse JSONPath into parts."""
+ # Handle both dot notation and bracket notation
+ parts = []
+ for part in re.split(r'\.|\[|\]', path):
+ if part:
+ parts.append(part)
+ return parts
+
+ # =========================================================================
+ # Node Operations
+ # =========================================================================
+
+ def add_node(
+ self,
+ node_type: str,
+ node_data: Dict[str, Any],
+ modified_by: str = "canvas"
+ ) -> str:
+ """
+ Add a new node (design var, extractor, objective, constraint).
+
+ Args:
+ node_type: One of 'designVar', 'extractor', 'objective', 'constraint'
+ node_data: Node data without ID
+ modified_by: Who/what is making the change
+
+ Returns:
+ Generated node ID
+ """
+ data = self.load_raw()
+
+ # Generate ID
+ node_id = self._generate_id(node_type, data)
+ node_data["id"] = node_id
+
+ # Add canvas position if not provided
+ if "canvas_position" not in node_data:
+ node_data["canvas_position"] = self._auto_position(node_type, data)
+
+ # Add to appropriate section
+ section = self._get_section_for_type(node_type)
+
+ if section not in data or data[section] is None:
+ data[section] = []
+
+ data[section].append(node_data)
+
+ self.save(data, modified_by)
+
+ # Broadcast node addition
+ self._broadcast({
+ "type": "node_added",
+ "node_type": node_type,
+ "node_id": node_id,
+ "modified_by": modified_by
+ })
+
+ return node_id
+
+ def update_node(
+ self,
+ node_id: str,
+ updates: Dict[str, Any],
+ modified_by: str = "canvas"
+ ) -> None:
+ """
+ Update an existing node.
+
+ Args:
+ node_id: ID of the node to update
+ updates: Dict of fields to update
+ modified_by: Who/what is making the change
+ """
+ data = self.load_raw()
+
+ # Find and update the node
+ found = False
+ for section in ["design_variables", "extractors", "objectives", "constraints"]:
+ if section not in data or data[section] is None:
+ continue
+ for node in data[section]:
+ if node.get("id") == node_id:
+ node.update(updates)
+ found = True
+ break
+ if found:
+ break
+
+ if not found:
+ raise SpecManagerError(f"Node not found: {node_id}")
+
+ self.save(data, modified_by)
+
+ def remove_node(
+ self,
+ node_id: str,
+ modified_by: str = "canvas"
+ ) -> None:
+ """
+ Remove a node and all edges referencing it.
+
+ Args:
+ node_id: ID of the node to remove
+ modified_by: Who/what is making the change
+ """
+ data = self.load_raw()
+
+ # Find and remove node
+ removed = False
+ for section in ["design_variables", "extractors", "objectives", "constraints"]:
+ if section not in data or data[section] is None:
+ continue
+ original_len = len(data[section])
+ data[section] = [n for n in data[section] if n.get("id") != node_id]
+ if len(data[section]) < original_len:
+ removed = True
+ break
+
+ if not removed:
+ raise SpecManagerError(f"Node not found: {node_id}")
+
+ # Remove edges referencing this node
+ if "canvas" in data and data["canvas"] and "edges" in data["canvas"]:
+ data["canvas"]["edges"] = [
+ e for e in data["canvas"]["edges"]
+ if e.get("source") != node_id and e.get("target") != node_id
+ ]
+
+ self.save(data, modified_by)
+
+ # Broadcast node removal
+ self._broadcast({
+ "type": "node_removed",
+ "node_id": node_id,
+ "modified_by": modified_by
+ })
+
+ def update_node_position(
+ self,
+ node_id: str,
+ position: Dict[str, float],
+ modified_by: str = "canvas"
+ ) -> None:
+ """
+ Update a node's canvas position.
+
+ Args:
+ node_id: ID of the node
+ position: Dict with x, y coordinates
+ modified_by: Who/what is making the change
+ """
+ self.update_node(node_id, {"canvas_position": position}, modified_by)
+
+ def add_edge(
+ self,
+ source: str,
+ target: str,
+ modified_by: str = "canvas"
+ ) -> None:
+ """
+ Add a canvas edge between nodes.
+
+ Args:
+ source: Source node ID
+ target: Target node ID
+ modified_by: Who/what is making the change
+ """
+ data = self.load_raw()
+
+ # Initialize canvas section if needed
+ if "canvas" not in data or data["canvas"] is None:
+ data["canvas"] = {}
+ if "edges" not in data["canvas"] or data["canvas"]["edges"] is None:
+ data["canvas"]["edges"] = []
+
+ # Check for duplicate
+ for edge in data["canvas"]["edges"]:
+ if edge.get("source") == source and edge.get("target") == target:
+ return # Already exists
+
+ data["canvas"]["edges"].append({
+ "source": source,
+ "target": target
+ })
+
+ self.save(data, modified_by)
+
+ def remove_edge(
+ self,
+ source: str,
+ target: str,
+ modified_by: str = "canvas"
+ ) -> None:
+ """
+ Remove a canvas edge.
+
+ Args:
+ source: Source node ID
+ target: Target node ID
+ modified_by: Who/what is making the change
+ """
+ data = self.load_raw()
+
+ if "canvas" in data and data["canvas"] and "edges" in data["canvas"]:
+ data["canvas"]["edges"] = [
+ e for e in data["canvas"]["edges"]
+ if not (e.get("source") == source and e.get("target") == target)
+ ]
+
+ self.save(data, modified_by)
+
+ # =========================================================================
+ # Custom Function Support
+ # =========================================================================
+
+ def add_custom_function(
+ self,
+ name: str,
+ code: str,
+ outputs: List[str],
+ description: Optional[str] = None,
+ modified_by: str = "claude"
+ ) -> str:
+ """
+ Add a custom extractor function.
+
+ Args:
+ name: Function name
+ code: Python source code
+ outputs: List of output names
+ description: Optional description
+ modified_by: Who/what is making the change
+
+ Returns:
+ Generated extractor ID
+
+ Raises:
+ SpecValidationError: If Python syntax is invalid
+ """
+ # Validate Python syntax
+ try:
+ compile(code, f"", "exec")
+ except SyntaxError as e:
+ raise SpecValidationError(
+ f"Invalid Python syntax: {e.msg} at line {e.lineno}"
+ )
+
+ data = self.load_raw()
+
+ # Generate extractor ID
+ ext_id = self._generate_id("extractor", data)
+
+ # Create extractor
+ extractor = {
+ "id": ext_id,
+ "name": description or f"Custom: {name}",
+ "type": "custom_function",
+ "builtin": False,
+ "function": {
+ "name": name,
+ "module": "custom_extractors.dynamic",
+ "source_code": code
+ },
+ "outputs": [{"name": o, "metric": "custom"} for o in outputs],
+ "canvas_position": self._auto_position("extractor", data)
+ }
+
+ data["extractors"].append(extractor)
+ self.save(data, modified_by)
+
+ return ext_id
+
+ def update_custom_function(
+ self,
+ extractor_id: str,
+ code: Optional[str] = None,
+ outputs: Optional[List[str]] = None,
+ modified_by: str = "claude"
+ ) -> None:
+ """
+ Update an existing custom function.
+
+ Args:
+ extractor_id: ID of the custom extractor
+ code: New Python code (optional)
+ outputs: New outputs (optional)
+ modified_by: Who/what is making the change
+ """
+ data = self.load_raw()
+
+ # Find the extractor
+ extractor = None
+ for ext in data.get("extractors", []):
+ if ext.get("id") == extractor_id:
+ extractor = ext
+ break
+
+ if not extractor:
+ raise SpecManagerError(f"Extractor not found: {extractor_id}")
+
+ if extractor.get("type") != "custom_function":
+ raise SpecManagerError(f"Extractor {extractor_id} is not a custom function")
+
+ # Update code
+ if code is not None:
+ try:
+ compile(code, f"", "exec")
+ except SyntaxError as e:
+ raise SpecValidationError(
+ f"Invalid Python syntax: {e.msg} at line {e.lineno}"
+ )
+ if "function" not in extractor:
+ extractor["function"] = {}
+ extractor["function"]["source_code"] = code
+
+ # Update outputs
+ if outputs is not None:
+ extractor["outputs"] = [{"name": o, "metric": "custom"} for o in outputs]
+
+ self.save(data, modified_by)
+
+ # =========================================================================
+ # WebSocket Subscription
+ # =========================================================================
+
+ def subscribe(self, subscriber: WebSocketSubscriber) -> None:
+ """Subscribe to spec changes."""
+ if subscriber not in self._subscribers:
+ self._subscribers.append(subscriber)
+
+ def unsubscribe(self, subscriber: WebSocketSubscriber) -> None:
+ """Unsubscribe from spec changes."""
+ if subscriber in self._subscribers:
+ self._subscribers.remove(subscriber)
+
+ def _broadcast(self, message: Dict[str, Any]) -> None:
+ """Broadcast message to all subscribers."""
+ import asyncio
+
+ for subscriber in self._subscribers:
+ try:
+ # Handle both sync and async contexts
+ try:
+ loop = asyncio.get_running_loop()
+ loop.create_task(subscriber.send_json(message))
+ except RuntimeError:
+ # No running loop, try direct call if possible
+ pass
+ except Exception:
+ # Subscriber may have disconnected
+ pass
+
+ # =========================================================================
+ # Helper Methods
+ # =========================================================================
+
+ def _compute_hash(self, data: Dict) -> str:
+ """Compute hash of spec data for conflict detection."""
+ # Sort keys for consistent hashing
+ json_str = json.dumps(data, sort_keys=True, ensure_ascii=False)
+ return hashlib.sha256(json_str.encode()).hexdigest()[:16]
+
+ def _generate_id(self, node_type: str, data: Dict) -> str:
+ """Generate unique ID for a node type."""
+ prefix_map = {
+ "designVar": "dv",
+ "design_variable": "dv",
+ "extractor": "ext",
+ "objective": "obj",
+ "constraint": "con"
+ }
+ prefix = prefix_map.get(node_type, node_type[:3])
+
+ # Find existing IDs
+ section = self._get_section_for_type(node_type)
+ existing_ids: Set[str] = set()
+ if section in data and data[section]:
+ existing_ids = {n.get("id", "") for n in data[section]}
+
+ # Generate next available ID
+ for i in range(1, 1000):
+ new_id = f"{prefix}_{i:03d}"
+ if new_id not in existing_ids:
+ return new_id
+
+ raise SpecManagerError(f"Cannot generate ID for {node_type}: too many nodes")
+
+ def _get_section_for_type(self, node_type: str) -> str:
+ """Map node type to spec section name."""
+ section_map = {
+ "designVar": "design_variables",
+ "design_variable": "design_variables",
+ "extractor": "extractors",
+ "objective": "objectives",
+ "constraint": "constraints"
+ }
+ return section_map.get(node_type, node_type + "s")
+
+ def _auto_position(self, node_type: str, data: Dict) -> Dict[str, float]:
+ """Calculate auto position for a new node."""
+ # Default x positions by type
+ x_positions = {
+ "designVar": 50,
+ "design_variable": 50,
+ "extractor": 740,
+ "objective": 1020,
+ "constraint": 1020
+ }
+
+ x = x_positions.get(node_type, 400)
+
+ # Find max y position for this type
+ section = self._get_section_for_type(node_type)
+ max_y = 0
+ if section in data and data[section]:
+ for node in data[section]:
+ pos = node.get("canvas_position", {})
+ y = pos.get("y", 0)
+ if y > max_y:
+ max_y = y
+
+ # Place below existing nodes
+ y = max_y + 100 if max_y > 0 else 100
+
+ return {"x": x, "y": y}
+
+
+# =========================================================================
+# Factory Function
+# =========================================================================
+
+def get_spec_manager(study_path: Union[str, Path]) -> SpecManager:
+ """
+ Get a SpecManager instance for a study.
+
+ Args:
+ study_path: Path to the study directory
+
+ Returns:
+ SpecManager instance
+ """
+ return SpecManager(study_path)
diff --git a/atomizer-dashboard/frontend/src/App.tsx b/atomizer-dashboard/frontend/src/App.tsx
index a48fc3d9..636be095 100644
--- a/atomizer-dashboard/frontend/src/App.tsx
+++ b/atomizer-dashboard/frontend/src/App.tsx
@@ -30,6 +30,7 @@ function App() {
{/* Canvas page - full screen, no sidebar */}
} />
+ } />
{/* Study pages - with sidebar layout */}
}>
diff --git a/atomizer-dashboard/frontend/src/components/ParallelCoordinatesPlot.tsx b/atomizer-dashboard/frontend/src/components/ParallelCoordinatesPlot.tsx
index 1abf894b..7b8c0600 100644
--- a/atomizer-dashboard/frontend/src/components/ParallelCoordinatesPlot.tsx
+++ b/atomizer-dashboard/frontend/src/components/ParallelCoordinatesPlot.tsx
@@ -26,8 +26,8 @@ interface DesignVariable {
name: string;
parameter?: string; // Optional: the actual parameter name if different from name
unit?: string;
- min: number;
- max: number;
+ min?: number;
+ max?: number;
}
interface Constraint {
diff --git a/atomizer-dashboard/frontend/src/components/ParetoPlot.tsx b/atomizer-dashboard/frontend/src/components/ParetoPlot.tsx
index a865ad35..164ed868 100644
--- a/atomizer-dashboard/frontend/src/components/ParetoPlot.tsx
+++ b/atomizer-dashboard/frontend/src/components/ParetoPlot.tsx
@@ -8,14 +8,15 @@ import { ScatterChart, Scatter, Line, XAxis, YAxis, CartesianGrid, Tooltip, Cell
interface ParetoTrial {
trial_number: number;
- values: [number, number];
+ values: number[]; // Support variable number of objectives
params: Record;
constraint_satisfied?: boolean;
}
interface Objective {
name: string;
- type: 'minimize' | 'maximize';
+ type?: 'minimize' | 'maximize';
+ direction?: 'minimize' | 'maximize'; // Alternative field used by some configs
unit?: string;
}
diff --git a/atomizer-dashboard/frontend/src/components/canvas/index.ts b/atomizer-dashboard/frontend/src/components/canvas/index.ts
index 2d8ab409..0f5ff9f2 100644
--- a/atomizer-dashboard/frontend/src/components/canvas/index.ts
+++ b/atomizer-dashboard/frontend/src/components/canvas/index.ts
@@ -1,5 +1,6 @@
// Main Canvas Component
export { AtomizerCanvas } from './AtomizerCanvas';
+export { SpecRenderer } from './SpecRenderer';
// Palette
export { NodePalette } from './palette/NodePalette';
diff --git a/atomizer-dashboard/frontend/src/components/chat/ChatMessage.tsx b/atomizer-dashboard/frontend/src/components/chat/ChatMessage.tsx
index c18f4262..647c8ad4 100644
--- a/atomizer-dashboard/frontend/src/components/chat/ChatMessage.tsx
+++ b/atomizer-dashboard/frontend/src/components/chat/ChatMessage.tsx
@@ -5,7 +5,7 @@ import { ToolCallCard, ToolCall } from './ToolCallCard';
export interface Message {
id: string;
- role: 'user' | 'assistant';
+ role: 'user' | 'assistant' | 'system';
content: string;
timestamp: Date;
isStreaming?: boolean;
@@ -18,6 +18,18 @@ interface ChatMessageProps {
export const ChatMessage: React.FC = ({ message }) => {
const isAssistant = message.role === 'assistant';
+ const isSystem = message.role === 'system';
+
+ // System messages are displayed centered with special styling
+ if (isSystem) {
+ return (
+
+
+ {message.content}
+
+
+ );
+ }
return (
= ({
const messagesEndRef = useRef
(null);
const [isExpanded, setIsExpanded] = useState(false);
+ // Get canvas state and modification functions from the store
+ const { nodes, edges, addNode, updateNodeData, selectNode, deleteSelected } = useCanvasStore();
+
+ // Build canvas state for chat context
+ const canvasState: CanvasState | null = useMemo(() => {
+ if (nodes.length === 0) return null;
+ return {
+ nodes: nodes.map(n => ({
+ id: n.id,
+ type: n.type,
+ data: n.data,
+ position: n.position,
+ })),
+ edges: edges.map(e => ({
+ id: e.id,
+ source: e.source,
+ target: e.target,
+ })),
+ studyName: selectedStudy?.name || selectedStudy?.id,
+ };
+ }, [nodes, edges, selectedStudy]);
+
+ // Track position offset for multiple node additions
+ const nodeAddCountRef = useRef(0);
+
+ // Handle canvas modifications from the assistant
+ const handleCanvasModification = React.useCallback((modification: CanvasModification) => {
+ console.log('Canvas modification from assistant:', modification);
+
+ switch (modification.action) {
+ case 'add_node':
+ if (modification.nodeType) {
+ const nodeType = modification.nodeType as NodeType;
+ // Calculate position: offset each new node so they don't stack
+ const basePosition = modification.position || { x: 100, y: 100 };
+ const offset = nodeAddCountRef.current * 120;
+ const position = {
+ x: basePosition.x,
+ y: basePosition.y + offset,
+ };
+ nodeAddCountRef.current += 1;
+ // Reset counter after a delay (for batch operations)
+ setTimeout(() => { nodeAddCountRef.current = 0; }, 2000);
+
+ addNode(nodeType, position, modification.data);
+ console.log(`Added ${nodeType} node at position:`, position);
+ }
+ break;
+
+ case 'update_node':
+ if (modification.nodeId && modification.data) {
+ updateNodeData(modification.nodeId, modification.data);
+ }
+ break;
+
+ case 'remove_node':
+ if (modification.nodeId) {
+ selectNode(modification.nodeId);
+ deleteSelected();
+ }
+ break;
+
+ // Edge operations would need additional store methods
+ case 'add_edge':
+ case 'remove_edge':
+ console.warn('Edge modification not yet implemented:', modification);
+ break;
+ }
+ }, [addNode, updateNodeData, selectNode, deleteSelected]);
+
const {
messages,
isThinking,
@@ -41,22 +113,38 @@ export const ChatPane: React.FC = ({
sendMessage,
clearMessages,
switchMode,
+ updateCanvasState,
} = useChat({
studyId: selectedStudy?.id,
mode: 'user',
useWebSocket: true,
+ canvasState,
onError: (err) => console.error('Chat error:', err),
+ onCanvasModification: handleCanvasModification,
});
+ // Keep canvas state synced with chat
+ useEffect(() => {
+ updateCanvasState(canvasState);
+ }, [canvasState, updateCanvasState]);
+
// Auto-scroll to bottom when new messages arrive
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
}, [messages, isThinking]);
- // Welcome message based on study context
- const welcomeMessage = selectedStudy
- ? `Ready to help with **${selectedStudy.name || selectedStudy.id}**. Ask me about optimization progress, results analysis, or how to improve your design.`
- : 'Select a study to get started, or ask me to help you create a new one.';
+ // Welcome message based on study and canvas context
+ const welcomeMessage = useMemo(() => {
+ if (selectedStudy) {
+ return `Ready to help with **${selectedStudy.name || selectedStudy.id}**. Ask me about optimization progress, results analysis, or how to improve your design.`;
+ }
+ if (nodes.length > 0) {
+ const dvCount = nodes.filter(n => n.type === 'designVar').length;
+ const objCount = nodes.filter(n => n.type === 'objective').length;
+ return `I can see your canvas with ${dvCount} design variables and ${objCount} objectives. Ask me to analyze, validate, or create a study from this setup.`;
+ }
+ return 'Select a study to get started, or build an optimization in the Canvas Builder.';
+ }, [selectedStudy, nodes]);
// Collapsed state - just show toggle button
if (!isOpen) {
diff --git a/atomizer-dashboard/frontend/src/components/chat/ToolCallCard.tsx b/atomizer-dashboard/frontend/src/components/chat/ToolCallCard.tsx
index ab8f6028..88d2a8d1 100644
--- a/atomizer-dashboard/frontend/src/components/chat/ToolCallCard.tsx
+++ b/atomizer-dashboard/frontend/src/components/chat/ToolCallCard.tsx
@@ -30,22 +30,25 @@ interface ToolCallCardProps {
}
// Map tool names to friendly labels and icons
-const TOOL_INFO: Record }> = {
+const TOOL_INFO: Record; color?: string }> = {
// Study tools
list_studies: { label: 'Listing Studies', icon: Database },
get_study_status: { label: 'Getting Status', icon: FileSearch },
- create_study: { label: 'Creating Study', icon: Settings },
+ create_study: { label: 'Creating Study', icon: Settings, color: 'text-green-400' },
// Optimization tools
- run_optimization: { label: 'Starting Optimization', icon: Play },
+ run_optimization: { label: 'Starting Optimization', icon: Play, color: 'text-blue-400' },
stop_optimization: { label: 'Stopping Optimization', icon: XCircle },
get_optimization_status: { label: 'Checking Progress', icon: BarChart2 },
// Analysis tools
get_trial_data: { label: 'Querying Trials', icon: Database },
+ query_trials: { label: 'Querying Trials', icon: Database },
+ get_trial_details: { label: 'Getting Trial Details', icon: FileSearch },
analyze_convergence: { label: 'Analyzing Convergence', icon: BarChart2 },
compare_trials: { label: 'Comparing Trials', icon: BarChart2 },
get_best_design: { label: 'Getting Best Design', icon: CheckCircle },
+ get_optimization_summary: { label: 'Getting Summary', icon: BarChart2 },
// Reporting tools
generate_report: { label: 'Generating Report', icon: FileText },
@@ -56,6 +59,25 @@ const TOOL_INFO: Record = ({ toolCall }) => {
)}
{/* Tool icon */}
-
+
{/* Label */}
{info.label}
diff --git a/atomizer-dashboard/frontend/src/hooks/index.ts b/atomizer-dashboard/frontend/src/hooks/index.ts
index 2cac279f..c3280459 100644
--- a/atomizer-dashboard/frontend/src/hooks/index.ts
+++ b/atomizer-dashboard/frontend/src/hooks/index.ts
@@ -3,3 +3,27 @@ export { useCanvasStore } from './useCanvasStore';
export type { OptimizationConfig } from './useCanvasStore';
export { useCanvasChat } from './useCanvasChat';
export { useIntentParser } from './useIntentParser';
+
+// Spec Store (AtomizerSpec v2.0)
+export {
+ useSpecStore,
+ useSpec,
+ useSpecLoading,
+ useSpecError,
+ useSpecValidation,
+ useSelectedNodeId,
+ useSelectedEdgeId,
+ useSpecHash,
+ useSpecIsDirty,
+ useDesignVariables,
+ useExtractors,
+ useObjectives,
+ useConstraints,
+ useCanvasEdges,
+ useSelectedNode,
+} from './useSpecStore';
+
+// WebSocket Sync
+export { useSpecWebSocket } from './useSpecWebSocket';
+export type { ConnectionStatus } from './useSpecWebSocket';
+export { ConnectionStatusIndicator } from '../components/canvas/ConnectionStatusIndicator';
diff --git a/atomizer-dashboard/frontend/src/hooks/useChat.ts b/atomizer-dashboard/frontend/src/hooks/useChat.ts
index 0c679bac..44851640 100644
--- a/atomizer-dashboard/frontend/src/hooks/useChat.ts
+++ b/atomizer-dashboard/frontend/src/hooks/useChat.ts
@@ -11,12 +11,25 @@ export interface CanvasState {
studyPath?: string;
}
+export interface CanvasModification {
+ action: 'add_node' | 'update_node' | 'remove_node' | 'add_edge' | 'remove_edge';
+ nodeType?: string;
+ nodeId?: string;
+ edgeId?: string;
+ data?: Record;
+ source?: string;
+ target?: string;
+ position?: { x: number; y: number };
+}
+
interface UseChatOptions {
studyId?: string | null;
mode?: ChatMode;
useWebSocket?: boolean;
canvasState?: CanvasState | null;
onError?: (error: string) => void;
+ onCanvasModification?: (modification: CanvasModification) => void;
+ onSpecUpdated?: (spec: any) => void; // Called when Claude modifies the spec
}
interface ChatState {
@@ -35,6 +48,8 @@ export function useChat({
useWebSocket = true,
canvasState: initialCanvasState,
onError,
+ onCanvasModification,
+ onSpecUpdated,
}: UseChatOptions = {}) {
const [state, setState] = useState({
messages: [],
@@ -49,6 +64,23 @@ export function useChat({
// Track canvas state for sending with messages
const canvasStateRef = useRef(initialCanvasState || null);
+ // Sync mode prop changes to internal state (triggers WebSocket reconnect)
+ useEffect(() => {
+ if (mode !== state.mode) {
+ console.log(`[useChat] Mode prop changed from ${state.mode} to ${mode}, triggering reconnect`);
+ // Close existing WebSocket
+ wsRef.current?.close();
+ wsRef.current = null;
+ // Update internal state to trigger reconnect
+ setState((prev) => ({
+ ...prev,
+ mode,
+ sessionId: null,
+ isConnected: false,
+ }));
+ }
+ }, [mode]);
+
const abortControllerRef = useRef(null);
const conversationHistoryRef = useRef>([]);
const wsRef = useRef(null);
@@ -82,9 +114,16 @@ export function useChat({
const data = await response.json();
setState((prev) => ({ ...prev, sessionId: data.session_id }));
- // Connect WebSocket
+ // Connect WebSocket - use backend directly in dev mode
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
- const wsUrl = `${protocol}//${window.location.host}/api/claude/sessions/${data.session_id}/ws`;
+ // Use port 8001 to match start-dashboard.bat
+ const backendHost = import.meta.env.DEV ? 'localhost:8001' : window.location.host;
+ // Both modes use the same WebSocket - mode is handled by session config
+ // Power mode uses --dangerously-skip-permissions in CLI
+ // User mode uses --allowedTools to restrict access
+ const wsPath = `/api/claude/sessions/${data.session_id}/ws`;
+ const wsUrl = `${protocol}//${backendHost}${wsPath}`;
+ console.log(`[useChat] Connecting to WebSocket (${state.mode} mode): ${wsUrl}`);
const ws = new WebSocket(wsUrl);
ws.onopen = () => {
@@ -126,6 +165,9 @@ export function useChat({
// Handle WebSocket messages
const handleWebSocketMessage = useCallback((data: any) => {
+ // Debug: log all incoming WebSocket messages
+ console.log('[useChat] WebSocket message received:', data.type, data);
+
switch (data.type) {
case 'text':
currentMessageRef.current += data.content || '';
@@ -212,11 +254,51 @@ export function useChat({
// Canvas state was updated - could show notification
break;
+ case 'canvas_modification':
+ // Assistant wants to modify the canvas (from MCP tools in user mode)
+ console.log('[useChat] Received canvas_modification:', data.modification);
+ if (onCanvasModification && data.modification) {
+ console.log('[useChat] Calling onCanvasModification callback');
+ onCanvasModification(data.modification);
+ } else {
+ console.warn('[useChat] canvas_modification received but no handler or modification:', {
+ hasCallback: !!onCanvasModification,
+ modification: data.modification
+ });
+ }
+ break;
+
+ case 'spec_updated':
+ // Assistant modified the spec - we receive the full updated spec
+ console.log('[useChat] Spec updated by assistant:', data.tool, data.reason);
+ if (onSpecUpdated && data.spec) {
+ // Directly update the canvas with the new spec
+ onSpecUpdated(data.spec);
+ }
+ break;
+
+ case 'spec_modified':
+ // Legacy: Assistant modified the spec directly (from power mode write tools)
+ console.log('[useChat] Spec was modified by assistant (legacy):', data.tool, data.changes);
+ // Treat this as a canvas modification to trigger reload
+ if (onCanvasModification) {
+ // Create a synthetic modification event to trigger canvas refresh
+ onCanvasModification({
+ action: 'add_node', // Use add_node as it triggers refresh
+ data: {
+ _refresh: true,
+ tool: data.tool,
+ changes: data.changes,
+ },
+ });
+ }
+ break;
+
case 'pong':
// Heartbeat response - ignore
break;
}
- }, [onError]);
+ }, [onError, onCanvasModification]);
// Switch mode (requires new session)
const switchMode = useCallback(async (newMode: ChatMode) => {
@@ -462,6 +544,18 @@ export function useChat({
}
}, [useWebSocket]);
+ // Notify backend when user edits canvas (so Claude sees the changes)
+ const notifyCanvasEdit = useCallback((spec: any) => {
+ if (useWebSocket && wsRef.current?.readyState === WebSocket.OPEN) {
+ wsRef.current.send(
+ JSON.stringify({
+ type: 'canvas_edit',
+ spec: spec,
+ })
+ );
+ }
+ }, [useWebSocket]);
+
return {
messages: state.messages,
isThinking: state.isThinking,
@@ -475,5 +569,6 @@ export function useChat({
cancelRequest,
switchMode,
updateCanvasState,
+ notifyCanvasEdit,
};
}
diff --git a/atomizer-dashboard/frontend/src/hooks/useClaudeCode.ts b/atomizer-dashboard/frontend/src/hooks/useClaudeCode.ts
new file mode 100644
index 00000000..ca9f8db4
--- /dev/null
+++ b/atomizer-dashboard/frontend/src/hooks/useClaudeCode.ts
@@ -0,0 +1,349 @@
+/**
+ * Hook for Claude Code CLI integration
+ *
+ * Connects to backend that spawns actual Claude Code CLI processes.
+ * This gives full power: file editing, command execution, etc.
+ *
+ * Unlike useChat (which uses MCP tools), this hook:
+ * - Spawns actual Claude Code CLI in the backend
+ * - Has full file system access
+ * - Can edit files directly (not just return instructions)
+ * - Uses Opus 4.5 model
+ * - Has all Claude Code capabilities
+ */
+
+import { useState, useCallback, useRef, useEffect } from 'react';
+import { Message } from '../components/chat/ChatMessage';
+import { useCanvasStore } from './useCanvasStore';
+
+export interface CanvasState {
+ nodes: any[];
+ edges: any[];
+ studyName?: string;
+ studyPath?: string;
+}
+
+interface UseClaudeCodeOptions {
+ studyId?: string | null;
+ canvasState?: CanvasState | null;
+ onError?: (error: string) => void;
+ onCanvasRefresh?: (studyId: string) => void;
+}
+
+interface ClaudeCodeState {
+ messages: Message[];
+ isThinking: boolean;
+ error: string | null;
+ sessionId: string | null;
+ isConnected: boolean;
+ workingDir: string | null;
+}
+
+export function useClaudeCode({
+ studyId,
+ canvasState: initialCanvasState,
+ onError,
+ onCanvasRefresh,
+}: UseClaudeCodeOptions = {}) {
+ const [state, setState] = useState({
+ messages: [],
+ isThinking: false,
+ error: null,
+ sessionId: null,
+ isConnected: false,
+ workingDir: null,
+ });
+
+ // Track canvas state for sending with messages
+ const canvasStateRef = useRef(initialCanvasState || null);
+ const wsRef = useRef(null);
+ const currentMessageRef = useRef('');
+ const reconnectAttempts = useRef(0);
+ const maxReconnectAttempts = 3;
+
+ // Keep canvas state in sync with prop changes
+ useEffect(() => {
+ if (initialCanvasState) {
+ canvasStateRef.current = initialCanvasState;
+ }
+ }, [initialCanvasState]);
+
+ // Get canvas store for auto-refresh
+ const { loadFromConfig } = useCanvasStore();
+
+ // Connect to Claude Code WebSocket
+ useEffect(() => {
+ const connect = () => {
+ const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
+ // In development, connect directly to backend (bypass Vite proxy for WebSockets)
+ // Use port 8001 to match start-dashboard.bat
+ const backendHost = import.meta.env.DEV ? 'localhost:8001' : window.location.host;
+
+ // Use study-specific endpoint if studyId provided
+ const wsUrl = studyId
+ ? `${protocol}//${backendHost}/api/claude-code/ws/${encodeURIComponent(studyId)}`
+ : `${protocol}//${backendHost}/api/claude-code/ws`;
+
+ console.log('[ClaudeCode] Connecting to:', wsUrl);
+ const ws = new WebSocket(wsUrl);
+
+ ws.onopen = () => {
+ console.log('[ClaudeCode] Connected');
+ setState((prev) => ({ ...prev, isConnected: true, error: null }));
+ reconnectAttempts.current = 0;
+
+ // If no studyId in URL, send init message
+ if (!studyId) {
+ ws.send(JSON.stringify({ type: 'init', study_id: null }));
+ }
+ };
+
+ ws.onclose = () => {
+ console.log('[ClaudeCode] Disconnected');
+ setState((prev) => ({ ...prev, isConnected: false }));
+
+ // Attempt reconnection
+ if (reconnectAttempts.current < maxReconnectAttempts) {
+ reconnectAttempts.current++;
+ console.log(`[ClaudeCode] Reconnecting... attempt ${reconnectAttempts.current}`);
+ setTimeout(connect, 2000 * reconnectAttempts.current);
+ }
+ };
+
+ ws.onerror = (event) => {
+ console.error('[ClaudeCode] WebSocket error:', event);
+ setState((prev) => ({ ...prev, isConnected: false }));
+ onError?.('Claude Code connection error');
+ };
+
+ ws.onmessage = (event) => {
+ try {
+ const data = JSON.parse(event.data);
+ handleWebSocketMessage(data);
+ } catch (e) {
+ console.error('[ClaudeCode] Failed to parse message:', e);
+ }
+ };
+
+ wsRef.current = ws;
+ };
+
+ connect();
+
+ return () => {
+ reconnectAttempts.current = maxReconnectAttempts; // Prevent reconnection on unmount
+ wsRef.current?.close();
+ wsRef.current = null;
+ };
+ }, [studyId]);
+
+ // Handle WebSocket messages
+ const handleWebSocketMessage = useCallback(
+ (data: any) => {
+ switch (data.type) {
+ case 'initialized':
+ console.log('[ClaudeCode] Session initialized:', data.session_id);
+ setState((prev) => ({
+ ...prev,
+ sessionId: data.session_id,
+ workingDir: data.working_dir || null,
+ }));
+ break;
+
+ case 'text':
+ currentMessageRef.current += data.content || '';
+ setState((prev) => ({
+ ...prev,
+ messages: prev.messages.map((msg, idx) =>
+ idx === prev.messages.length - 1 && msg.role === 'assistant'
+ ? { ...msg, content: currentMessageRef.current }
+ : msg
+ ),
+ }));
+ break;
+
+ case 'done':
+ setState((prev) => ({
+ ...prev,
+ isThinking: false,
+ messages: prev.messages.map((msg, idx) =>
+ idx === prev.messages.length - 1 && msg.role === 'assistant'
+ ? { ...msg, isStreaming: false }
+ : msg
+ ),
+ }));
+ currentMessageRef.current = '';
+ break;
+
+ case 'error':
+ console.error('[ClaudeCode] Error:', data.content);
+ setState((prev) => ({
+ ...prev,
+ isThinking: false,
+ error: data.content || 'Unknown error',
+ }));
+ onError?.(data.content || 'Unknown error');
+ currentMessageRef.current = '';
+ break;
+
+ case 'refresh_canvas':
+ // Claude made file changes - trigger canvas refresh
+ console.log('[ClaudeCode] Canvas refresh requested:', data.reason);
+ if (data.study_id) {
+ onCanvasRefresh?.(data.study_id);
+ reloadCanvasFromStudy(data.study_id);
+ }
+ break;
+
+ case 'canvas_updated':
+ console.log('[ClaudeCode] Canvas state updated');
+ break;
+
+ case 'pong':
+ // Heartbeat response
+ break;
+
+ default:
+ console.log('[ClaudeCode] Unknown message type:', data.type);
+ }
+ },
+ [onError, onCanvasRefresh]
+ );
+
+ // Reload canvas from study config
+ const reloadCanvasFromStudy = useCallback(
+ async (studyIdToReload: string) => {
+ try {
+ console.log('[ClaudeCode] Reloading canvas for study:', studyIdToReload);
+
+ // Fetch fresh config from backend
+ const response = await fetch(`/api/optimization/studies/${encodeURIComponent(studyIdToReload)}/config`);
+ if (!response.ok) {
+ throw new Error(`Failed to fetch config: ${response.status}`);
+ }
+
+ const data = await response.json();
+ const config = data.config; // API returns { config: ..., path: ..., study_id: ... }
+
+ // Reload canvas with new config
+ loadFromConfig(config);
+
+ // Add system message about refresh
+ const refreshMessage: Message = {
+ id: `msg_${Date.now()}_refresh`,
+ role: 'system',
+ content: `Canvas refreshed with latest changes from ${studyIdToReload}`,
+ timestamp: new Date(),
+ };
+
+ setState((prev) => ({
+ ...prev,
+ messages: [...prev.messages, refreshMessage],
+ }));
+ } catch (error) {
+ console.error('[ClaudeCode] Failed to reload canvas:', error);
+ }
+ },
+ [loadFromConfig]
+ );
+
+ const generateMessageId = () => {
+ return `msg_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ };
+
+ const sendMessage = useCallback(
+ async (content: string) => {
+ if (!content.trim() || state.isThinking) return;
+
+ if (!wsRef.current || wsRef.current.readyState !== WebSocket.OPEN) {
+ onError?.('Not connected to Claude Code');
+ return;
+ }
+
+ // Add user message
+ const userMessage: Message = {
+ id: generateMessageId(),
+ role: 'user',
+ content: content.trim(),
+ timestamp: new Date(),
+ };
+
+ // Add assistant message placeholder
+ const assistantMessage: Message = {
+ id: generateMessageId(),
+ role: 'assistant',
+ content: '',
+ timestamp: new Date(),
+ isStreaming: true,
+ };
+
+ setState((prev) => ({
+ ...prev,
+ messages: [...prev.messages, userMessage, assistantMessage],
+ isThinking: true,
+ error: null,
+ }));
+
+ // Reset current message tracking
+ currentMessageRef.current = '';
+
+ // Send message via WebSocket with canvas state
+ wsRef.current.send(
+ JSON.stringify({
+ type: 'message',
+ content: content.trim(),
+ canvas_state: canvasStateRef.current || undefined,
+ })
+ );
+ },
+ [state.isThinking, onError]
+ );
+
+ const clearMessages = useCallback(() => {
+ setState((prev) => ({
+ ...prev,
+ messages: [],
+ error: null,
+ }));
+ currentMessageRef.current = '';
+ }, []);
+
+ // Update canvas state (call this when canvas changes)
+ const updateCanvasState = useCallback((newCanvasState: CanvasState | null) => {
+ canvasStateRef.current = newCanvasState;
+
+ // Also send to backend to update context
+ if (wsRef.current?.readyState === WebSocket.OPEN) {
+ wsRef.current.send(
+ JSON.stringify({
+ type: 'set_canvas',
+ canvas_state: newCanvasState,
+ })
+ );
+ }
+ }, []);
+
+ // Send ping to keep connection alive
+ useEffect(() => {
+ const pingInterval = setInterval(() => {
+ if (wsRef.current?.readyState === WebSocket.OPEN) {
+ wsRef.current.send(JSON.stringify({ type: 'ping' }));
+ }
+ }, 30000); // Every 30 seconds
+
+ return () => clearInterval(pingInterval);
+ }, []);
+
+ return {
+ messages: state.messages,
+ isThinking: state.isThinking,
+ error: state.error,
+ sessionId: state.sessionId,
+ isConnected: state.isConnected,
+ workingDir: state.workingDir,
+ sendMessage,
+ clearMessages,
+ updateCanvasState,
+ reloadCanvasFromStudy,
+ };
+}
diff --git a/atomizer-dashboard/frontend/src/hooks/useSpecWebSocket.ts b/atomizer-dashboard/frontend/src/hooks/useSpecWebSocket.ts
new file mode 100644
index 00000000..b71d1b2c
--- /dev/null
+++ b/atomizer-dashboard/frontend/src/hooks/useSpecWebSocket.ts
@@ -0,0 +1,288 @@
+/**
+ * useSpecWebSocket - WebSocket connection for real-time spec sync
+ *
+ * Connects to the backend WebSocket endpoint for live spec updates.
+ * Handles auto-reconnection, message parsing, and store updates.
+ *
+ * P2.11-P2.14: WebSocket sync implementation
+ */
+
+import { useEffect, useRef, useCallback, useState } from 'react';
+import { useSpecStore } from './useSpecStore';
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export type ConnectionStatus = 'disconnected' | 'connecting' | 'connected' | 'reconnecting';
+
+interface SpecWebSocketMessage {
+ type: 'modification' | 'full_sync' | 'error' | 'ping';
+ payload: unknown;
+}
+
+interface ModificationPayload {
+ operation: 'set' | 'add' | 'remove';
+ path: string;
+ value?: unknown;
+ modified_by: string;
+ timestamp: string;
+ hash: string;
+}
+
+interface ErrorPayload {
+ message: string;
+ code?: string;
+}
+
+interface UseSpecWebSocketOptions {
+ /**
+ * Enable auto-reconnect on disconnect (default: true)
+ */
+ autoReconnect?: boolean;
+
+ /**
+ * Reconnect delay in ms (default: 3000)
+ */
+ reconnectDelay?: number;
+
+ /**
+ * Max reconnect attempts (default: 10)
+ */
+ maxReconnectAttempts?: number;
+
+ /**
+ * Client identifier for tracking modifications (default: 'canvas')
+ */
+ clientId?: string;
+}
+
+interface UseSpecWebSocketReturn {
+ /**
+ * Current connection status
+ */
+ status: ConnectionStatus;
+
+ /**
+ * Manually disconnect
+ */
+ disconnect: () => void;
+
+ /**
+ * Manually reconnect
+ */
+ reconnect: () => void;
+
+ /**
+ * Send a message to the WebSocket (for future use)
+ */
+ send: (message: SpecWebSocketMessage) => void;
+
+ /**
+ * Last error message if any
+ */
+ lastError: string | null;
+}
+
+// ============================================================================
+// Hook
+// ============================================================================
+
+export function useSpecWebSocket(
+ studyId: string | null,
+ options: UseSpecWebSocketOptions = {}
+): UseSpecWebSocketReturn {
+ const {
+ autoReconnect = true,
+ reconnectDelay = 3000,
+ maxReconnectAttempts = 10,
+ clientId = 'canvas',
+ } = options;
+
+ const wsRef = useRef(null);
+ const reconnectAttemptsRef = useRef(0);
+ const reconnectTimeoutRef = useRef | null>(null);
+
+ const [status, setStatus] = useState('disconnected');
+ const [lastError, setLastError] = useState(null);
+
+ // Get store actions
+ const reloadSpec = useSpecStore((s) => s.reloadSpec);
+ const setError = useSpecStore((s) => s.setError);
+
+ // Build WebSocket URL
+ const getWsUrl = useCallback((id: string): string => {
+ const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
+ const host = window.location.host;
+ return `${protocol}//${host}/api/studies/${encodeURIComponent(id)}/spec/sync?client_id=${clientId}`;
+ }, [clientId]);
+
+ // Handle incoming messages
+ const handleMessage = useCallback((event: MessageEvent) => {
+ try {
+ const message: SpecWebSocketMessage = JSON.parse(event.data);
+
+ switch (message.type) {
+ case 'modification': {
+ const payload = message.payload as ModificationPayload;
+
+ // Skip if this is our own modification
+ if (payload.modified_by === clientId) {
+ return;
+ }
+
+ // Reload spec to get latest state
+ // In a more sophisticated implementation, we could apply the patch locally
+ reloadSpec().catch((err) => {
+ console.error('Failed to reload spec after modification:', err);
+ });
+ break;
+ }
+
+ case 'full_sync': {
+ // Full spec sync requested (e.g., after reconnect)
+ reloadSpec().catch((err) => {
+ console.error('Failed to reload spec during full_sync:', err);
+ });
+ break;
+ }
+
+ case 'error': {
+ const payload = message.payload as ErrorPayload;
+ console.error('WebSocket error:', payload.message);
+ setLastError(payload.message);
+ setError(payload.message);
+ break;
+ }
+
+ case 'ping': {
+ // Keep-alive ping, respond with pong
+ if (wsRef.current?.readyState === WebSocket.OPEN) {
+ wsRef.current.send(JSON.stringify({ type: 'pong' }));
+ }
+ break;
+ }
+
+ default:
+ console.warn('Unknown WebSocket message type:', message.type);
+ }
+ } catch (error) {
+ console.error('Failed to parse WebSocket message:', error);
+ }
+ }, [clientId, reloadSpec, setError]);
+
+ // Connect to WebSocket
+ const connect = useCallback(() => {
+ if (!studyId) return;
+
+ // Clean up existing connection
+ if (wsRef.current) {
+ wsRef.current.close();
+ }
+
+ setStatus('connecting');
+ setLastError(null);
+
+ const url = getWsUrl(studyId);
+ const ws = new WebSocket(url);
+
+ ws.onopen = () => {
+ setStatus('connected');
+ reconnectAttemptsRef.current = 0;
+ };
+
+ ws.onmessage = handleMessage;
+
+ ws.onerror = (event) => {
+ console.error('WebSocket error:', event);
+ setLastError('WebSocket connection error');
+ };
+
+ ws.onclose = (_event) => {
+ setStatus('disconnected');
+
+ // Check if we should reconnect
+ if (autoReconnect && reconnectAttemptsRef.current < maxReconnectAttempts) {
+ reconnectAttemptsRef.current++;
+ setStatus('reconnecting');
+
+ // Clear any existing reconnect timeout
+ if (reconnectTimeoutRef.current) {
+ clearTimeout(reconnectTimeoutRef.current);
+ }
+
+ // Schedule reconnect with exponential backoff
+ const delay = reconnectDelay * Math.min(reconnectAttemptsRef.current, 5);
+ reconnectTimeoutRef.current = setTimeout(() => {
+ connect();
+ }, delay);
+ } else if (reconnectAttemptsRef.current >= maxReconnectAttempts) {
+ setLastError('Max reconnection attempts reached');
+ }
+ };
+
+ wsRef.current = ws;
+ }, [studyId, getWsUrl, handleMessage, autoReconnect, reconnectDelay, maxReconnectAttempts]);
+
+ // Disconnect
+ const disconnect = useCallback(() => {
+ // Clear reconnect timeout
+ if (reconnectTimeoutRef.current) {
+ clearTimeout(reconnectTimeoutRef.current);
+ reconnectTimeoutRef.current = null;
+ }
+
+ // Close WebSocket
+ if (wsRef.current) {
+ wsRef.current.close();
+ wsRef.current = null;
+ }
+
+ reconnectAttemptsRef.current = maxReconnectAttempts; // Prevent auto-reconnect
+ setStatus('disconnected');
+ }, [maxReconnectAttempts]);
+
+ // Reconnect
+ const reconnect = useCallback(() => {
+ reconnectAttemptsRef.current = 0;
+ connect();
+ }, [connect]);
+
+ // Send message
+ const send = useCallback((message: SpecWebSocketMessage) => {
+ if (wsRef.current?.readyState === WebSocket.OPEN) {
+ wsRef.current.send(JSON.stringify(message));
+ } else {
+ console.warn('WebSocket not connected, cannot send message');
+ }
+ }, []);
+
+ // Connect when studyId changes
+ useEffect(() => {
+ if (studyId) {
+ connect();
+ } else {
+ disconnect();
+ }
+
+ return () => {
+ // Cleanup on unmount or studyId change
+ if (reconnectTimeoutRef.current) {
+ clearTimeout(reconnectTimeoutRef.current);
+ }
+ if (wsRef.current) {
+ wsRef.current.close();
+ }
+ };
+ }, [studyId, connect, disconnect]);
+
+ return {
+ status,
+ disconnect,
+ reconnect,
+ send,
+ lastError,
+ };
+}
+
+export default useSpecWebSocket;
diff --git a/atomizer-dashboard/frontend/src/hooks/useWebSocket.ts b/atomizer-dashboard/frontend/src/hooks/useWebSocket.ts
index 3f248469..97e47d73 100644
--- a/atomizer-dashboard/frontend/src/hooks/useWebSocket.ts
+++ b/atomizer-dashboard/frontend/src/hooks/useWebSocket.ts
@@ -18,7 +18,8 @@ export const useOptimizationWebSocket = ({ studyId, onMessage }: UseOptimization
const host = window.location.host; // This will be localhost:3000 in dev
// If using proxy in vite.config.ts, this works.
// If not, we might need to hardcode backend URL for dev:
- const backendHost = import.meta.env.DEV ? 'localhost:8000' : host;
+ // Use port 8001 to match start-dashboard.bat
+ const backendHost = import.meta.env.DEV ? 'localhost:8001' : host;
setSocketUrl(`${protocol}//${backendHost}/api/ws/optimization/${studyId}`);
} else {
diff --git a/atomizer-dashboard/frontend/src/pages/Analysis.tsx b/atomizer-dashboard/frontend/src/pages/Analysis.tsx
index d174b253..93c4ac69 100644
--- a/atomizer-dashboard/frontend/src/pages/Analysis.tsx
+++ b/atomizer-dashboard/frontend/src/pages/Analysis.tsx
@@ -1,4 +1,4 @@
-import { useState, useEffect, lazy, Suspense, useMemo } from 'react';
+import { useState, useEffect, useMemo } from 'react';
import { useNavigate } from 'react-router-dom';
import {
BarChart3,
@@ -14,25 +14,10 @@ import {
} from 'lucide-react';
import { useStudy } from '../context/StudyContext';
import { Card } from '../components/common/Card';
-
-// Lazy load charts
-const PlotlyParetoPlot = lazy(() => import('../components/plotly/PlotlyParetoPlot').then(m => ({ default: m.PlotlyParetoPlot })));
-const PlotlyParallelCoordinates = lazy(() => import('../components/plotly/PlotlyParallelCoordinates').then(m => ({ default: m.PlotlyParallelCoordinates })));
-const PlotlyParameterImportance = lazy(() => import('../components/plotly/PlotlyParameterImportance').then(m => ({ default: m.PlotlyParameterImportance })));
-const PlotlyConvergencePlot = lazy(() => import('../components/plotly/PlotlyConvergencePlot').then(m => ({ default: m.PlotlyConvergencePlot })));
-const PlotlyCorrelationHeatmap = lazy(() => import('../components/plotly/PlotlyCorrelationHeatmap').then(m => ({ default: m.PlotlyCorrelationHeatmap })));
-const PlotlyFeasibilityChart = lazy(() => import('../components/plotly/PlotlyFeasibilityChart').then(m => ({ default: m.PlotlyFeasibilityChart })));
-const PlotlySurrogateQuality = lazy(() => import('../components/plotly/PlotlySurrogateQuality').then(m => ({ default: m.PlotlySurrogateQuality })));
-const PlotlyRunComparison = lazy(() => import('../components/plotly/PlotlyRunComparison').then(m => ({ default: m.PlotlyRunComparison })));
-
-const ChartLoading = () => (
-
-);
+import { ConvergencePlot } from '../components/ConvergencePlot';
+import { ParameterImportanceChart } from '../components/ParameterImportanceChart';
+import { ParallelCoordinatesPlot } from '../components/ParallelCoordinatesPlot';
+import { ParetoPlot } from '../components/ParetoPlot';
const NoData = ({ message = 'No data available' }: { message?: string }) => (
@@ -383,15 +368,12 @@ export default function Analysis() {
{/* Convergence Plot */}
{trials.length > 0 && (
- }>
-
-
+
)}
@@ -455,30 +437,24 @@ export default function Analysis() {
{/* Parameter Importance */}
{trials.length > 0 && metadata?.design_variables && (
- }>
-
-
+
)}
{/* Parallel Coordinates */}
{trials.length > 0 && metadata && (
- }>
-
-
+
)}
@@ -508,14 +484,11 @@ export default function Analysis() {
{/* Pareto Front Plot */}
{paretoFront.length > 0 && (
- }>
-
-
+
)}
@@ -550,16 +523,10 @@ export default function Analysis() {
{/* Correlations Tab */}
{activeTab === 'correlations' && (
- {/* Correlation Heatmap */}
+ {/* Correlation Analysis */}
{trials.length > 2 && (
-
- }>
-
-
+
+
)}
@@ -612,11 +579,22 @@ export default function Analysis() {
- {/* Feasibility Over Time Chart */}
-
- }>
-
-
+ {/* Feasibility Summary */}
+
+
+
+
+
{stats.feasibilityRate.toFixed(1)}%
+
+
+ {stats.feasible} of {stats.total} trials satisfy all constraints
+
+
{/* Infeasible Trials List */}
@@ -683,11 +661,38 @@ export default function Analysis() {
- {/* Surrogate Quality Charts */}
-
- }>
-
-
+ {/* Surrogate Performance Summary */}
+
+
+
+
Trial Distribution
+
+
+
+
FEA: {stats.feaTrials} trials
+
+ {((stats.feaTrials / stats.total) * 100).toFixed(0)}%
+
+
+
+
+
NN: {stats.nnTrials} trials
+
+ {((stats.nnTrials / stats.total) * 100).toFixed(0)}%
+
+
+
+
+
+
Efficiency Gains
+
+
+ {stats.feaTrials > 0 ? `${(stats.total / stats.feaTrials).toFixed(1)}x` : '1.0x'}
+
+
Effective Speedup
+
+
+
)}
@@ -700,9 +705,36 @@ export default function Analysis() {
Compare different optimization runs within this study. Studies with adaptive optimization
may have multiple runs (e.g., initial FEA exploration, NN-accelerated iterations).
- }>
-
-
+
+
+
+
+ Run
+ Source
+ Trials
+ Best Value
+ Avg Value
+
+
+
+ {runs.map((run) => (
+
+ {run.name || `Run ${run.run_id}`}
+
+
+ {run.source}
+
+
+ {run.trial_count}
+ {run.best_value?.toExponential(4) || 'N/A'}
+ {run.avg_value?.toExponential(4) || 'N/A'}
+
+ ))}
+
+
+
)}
diff --git a/atomizer-dashboard/frontend/src/pages/Dashboard.tsx b/atomizer-dashboard/frontend/src/pages/Dashboard.tsx
index a7359aa2..3995a942 100644
--- a/atomizer-dashboard/frontend/src/pages/Dashboard.tsx
+++ b/atomizer-dashboard/frontend/src/pages/Dashboard.tsx
@@ -1,4 +1,4 @@
-import { useState, useEffect, lazy, Suspense, useRef } from 'react';
+import { useState, useEffect, useRef } from 'react';
import { useNavigate } from 'react-router-dom';
import { Settings } from 'lucide-react';
import { useOptimizationWebSocket } from '../hooks/useWebSocket';
@@ -21,19 +21,6 @@ import { CurrentTrialPanel, OptimizerStatePanel } from '../components/tracker';
import { NivoParallelCoordinates } from '../components/charts';
import type { Trial } from '../types';
-// Lazy load Plotly components for better initial load performance
-const PlotlyParallelCoordinates = lazy(() => import('../components/plotly/PlotlyParallelCoordinates').then(m => ({ default: m.PlotlyParallelCoordinates })));
-const PlotlyParetoPlot = lazy(() => import('../components/plotly/PlotlyParetoPlot').then(m => ({ default: m.PlotlyParetoPlot })));
-const PlotlyConvergencePlot = lazy(() => import('../components/plotly/PlotlyConvergencePlot').then(m => ({ default: m.PlotlyConvergencePlot })));
-const PlotlyParameterImportance = lazy(() => import('../components/plotly/PlotlyParameterImportance').then(m => ({ default: m.PlotlyParameterImportance })));
-
-// Loading placeholder for lazy components
-const ChartLoading = () => (
-
-);
-
export default function Dashboard() {
const navigate = useNavigate();
const { selectedStudy, refreshStudies, isInitialized } = useStudy();
@@ -62,8 +49,8 @@ export default function Dashboard() {
const [paretoFront, setParetoFront] = useState([]);
const [allTrialsRaw, setAllTrialsRaw] = useState([]); // All trials for parallel coordinates
- // Chart library toggle: 'nivo' (dark theme, default), 'plotly' (more interactive), or 'recharts' (simple)
- const [chartLibrary, setChartLibrary] = useState<'nivo' | 'plotly' | 'recharts'>('nivo');
+ // Chart library toggle: 'nivo' (dark theme, default) or 'recharts' (simple)
+ const [chartLibrary, setChartLibrary] = useState<'nivo' | 'recharts'>('nivo');
// Process status for tracker panels
const [isRunning, setIsRunning] = useState(false);
@@ -464,18 +451,7 @@ export default function Dashboard() {
}`}
title="Modern Nivo charts with dark theme (recommended)"
>
- Nivo
-
- setChartLibrary('plotly')}
- className={`px-3 py-1.5 text-sm transition-colors ${
- chartLibrary === 'plotly'
- ? 'bg-primary-500 text-white'
- : 'bg-dark-600 text-dark-200 hover:bg-dark-500'
- }`}
- title="Interactive Plotly charts with zoom, pan, and export"
- >
- Plotly
+ Advanced
setChartLibrary('recharts')}
@@ -570,22 +546,11 @@ export default function Dashboard() {
title="Pareto Front"
subtitle={`${paretoFront.length} Pareto-optimal solutions | ${studyMetadata.sampler || 'NSGA-II'} | ${studyMetadata.objectives?.length || 2} objectives`}
>
- {chartLibrary === 'plotly' ? (
- }>
-
-
- ) : (
-
- )}
+
)}
@@ -605,16 +570,6 @@ export default function Dashboard() {
paretoFront={paretoFront}
height={380}
/>
- ) : chartLibrary === 'plotly' ? (
- }>
-
-
) : (
- {chartLibrary === 'plotly' ? (
- }>
-
-
- ) : (
-
- )}
+
)}
@@ -663,32 +606,16 @@ export default function Dashboard() {
title="Parameter Importance"
subtitle={`Correlation with ${studyMetadata?.objectives?.[0]?.name || 'Objective'}`}
>
- {chartLibrary === 'plotly' ? (
- }>
- 0
- ? studyMetadata.design_variables
- : Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
- }
- objectiveIndex={0}
- objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
- height={280}
- />
-
- ) : (
- 0
- ? studyMetadata.design_variables
- : Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
- }
- objectiveIndex={0}
- objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
- />
- )}
+ 0
+ ? studyMetadata.design_variables
+ : Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
+ }
+ objectiveIndex={0}
+ objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
+ />
)}
diff --git a/atomizer-dashboard/frontend/src/pages/Home.tsx b/atomizer-dashboard/frontend/src/pages/Home.tsx
index 262887af..c85c6a8f 100644
--- a/atomizer-dashboard/frontend/src/pages/Home.tsx
+++ b/atomizer-dashboard/frontend/src/pages/Home.tsx
@@ -394,18 +394,32 @@ const Home: React.FC = () => {
Study Documentation
- handleSelectStudy(selectedPreview)}
- className="flex items-center gap-2 px-5 py-2.5 rounded-lg transition-all font-semibold whitespace-nowrap hover:-translate-y-0.5"
- style={{
- background: 'linear-gradient(135deg, #00d4e6 0%, #0891b2 100%)',
- color: '#000',
- boxShadow: '0 4px 15px rgba(0, 212, 230, 0.3)'
- }}
- >
- Open
-
-
+
+
navigate(`/canvas/${selectedPreview.id}`)}
+ className="flex items-center gap-2 px-4 py-2.5 rounded-lg transition-all font-medium whitespace-nowrap hover:-translate-y-0.5"
+ style={{
+ background: 'rgba(8, 15, 26, 0.85)',
+ border: '1px solid rgba(0, 212, 230, 0.3)',
+ color: '#00d4e6'
+ }}
+ >
+
+ Canvas
+
+
handleSelectStudy(selectedPreview)}
+ className="flex items-center gap-2 px-5 py-2.5 rounded-lg transition-all font-semibold whitespace-nowrap hover:-translate-y-0.5"
+ style={{
+ background: 'linear-gradient(135deg, #00d4e6 0%, #0891b2 100%)',
+ color: '#000',
+ boxShadow: '0 4px 15px rgba(0, 212, 230, 0.3)'
+ }}
+ >
+ Open
+
+
+
{/* Study Quick Stats */}
diff --git a/atomizer-dashboard/frontend/src/pages/Insights.tsx b/atomizer-dashboard/frontend/src/pages/Insights.tsx
index b43d8f84..32c6a651 100644
--- a/atomizer-dashboard/frontend/src/pages/Insights.tsx
+++ b/atomizer-dashboard/frontend/src/pages/Insights.tsx
@@ -20,11 +20,11 @@ import {
ExternalLink,
Zap,
List,
- LucideIcon
+ LucideIcon,
+ FileText
} from 'lucide-react';
import { useStudy } from '../context/StudyContext';
import { Card } from '../components/common/Card';
-import Plot from 'react-plotly.js';
// ============================================================================
// Types
@@ -642,13 +642,15 @@ export default function Insights() {
Open Full View
)}
- setFullscreen(true)}
- className="p-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg transition-colors"
- title="Fullscreen"
- >
-
-
+ {activeInsight.html_path && (
+ setFullscreen(true)}
+ className="p-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg transition-colors"
+ title="Fullscreen"
+ >
+
+
+ )}
@@ -674,49 +676,43 @@ export default function Insights() {
)}
- {/* Plotly Figure */}
+ {/* Insight Result */}
- {activeInsight.plotly_figure ? (
-
- ) : (
-
-
-
Insight Generated Successfully
+
+
+
Insight Generated Successfully
+ {activeInsight.html_path ? (
+ <>
+
+ Click the button below to view the interactive visualization.
+
+
window.open(`/api/insights/studies/${selectedStudy?.id}/view/${activeInsight.insight_type}`, '_blank')}
+ className="flex items-center gap-2 px-6 py-3 bg-primary-600 hover:bg-primary-500 text-white rounded-lg font-medium transition-colors"
+ >
+
+ Open Interactive Visualization
+
+ >
+ ) : (
- This insight generates HTML files. Click "Open Full View" to see the visualization.
+ The visualization has been generated. Check the study's insights folder.
- {activeInsight.summary?.html_files && (
-
-
Generated files:
-
- {(activeInsight.summary.html_files as string[]).slice(0, 4).map((f: string, i: number) => (
-
- {f.split(/[/\\]/).pop()}
-
- ))}
-
-
- )}
-
- )}
+ )}
+ {activeInsight.summary?.html_files && (
+
+
Generated files:
+
+ {(activeInsight.summary.html_files as string[]).slice(0, 4).map((f: string, i: number) => (
+
+
+ {f.split(/[/\\]/).pop()}
+
+ ))}
+
+
+ )}
+
{/* Generate Another */}
@@ -736,8 +732,8 @@ export default function Insights() {
)}
- {/* Fullscreen Modal */}
- {fullscreen && activeInsight?.plotly_figure && (
+ {/* Fullscreen Modal - now opens external HTML */}
+ {fullscreen && activeInsight && (
@@ -750,23 +746,24 @@ export default function Insights() {
-
-
+
+ {activeInsight.html_path ? (
+
+ ) : (
+
+
No interactive visualization available for this insight.
+
setFullscreen(false)}
+ className="px-4 py-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg"
+ >
+ Close
+
+
+ )}
)}
diff --git a/atomizer-dashboard/frontend/src/pages/Setup.tsx b/atomizer-dashboard/frontend/src/pages/Setup.tsx
index 111dc259..21fc6d60 100644
--- a/atomizer-dashboard/frontend/src/pages/Setup.tsx
+++ b/atomizer-dashboard/frontend/src/pages/Setup.tsx
@@ -278,7 +278,7 @@ export default function Setup() {
Configuration
setActiveTab('canvas')}
+ onClick={() => navigate(`/canvas/${selectedStudy?.id || ''}`)}
className="flex items-center gap-2 px-4 py-2 rounded-lg transition-colors bg-primary-600 text-white"
>
@@ -333,7 +333,7 @@ export default function Setup() {
Configuration
setActiveTab('canvas')}
+ onClick={() => navigate(`/canvas/${selectedStudy?.id || ''}`)}
className="flex items-center gap-2 px-4 py-2 rounded-lg transition-colors bg-dark-800 text-dark-300 hover:text-white hover:bg-dark-700"
>
diff --git a/atomizer-dashboard/frontend/src/types/index.ts b/atomizer-dashboard/frontend/src/types/index.ts
index 928d0848..faf46b24 100644
--- a/atomizer-dashboard/frontend/src/types/index.ts
+++ b/atomizer-dashboard/frontend/src/types/index.ts
@@ -1,3 +1,6 @@
+// AtomizerSpec v2.0 types (unified configuration)
+export * from './atomizer-spec';
+
// Study types
export interface Study {
id: string;
diff --git a/atomizer-dashboard/frontend/vite.config.ts b/atomizer-dashboard/frontend/vite.config.ts
index 43dfeb19..6040903d 100644
--- a/atomizer-dashboard/frontend/vite.config.ts
+++ b/atomizer-dashboard/frontend/vite.config.ts
@@ -17,18 +17,10 @@ export default defineConfig({
}
}
},
- resolve: {
- alias: {
- // Use the smaller basic Plotly distribution
- 'plotly.js/dist/plotly': 'plotly.js-basic-dist'
- }
- },
build: {
rollupOptions: {
output: {
manualChunks: {
- // Separate Plotly into its own chunk for better caching
- plotly: ['plotly.js-basic-dist', 'react-plotly.js'],
// Separate React and core libs
vendor: ['react', 'react-dom', 'react-router-dom'],
// Recharts in its own chunk
@@ -37,8 +29,5 @@ export default defineConfig({
}
},
chunkSizeWarningLimit: 600
- },
- optimizeDeps: {
- include: ['plotly.js-basic-dist']
}
})
diff --git a/atomizer-dashboard/start-dashboard.bat b/atomizer-dashboard/start-dashboard.bat
index 9d47776c..f405fed2 100644
--- a/atomizer-dashboard/start-dashboard.bat
+++ b/atomizer-dashboard/start-dashboard.bat
@@ -25,6 +25,18 @@ if not exist "%CONDA_PATH%\Scripts\activate.bat" (
exit /b 1
)
+:: Stop any existing dashboard processes first
+echo [0/3] Stopping existing processes...
+taskkill /F /FI "WINDOWTITLE eq Atomizer Backend*" >nul 2>&1
+taskkill /F /FI "WINDOWTITLE eq Atomizer Frontend*" >nul 2>&1
+for /f "tokens=5" %%a in ('netstat -ano ^| findstr :%BACKEND_PORT% ^| findstr LISTENING') do (
+ taskkill /F /PID %%a >nul 2>&1
+)
+for /f "tokens=5" %%a in ('netstat -ano ^| findstr :%FRONTEND_PORT% ^| findstr LISTENING') do (
+ taskkill /F /PID %%a >nul 2>&1
+)
+ping 127.0.0.1 -n 2 >nul
+
echo [1/3] Starting Backend Server (port %BACKEND_PORT%)...
start "Atomizer Backend" cmd /k "call %CONDA_PATH%\Scripts\activate.bat %CONDA_ENV% && cd /d %SCRIPT_DIR%backend && python -m uvicorn api.main:app --reload --port %BACKEND_PORT%"
diff --git a/atomizer-dashboard/stop-dashboard.bat b/atomizer-dashboard/stop-dashboard.bat
index 044a3d58..7baae756 100644
--- a/atomizer-dashboard/stop-dashboard.bat
+++ b/atomizer-dashboard/stop-dashboard.bat
@@ -10,11 +10,11 @@ echo.
taskkill /F /FI "WINDOWTITLE eq Atomizer Backend*" >nul 2>&1
taskkill /F /FI "WINDOWTITLE eq Atomizer Frontend*" >nul 2>&1
-:: Kill any remaining processes on the ports
-for /f "tokens=5" %%a in ('netstat -ano ^| findstr :8000 ^| findstr LISTENING') do (
+:: Kill any remaining processes on the ports (backend: 8001, frontend: 3003)
+for /f "tokens=5" %%a in ('netstat -ano ^| findstr :8001 ^| findstr LISTENING') do (
taskkill /F /PID %%a >nul 2>&1
)
-for /f "tokens=5" %%a in ('netstat -ano ^| findstr :5173 ^| findstr LISTENING') do (
+for /f "tokens=5" %%a in ('netstat -ano ^| findstr :3003 ^| findstr LISTENING') do (
taskkill /F /PID %%a >nul 2>&1
)