feat(dashboard): Enhanced chat, spec management, and Claude integration

Backend:
- spec.py: New AtomizerSpec REST API endpoints
- spec_manager.py: SpecManager service for unified config
- interview_engine.py: Study creation interview logic
- claude.py: Enhanced Claude API with context
- optimization.py: Extended optimization endpoints
- context_builder.py, session_manager.py: Improved services

Frontend:
- Chat components: Enhanced message rendering, tool call cards
- Hooks: useClaudeCode, useSpecWebSocket, improved useChat
- Pages: Updated Dashboard, Analysis, Insights, Setup, Home
- Components: ParallelCoordinatesPlot, ParetoPlot improvements
- App.tsx: Route updates for canvas/studio

Infrastructure:
- vite.config.ts: Build configuration updates
- start/stop-dashboard.bat: Script improvements
This commit is contained in:
2026-01-20 13:10:47 -05:00
parent b05412f807
commit ba0b9a1fae
31 changed files with 4836 additions and 349 deletions

View File

@@ -13,7 +13,7 @@ import sys
# Add parent directory to path to import optimization_engine
sys.path.append(str(Path(__file__).parent.parent.parent.parent))
from api.routes import optimization, claude, terminal, insights, context, files, nx
from api.routes import optimization, claude, terminal, insights, context, files, nx, claude_code, spec
from api.websocket import optimization_stream
@@ -60,6 +60,9 @@ app.include_router(insights.router, prefix="/api/insights", tags=["insights"])
app.include_router(context.router, prefix="/api/context", tags=["context"])
app.include_router(files.router, prefix="/api/files", tags=["files"])
app.include_router(nx.router, prefix="/api/nx", tags=["nx"])
app.include_router(claude_code.router, prefix="/api", tags=["claude-code"])
app.include_router(spec.router, prefix="/api", tags=["spec"])
app.include_router(spec.validate_router, prefix="/api", tags=["spec"])
@app.get("/")
async def root():

View File

@@ -187,7 +187,15 @@ async def session_websocket(websocket: WebSocket, session_id: str):
continue
# Get canvas state from message or use stored state
canvas_state = data.get("canvas_state") or current_canvas_state
msg_canvas = data.get("canvas_state")
canvas_state = msg_canvas if msg_canvas is not None else current_canvas_state
# Debug logging
if canvas_state:
node_count = len(canvas_state.get("nodes", []))
print(f"[Claude WS] Sending message with canvas state: {node_count} nodes")
else:
print("[Claude WS] Sending message WITHOUT canvas state")
async for chunk in manager.send_message(
session_id,
@@ -401,6 +409,175 @@ async def websocket_chat(websocket: WebSocket):
pass
# ========== POWER MODE: Direct API with Write Tools ==========
@router.websocket("/sessions/{session_id}/ws/power")
async def power_mode_websocket(websocket: WebSocket, session_id: str):
"""
WebSocket for power mode chat using direct Anthropic API with write tools.
Unlike the regular /ws endpoint which uses Claude CLI + MCP,
this uses AtomizerClaudeAgent directly with built-in write tools.
This allows immediate modifications without permission prompts.
Message formats (client -> server):
{"type": "message", "content": "user message"}
{"type": "set_study", "study_id": "study_name"}
{"type": "ping"}
Message formats (server -> client):
{"type": "text", "content": "..."}
{"type": "tool_call", "tool": "...", "input": {...}}
{"type": "tool_result", "result": "..."}
{"type": "done", "tool_calls": [...]}
{"type": "error", "message": "..."}
{"type": "spec_modified", "changes": [...]}
{"type": "pong"}
"""
await websocket.accept()
manager = get_session_manager()
session = manager.get_session(session_id)
if not session:
await websocket.send_json({"type": "error", "message": "Session not found"})
await websocket.close()
return
# Import AtomizerClaudeAgent for direct API access
from api.services.claude_agent import AtomizerClaudeAgent
# Create agent with study context
agent = AtomizerClaudeAgent(study_id=session.study_id)
conversation_history: List[Dict[str, Any]] = []
# Load initial spec and set canvas state so Claude sees current canvas
initial_spec = agent.load_current_spec()
if initial_spec:
# Send initial spec to frontend
await websocket.send_json({
"type": "spec_updated",
"spec": initial_spec,
"reason": "initial_load"
})
try:
while True:
data = await websocket.receive_json()
if data.get("type") == "message":
content = data.get("content", "")
if not content:
continue
try:
# Use streaming API with tool support for real-time response
last_tool_calls = []
async for event in agent.chat_stream_with_tools(content, conversation_history):
event_type = event.get("type")
if event_type == "text":
# Stream text tokens to frontend immediately
await websocket.send_json({
"type": "text",
"content": event.get("content", ""),
})
elif event_type == "tool_call":
# Tool is being called
tool_info = event.get("tool", {})
await websocket.send_json({
"type": "tool_call",
"tool": tool_info,
})
elif event_type == "tool_result":
# Tool finished executing
tool_name = event.get("tool", "")
await websocket.send_json({
"type": "tool_result",
"tool": tool_name,
"result": event.get("result", ""),
})
# If it was a write tool, send full updated spec
if tool_name in ["add_design_variable", "add_extractor",
"add_objective", "add_constraint",
"update_spec_field", "remove_node",
"create_study"]:
# Load updated spec and update agent's canvas state
updated_spec = agent.load_current_spec()
if updated_spec:
await websocket.send_json({
"type": "spec_updated",
"tool": tool_name,
"spec": updated_spec, # Full spec for direct canvas update
})
elif event_type == "done":
# Streaming complete
last_tool_calls = event.get("tool_calls", [])
await websocket.send_json({
"type": "done",
"tool_calls": last_tool_calls,
})
# Update conversation history for next message
# Note: For proper history tracking, we'd need to store messages properly
# For now, we append the user message and response
conversation_history.append({"role": "user", "content": content})
conversation_history.append({"role": "assistant", "content": event.get("response", "")})
except Exception as e:
import traceback
traceback.print_exc()
await websocket.send_json({
"type": "error",
"message": str(e),
})
elif data.get("type") == "canvas_edit":
# User made a manual edit to the canvas - update Claude's context
spec = data.get("spec")
if spec:
agent.set_canvas_state(spec)
await websocket.send_json({
"type": "canvas_edit_received",
"acknowledged": True
})
elif data.get("type") == "set_study":
study_id = data.get("study_id")
if study_id:
await manager.set_study_context(session_id, study_id)
# Recreate agent with new study context
agent = AtomizerClaudeAgent(study_id=study_id)
conversation_history = [] # Clear history on study change
# Load spec for new study
new_spec = agent.load_current_spec()
await websocket.send_json({
"type": "context_updated",
"study_id": study_id,
})
if new_spec:
await websocket.send_json({
"type": "spec_updated",
"spec": new_spec,
"reason": "study_change"
})
elif data.get("type") == "ping":
await websocket.send_json({"type": "pong"})
except WebSocketDisconnect:
pass
except Exception as e:
try:
await websocket.send_json({"type": "error", "message": str(e)})
except:
pass
@router.get("/suggestions")
async def get_chat_suggestions(study_id: Optional[str] = None):
"""

View File

@@ -38,16 +38,30 @@ def resolve_study_path(study_id: str) -> Path:
"""Find study folder by scanning all topic directories.
Supports nested folder structure: studies/Topic/study_name/
Study ID is the short name (e.g., 'm1_mirror_adaptive_V14')
Study ID can be:
- Short name (e.g., 'm1_mirror_adaptive_V14') - scans all topic folders
- Full nested path (e.g., 'M1_Mirror/m1_mirror_cost_reduction_lateral')
Returns the full path to the study directory.
Raises HTTPException 404 if not found.
"""
# Handle nested path format (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
if "/" in study_id:
# Try with forward slashes
nested_path = STUDIES_DIR / study_id
if nested_path.exists() and nested_path.is_dir():
if _is_valid_study_dir(nested_path):
return nested_path
# Try with backslashes (Windows path)
nested_path = STUDIES_DIR / study_id.replace("/", "\\")
if nested_path.exists() and nested_path.is_dir():
if _is_valid_study_dir(nested_path):
return nested_path
# First check direct path (backwards compatibility for flat structure)
direct_path = STUDIES_DIR / study_id
if direct_path.exists() and direct_path.is_dir():
# Verify it's actually a study (has 1_setup or config)
if (direct_path / "1_setup").exists() or (direct_path / "optimization_config.json").exists():
if _is_valid_study_dir(direct_path):
return direct_path
# Scan topic folders for nested structure
@@ -55,13 +69,21 @@ def resolve_study_path(study_id: str) -> Path:
if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
study_dir = topic_dir / study_id
if study_dir.exists() and study_dir.is_dir():
# Verify it's actually a study
if (study_dir / "1_setup").exists() or (study_dir / "optimization_config.json").exists():
if _is_valid_study_dir(study_dir):
return study_dir
raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
def _is_valid_study_dir(study_dir: Path) -> bool:
"""Check if a directory is a valid study directory."""
return (
(study_dir / "1_setup").exists() or
(study_dir / "optimization_config.json").exists() or
(study_dir / "atomizer_spec.json").exists()
)
def get_study_topic(study_dir: Path) -> Optional[str]:
"""Get the topic folder name for a study, or None if in root."""
# Check if parent is a topic folder (not the root studies dir)
@@ -1542,16 +1564,17 @@ async def get_study_image(study_id: str, image_path: str):
raise HTTPException(status_code=500, detail=f"Failed to serve image: {str(e)}")
@router.get("/studies/{study_id}/config")
@router.get("/studies/{study_id:path}/config")
async def get_study_config(study_id: str):
"""
Get the full optimization_config.json for a study
Get the study configuration - reads from atomizer_spec.json (v2.0) first,
falls back to legacy optimization_config.json if not found.
Args:
study_id: Study identifier
Returns:
JSON with the complete configuration
JSON with the complete configuration in a unified format
"""
try:
study_dir = resolve_study_path(study_id)
@@ -1559,7 +1582,22 @@ async def get_study_config(study_id: str):
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
# Look for config in various locations
# Priority 1: atomizer_spec.json (v2.0 unified format)
spec_file = study_dir / "atomizer_spec.json"
if spec_file.exists():
with open(spec_file) as f:
spec = json.load(f)
# Transform AtomizerSpec to the expected config format
config = _transform_spec_to_config(spec, study_id)
return {
"config": config,
"path": str(spec_file),
"study_id": study_id,
"source": "atomizer_spec"
}
# Priority 2: Legacy optimization_config.json
config_file = study_dir / "1_setup" / "optimization_config.json"
if not config_file.exists():
config_file = study_dir / "optimization_config.json"
@@ -1573,7 +1611,8 @@ async def get_study_config(study_id: str):
return {
"config": config,
"path": str(config_file),
"study_id": study_id
"study_id": study_id,
"source": "legacy_config"
}
except HTTPException:
@@ -1582,6 +1621,118 @@ async def get_study_config(study_id: str):
raise HTTPException(status_code=500, detail=f"Failed to read config: {str(e)}")
def _transform_spec_to_config(spec: dict, study_id: str) -> dict:
"""Transform AtomizerSpec v2.0 format to legacy config format for backwards compatibility."""
meta = spec.get("meta", {})
model = spec.get("model", {})
optimization = spec.get("optimization", {})
# Transform design variables
design_variables = []
for dv in spec.get("design_variables", []):
bounds = dv.get("bounds", {})
design_variables.append({
"name": dv.get("name"),
"expression_name": dv.get("expression_name"),
"type": "float" if dv.get("type") == "continuous" else dv.get("type", "float"),
"min": bounds.get("min"),
"max": bounds.get("max"),
"low": bounds.get("min"), # Alias for compatibility
"high": bounds.get("max"), # Alias for compatibility
"baseline": dv.get("baseline"),
"unit": dv.get("units"),
"units": dv.get("units"),
"enabled": dv.get("enabled", True)
})
# Transform objectives
objectives = []
for obj in spec.get("objectives", []):
source = obj.get("source", {})
objectives.append({
"name": obj.get("name"),
"direction": obj.get("direction", "minimize"),
"weight": obj.get("weight", 1.0),
"target": obj.get("target"),
"unit": obj.get("units"),
"units": obj.get("units"),
"extractor_id": source.get("extractor_id"),
"output_key": source.get("output_key")
})
# Transform constraints
constraints = []
for con in spec.get("constraints", []):
constraints.append({
"name": con.get("name"),
"type": _operator_to_type(con.get("operator", "<=")),
"operator": con.get("operator"),
"max_value": con.get("threshold") if con.get("operator") in ["<=", "<"] else None,
"min_value": con.get("threshold") if con.get("operator") in [">=", ">"] else None,
"bound": con.get("threshold"),
"unit": con.get("units"),
"units": con.get("units")
})
# Transform extractors
extractors = []
for ext in spec.get("extractors", []):
extractors.append({
"name": ext.get("name"),
"type": ext.get("type"),
"builtin": ext.get("builtin", True),
"config": ext.get("config", {}),
"outputs": ext.get("outputs", [])
})
# Get algorithm info
algorithm = optimization.get("algorithm", {})
budget = optimization.get("budget", {})
# Build the config in legacy format
config = {
"study_name": meta.get("study_name", study_id),
"description": meta.get("description", ""),
"version": meta.get("version", "2.0"),
"design_variables": design_variables,
"objectives": objectives,
"constraints": constraints,
"extractors": extractors,
"optimization": {
"algorithm": algorithm.get("type", "TPE"),
"n_trials": budget.get("max_trials", 100),
"max_time_hours": budget.get("max_time_hours"),
"convergence_patience": budget.get("convergence_patience")
},
"optimization_settings": {
"sampler": algorithm.get("type", "TPE"),
"n_trials": budget.get("max_trials", 100)
},
"algorithm": {
"name": "Optuna",
"sampler": algorithm.get("type", "TPE"),
"n_trials": budget.get("max_trials", 100)
},
"model": model,
"sim_file": model.get("sim", {}).get("path") if isinstance(model.get("sim"), dict) else None
}
return config
def _operator_to_type(operator: str) -> str:
"""Convert constraint operator to legacy type string."""
mapping = {
"<=": "le",
"<": "le",
">=": "ge",
">": "ge",
"==": "eq",
"=": "eq"
}
return mapping.get(operator, "le")
# ============================================================================
# Process Control Endpoints
# ============================================================================
@@ -2851,7 +3002,162 @@ async def get_study_runs(study_id: str):
class UpdateConfigRequest(BaseModel):
config: dict
config: Optional[dict] = None
intent: Optional[dict] = None
def intent_to_config(intent: dict, existing_config: Optional[dict] = None) -> dict:
    """
    Convert canvas intent format to optimization_config.json format.

    Preserves existing config fields that aren't in the intent.

    Args:
        intent: Canvas intent payload. Recognized sections: model, solver,
            design_variables, extractors, objectives, constraints,
            optimization, surrogate.
        existing_config: Current config to merge into. It is never mutated:
            a deep copy is taken, so nested dicts (e.g. "simulation") owned
            by the caller stay untouched.

    Returns:
        The merged config dict in legacy optimization_config.json format.
    """
    from copy import deepcopy

    # Deep-copy: a shallow .copy() would share nested dicts such as
    # config['simulation'], and the writes below would mutate the caller's
    # existing_config in place.
    config = deepcopy(existing_config) if existing_config else {}

    # Model file (and inferred FEM/SIM siblings)
    if intent.get('model', {}).get('path'):
        model_path = Path(intent['model']['path']).name
        if 'simulation' not in config:
            config['simulation'] = {}
        config['simulation']['model_file'] = model_path
        # Try to infer other files from model name
        base_name = model_path.replace('.prt', '')
        if not config['simulation'].get('fem_file'):
            config['simulation']['fem_file'] = f"{base_name}_fem1.fem"
        if not config['simulation'].get('sim_file'):
            config['simulation']['sim_file'] = f"{base_name}_sim1.sim"

    # Solver
    if intent.get('solver', {}).get('type'):
        solver_type = intent['solver']['type']
        if 'simulation' not in config:
            config['simulation'] = {}
        config['simulation']['solver'] = 'nastran'
        # Map SOL types to analysis_types
        sol_to_analysis = {
            'SOL101': ['static'],
            'SOL103': ['modal'],
            'SOL105': ['buckling'],
            'SOL106': ['nonlinear'],
            'SOL111': ['modal', 'frequency_response'],
            'SOL112': ['modal', 'transient'],
        }
        config['simulation']['analysis_types'] = sol_to_analysis.get(solver_type, ['static'])

    # Design Variables
    if intent.get('design_variables'):
        config['design_variables'] = []
        for dv in intent['design_variables']:
            config['design_variables'].append({
                'parameter': dv.get('name', dv.get('expression_name', '')),
                'bounds': [dv.get('min', 0), dv.get('max', 100)],
                'description': dv.get('description', f"Design variable: {dv.get('name', '')}"),
            })

    # Extractors -> looked up by name when objectives/constraints reference them
    extractor_map = {}
    if intent.get('extractors'):
        for ext in intent['extractors']:
            extractor_map[ext.get('name', '')] = ext

    # Objectives
    if intent.get('objectives'):
        config['objectives'] = []
        for obj in intent['objectives']:
            obj_config = {
                'name': obj.get('name', 'objective'),
                'goal': obj.get('direction', 'minimize'),
                'weight': obj.get('weight', 1.0),
                'description': obj.get('description', f"Objective: {obj.get('name', '')}"),
            }
            # Add extraction config if extractor referenced
            extractor_name = obj.get('extractor')
            if extractor_name and extractor_name in extractor_map:
                ext = extractor_map[extractor_name]
                obj_config['extraction'] = {
                    'action': _extractor_id_to_action(ext.get('id', '')),
                    'domain': 'result_extraction',
                    'params': ext.get('config', {}),
                }
            config['objectives'].append(obj_config)

    # Constraints
    if intent.get('constraints'):
        config['constraints'] = []
        for con in intent['constraints']:
            op = con.get('operator', '<=')
            con_type = 'less_than' if '<' in op else 'greater_than' if '>' in op else 'equal_to'
            con_config = {
                'name': con.get('name', 'constraint'),
                'type': con_type,
                'threshold': con.get('value', 0),
                'description': con.get('description', f"Constraint: {con.get('name', '')}"),
            }
            # Add extraction config if extractor referenced
            extractor_name = con.get('extractor')
            if extractor_name and extractor_name in extractor_map:
                ext = extractor_map[extractor_name]
                con_config['extraction'] = {
                    'action': _extractor_id_to_action(ext.get('id', '')),
                    'domain': 'result_extraction',
                    'params': ext.get('config', {}),
                }
            config['constraints'].append(con_config)

    # Optimization settings
    if intent.get('optimization'):
        opt = intent['optimization']
        if 'optimization_settings' not in config:
            config['optimization_settings'] = {}
        if opt.get('max_trials'):
            config['optimization_settings']['n_trials'] = opt['max_trials']
        if opt.get('method'):
            # Map method names to Optuna sampler names
            method_map = {
                'TPE': 'TPESampler',
                'CMA-ES': 'CmaEsSampler',
                'NSGA-II': 'NSGAIISampler',
                'RandomSearch': 'RandomSampler',
                'GP-BO': 'GPSampler',
            }
            config['optimization_settings']['sampler'] = method_map.get(opt['method'], opt['method'])

    # Surrogate
    if intent.get('surrogate', {}).get('enabled'):
        config['surrogate'] = {
            'type': intent['surrogate'].get('type', 'MLP'),
            'min_trials': intent['surrogate'].get('min_trials', 20),
        }

    return config
def _extractor_id_to_action(ext_id: str) -> str:
"""Map extractor IDs (E1, E2, etc.) to extraction action names."""
action_map = {
'E1': 'extract_displacement',
'E2': 'extract_frequency',
'E3': 'extract_stress',
'E4': 'extract_mass',
'E5': 'extract_mass',
'E8': 'extract_zernike',
'E9': 'extract_zernike',
'E10': 'extract_zernike',
'displacement': 'extract_displacement',
'frequency': 'extract_frequency',
'stress': 'extract_stress',
'mass': 'extract_mass',
'mass_bdf': 'extract_mass',
'mass_cad': 'extract_mass',
'zernike': 'extract_zernike',
'zernike_opd': 'extract_zernike',
}
return action_map.get(ext_id, 'extract_displacement')
@router.put("/studies/{study_id}/config")
@@ -2859,9 +3165,13 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
"""
Update the optimization_config.json for a study
Accepts either:
- {"config": {...}} - Direct config object (overwrites)
- {"intent": {...}} - Canvas intent (converted and merged with existing)
Args:
study_id: Study identifier
request: New configuration data
request: New configuration data (config or intent)
Returns:
JSON with success status
@@ -2891,9 +3201,24 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
backup_file = config_file.with_suffix('.json.backup')
shutil.copy(config_file, backup_file)
# Determine which format was provided
if request.config is not None:
# Direct config update
new_config = request.config
elif request.intent is not None:
# Convert intent to config, merging with existing
with open(config_file, 'r') as f:
existing_config = json.load(f)
new_config = intent_to_config(request.intent, existing_config)
else:
raise HTTPException(
status_code=400,
detail="Request must include either 'config' or 'intent' field"
)
# Write new config
with open(config_file, 'w') as f:
json.dump(request.config, f, indent=2)
json.dump(new_config, f, indent=2)
return {
"success": True,

View File

@@ -0,0 +1,646 @@
"""
AtomizerSpec v2.0 API Endpoints
REST API for managing AtomizerSpec configurations.
All spec modifications flow through these endpoints.
Endpoints:
- GET /studies/{study_id}/spec - Get full spec
- PUT /studies/{study_id}/spec - Replace entire spec
- PATCH /studies/{study_id}/spec - Partial update
- POST /studies/{study_id}/spec/validate - Validate spec
- POST /studies/{study_id}/spec/nodes - Add node
- PATCH /studies/{study_id}/spec/nodes/{node_id} - Update node
- DELETE /studies/{study_id}/spec/nodes/{node_id} - Delete node
- POST /studies/{study_id}/spec/custom-functions - Add custom extractor
- WebSocket /studies/{study_id}/spec/sync - Real-time sync
"""
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect, Query
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import json
import sys
import asyncio
# Add project root to path
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
from api.services.spec_manager import (
SpecManager,
SpecManagerError,
SpecNotFoundError,
SpecConflictError,
get_spec_manager,
)
from optimization_engine.config.spec_models import (
AtomizerSpec,
ValidationReport,
)
from optimization_engine.config.spec_validator import SpecValidationError
router = APIRouter(prefix="/studies/{study_id:path}/spec", tags=["spec"])
# Base studies directory
STUDIES_DIR = Path(__file__).parent.parent.parent.parent.parent / "studies"
# ============================================================================
# Request/Response Models
# ============================================================================
class SpecPatchRequest(BaseModel):
    """Request body for PATCH /spec: set one field addressed by JSONPath."""
    path: str = Field(..., description="JSONPath to the field (e.g., 'objectives[0].weight')")
    value: Any = Field(..., description="New value")
    # Audit tag recorded with the change (e.g. "api", "canvas", "claude").
    modified_by: str = Field(default="api", description="Who is making the change")
class NodeAddRequest(BaseModel):
    """Request body for POST /spec/nodes: add a node to the spec."""
    type: str = Field(..., description="Node type: designVar, extractor, objective, constraint")
    data: Dict[str, Any] = Field(..., description="Node data")
    # Defaults to "canvas" since node adds typically originate from the UI.
    modified_by: str = Field(default="canvas", description="Who is making the change")
class NodeUpdateRequest(BaseModel):
    """Request body for PATCH /spec/nodes/{node_id}: partial node update."""
    updates: Dict[str, Any] = Field(..., description="Fields to update")
    # Audit tag recorded with the change.
    modified_by: str = Field(default="canvas", description="Who is making the change")
class CustomFunctionRequest(BaseModel):
    """Request body for POST /spec/custom-functions: register a custom extractor."""
    name: str = Field(..., description="Function name")
    code: str = Field(..., description="Python source code")
    outputs: List[str] = Field(..., description="Output names")
    description: Optional[str] = Field(default=None, description="Human-readable description")
    # Defaults to "claude": custom extractors are typically authored by the agent.
    modified_by: str = Field(default="claude", description="Who is making the change")
class ExtractorValidationRequest(BaseModel):
    """Request body for validating custom extractor code.

    NOTE(review): presumably consumed by the /spec/validate-extractor
    endpoint on validate_router — confirm; the handler body is not visible here.
    """
    function_name: str = Field(default="extract", description="Expected function name")
    source: str = Field(..., description="Python source code to validate")
class SpecUpdateResponse(BaseModel):
    """Response for spec modification operations (PUT / PATCH)."""
    success: bool
    # Content hash of the saved spec; clients can echo it back as
    # expected_hash for conflict detection.
    hash: str
    # Modification timestamp taken from the spec's meta block.
    modified: str
    modified_by: str
class NodeAddResponse(BaseModel):
    """Response for node-add and custom-function operations."""
    success: bool
    # Identifier assigned to the newly created node.
    node_id: str
    message: str
class ValidationResponse(BaseModel):
    """Response for the validation endpoint."""
    valid: bool
    # Serialized validation entries (model_dump of each error/warning).
    errors: List[Dict[str, Any]]
    warnings: List[Dict[str, Any]]
    # Count summary of the spec contents (from the report's summary model).
    summary: Dict[str, int]
# ============================================================================
# Helper Functions
# ============================================================================
def resolve_study_path(study_id: str) -> Path:
    """Find study folder by scanning all topic directories.

    Supports both formats:
    - "study_name" - will scan topic folders to find it
    - "Topic/study_name" - direct nested path (e.g., "M1_Mirror/m1_mirror_v1")

    Raises:
        HTTPException: 404 when no matching directory exists.
    """
    # Handle nested paths (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
    if "/" in study_id:
        # Prefer the forward-slash form first: pathlib normalizes "/" to the
        # native separator on every platform (including Windows), whereas the
        # backslash-substituted form is a literal single component containing
        # "\" on POSIX and can never match a real directory there.
        nested_path = STUDIES_DIR / study_id
        if nested_path.exists() and nested_path.is_dir():
            return nested_path
        # Fallback kept for any legacy Windows caller relying on literal
        # backslash joining; harmless no-op elsewhere.
        nested_path = STUDIES_DIR / study_id.replace("/", "\\")
        if nested_path.exists() and nested_path.is_dir():
            return nested_path

    # Direct path (flat structure)
    direct_path = STUDIES_DIR / study_id
    if direct_path.exists() and direct_path.is_dir():
        return direct_path

    # Scan topic folders (nested structure)
    for topic_dir in STUDIES_DIR.iterdir():
        if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
            study_dir = topic_dir / study_id
            if study_dir.exists() and study_dir.is_dir():
                return study_dir

    raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
def get_manager(study_id: str) -> SpecManager:
    """Resolve *study_id* to its folder and return that study's SpecManager."""
    return get_spec_manager(resolve_study_path(study_id))
# ============================================================================
# REST Endpoints
# ============================================================================
@router.get("", response_model=None)
async def get_spec(study_id: str):
"""
Get the full AtomizerSpec for a study.
Returns the complete spec JSON with all design variables, extractors,
objectives, constraints, and canvas state.
"""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(
status_code=404,
detail=f"No AtomizerSpec found for study '{study_id}'. Use migration or create new spec."
)
try:
spec = manager.load()
return spec.model_dump(mode='json')
except SpecValidationError as e:
# Return spec even if invalid, but include validation info
raw = manager.load_raw()
return JSONResponse(
status_code=200,
content={
**raw,
"_validation_error": str(e)
}
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/raw")
async def get_spec_raw(study_id: str):
"""
Get the raw spec JSON without validation.
Useful for debugging or when spec is invalid.
"""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
try:
return manager.load_raw()
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/hash")
async def get_spec_hash(study_id: str):
"""Get the current spec hash for conflict detection."""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
return {"hash": manager.get_hash()}
@router.put("", response_model=SpecUpdateResponse)
async def replace_spec(
study_id: str,
spec: Dict[str, Any],
modified_by: str = Query(default="api"),
expected_hash: Optional[str] = Query(default=None)
):
"""
Replace the entire spec.
Validates the new spec before saving. Optionally check for conflicts
using expected_hash parameter.
"""
manager = get_manager(study_id)
try:
new_hash = manager.save(spec, modified_by=modified_by, expected_hash=expected_hash)
reloaded = manager.load()
return SpecUpdateResponse(
success=True,
hash=new_hash,
modified=reloaded.meta.modified or "",
modified_by=modified_by
)
except SpecConflictError as e:
raise HTTPException(
status_code=409,
detail={
"message": str(e),
"current_hash": e.current_hash
}
)
except SpecValidationError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.patch("", response_model=SpecUpdateResponse)
async def patch_spec(study_id: str, request: SpecPatchRequest):
"""
Partial update to spec using JSONPath.
Example paths:
- "objectives[0].weight" - Update objective weight
- "design_variables[1].bounds.max" - Update DV bound
- "meta.description" - Update description
"""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
try:
spec = manager.patch(request.path, request.value, modified_by=request.modified_by)
return SpecUpdateResponse(
success=True,
hash=manager.get_hash(),
modified=spec.meta.modified or "",
modified_by=request.modified_by
)
except SpecValidationError as e:
raise HTTPException(status_code=400, detail=str(e))
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/validate", response_model=ValidationResponse)
async def validate_spec(study_id: str):
"""
Validate the spec and return detailed report.
Returns errors, warnings, and summary of the spec contents.
"""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
try:
report = manager.validate_and_report()
return ValidationResponse(
valid=report.valid,
errors=[e.model_dump() for e in report.errors],
warnings=[w.model_dump() for w in report.warnings],
summary=report.summary.model_dump()
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
# ============================================================================
# Node CRUD Endpoints
# ============================================================================
@router.post("/nodes", response_model=NodeAddResponse)
async def add_node(study_id: str, request: NodeAddRequest):
"""
Add a new node to the spec.
Supported types: designVar, extractor, objective, constraint
"""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
valid_types = ["designVar", "extractor", "objective", "constraint"]
if request.type not in valid_types:
raise HTTPException(
status_code=400,
detail=f"Invalid node type '{request.type}'. Valid: {valid_types}"
)
try:
node_id = manager.add_node(request.type, request.data, modified_by=request.modified_by)
return NodeAddResponse(
success=True,
node_id=node_id,
message=f"Added {request.type} node: {node_id}"
)
except SpecValidationError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.patch("/nodes/{node_id}")
async def update_node(study_id: str, node_id: str, request: NodeUpdateRequest):
"""Update an existing node's properties."""
manager = get_manager(study_id)
if not manager.exists():
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
try:
manager.update_node(node_id, request.updates, modified_by=request.modified_by)
return {"success": True, "message": f"Updated node {node_id}"}
except SpecManagerError as e:
raise HTTPException(status_code=404, detail=str(e))
except SpecValidationError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/nodes/{node_id}")
async def delete_node(
    study_id: str,
    node_id: str,
    modified_by: str = Query(default="canvas")
):
    """
    Delete a node and all edges referencing it.

    Use with caution - this will also remove any objectives or constraints
    that reference a deleted extractor.
    """
    manager = get_manager(study_id)
    if not manager.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        manager.remove_node(node_id, modified_by=modified_by)
    except SpecManagerError as exc:
        # Node (or spec entity) not found.
        raise HTTPException(status_code=404, detail=str(exc))
    except SpecValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "message": f"Removed node {node_id}"}
# ============================================================================
# Custom Function Endpoint
# ============================================================================
@router.post("/custom-functions", response_model=NodeAddResponse)
async def add_custom_function(study_id: str, request: CustomFunctionRequest):
    """
    Add a custom Python function as an extractor.

    The function will be available in the optimization workflow; Claude can
    use this to add new physics extraction logic.
    """
    manager = get_manager(study_id)
    if not manager.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        new_extractor_id = manager.add_custom_function(
            name=request.name,
            code=request.code,
            outputs=request.outputs,
            description=request.description,
            modified_by=request.modified_by,
        )
        return NodeAddResponse(
            success=True,
            node_id=new_extractor_id,
            message=f"Added custom extractor: {request.name}",
        )
    except SpecValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
# Separate router for non-study-specific endpoints
validate_router = APIRouter(prefix="/spec", tags=["spec"])


@validate_router.post("/validate-extractor")
async def validate_custom_extractor(request: ExtractorValidationRequest):
    """
    Validate custom extractor Python code.

    Checks syntax, security patterns, and function signature. Does not
    require a study - can be used before adding code to a spec.
    """
    try:
        from optimization_engine.extractors.custom_extractor_loader import (
            validate_extractor_code,
            ExtractorSecurityError,
        )
        try:
            ok, problems = validate_extractor_code(request.source, request.function_name)
            return {
                "valid": ok,
                "errors": problems
            }
        except ExtractorSecurityError as exc:
            # Security violations are reported as validation failures,
            # not server errors.
            return {
                "valid": False,
                "errors": [str(exc)]
            }
    except ImportError as e:
        raise HTTPException(
            status_code=500,
            detail=f"Custom extractor loader not available: {e}"
        )
# ============================================================================
# Edge Endpoints
# ============================================================================
@router.post("/edges")
async def add_edge(
    study_id: str,
    source: str = Query(..., description="Source node ID"),
    target: str = Query(..., description="Target node ID"),
    modified_by: str = Query(default="canvas")
):
    """Add a canvas edge between two nodes."""
    manager = get_manager(study_id)
    if not manager.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        manager.add_edge(source, target, modified_by=modified_by)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "message": f"Added edge {source} -> {target}"}
@router.delete("/edges")
async def delete_edge(
    study_id: str,
    source: str = Query(..., description="Source node ID"),
    target: str = Query(..., description="Target node ID"),
    modified_by: str = Query(default="canvas")
):
    """Remove a canvas edge."""
    manager = get_manager(study_id)
    if not manager.exists():
        raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")

    try:
        manager.remove_edge(source, target, modified_by=modified_by)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "message": f"Removed edge {source} -> {target}"}
# ============================================================================
# WebSocket Sync Endpoint
# ============================================================================
class WebSocketSubscriber:
    """Adapter that lets a FastAPI WebSocket act as a spec-update subscriber."""

    def __init__(self, websocket: WebSocket):
        self.websocket = websocket

    async def send_json(self, data: Dict[str, Any]) -> None:
        """Forward a JSON-serializable payload to the connected client."""
        await self.websocket.send_json(data)
@router.websocket("/sync")
async def websocket_sync(websocket: WebSocket, study_id: str):
    """
    WebSocket endpoint for real-time spec sync.

    Clients receive notifications when spec changes:
    - spec_updated: Spec was modified
    - node_added: New node added
    - node_removed: Node removed
    - validation_error: Validation failed

    Incoming client messages handled here:
    - ping: answered with pong
    - patch_node: applies a node update via the SpecManager
    - update_position: moves a node on the canvas

    The connection is kept alive with a heartbeat sent whenever no client
    message arrives within 30 seconds.
    """
    await websocket.accept()
    manager = get_manager(study_id)
    subscriber = WebSocketSubscriber(websocket)
    # Subscribe to updates so manager broadcasts reach this client.
    manager.subscribe(subscriber)
    try:
        # Send initial connection ack (hash is None when no spec exists yet).
        await websocket.send_json({
            "type": "connection_ack",
            "study_id": study_id,
            "hash": manager.get_hash() if manager.exists() else None,
            "message": "Connected to spec sync"
        })
        # Keep connection alive and handle client messages
        while True:
            try:
                data = await asyncio.wait_for(
                    websocket.receive_json(),
                    timeout=30.0  # Heartbeat interval
                )
                # Handle client messages
                msg_type = data.get("type")
                if msg_type == "ping":
                    await websocket.send_json({"type": "pong"})
                elif msg_type == "patch_node":
                    # Client requests node update; errors are reported back
                    # on the socket instead of closing the connection.
                    try:
                        manager.update_node(
                            data["node_id"],
                            data.get("data", {}),
                            modified_by=data.get("modified_by", "canvas")
                        )
                    except Exception as e:
                        await websocket.send_json({
                            "type": "error",
                            "message": str(e)
                        })
                elif msg_type == "update_position":
                    # Client updates node position
                    try:
                        manager.update_node_position(
                            data["node_id"],
                            data["position"],
                            modified_by=data.get("modified_by", "canvas")
                        )
                    except Exception as e:
                        await websocket.send_json({
                            "type": "error",
                            "message": str(e)
                        })
            except asyncio.TimeoutError:
                # Send heartbeat so idle connections are not dropped.
                await websocket.send_json({"type": "heartbeat"})
    except WebSocketDisconnect:
        # Normal client disconnect; nothing to report.
        pass
    finally:
        # Always drop the subscription so the manager does not try to
        # broadcast to a closed socket.
        manager.unsubscribe(subscriber)
# ============================================================================
# Create/Initialize Spec
# ============================================================================
@router.post("/create")
async def create_spec(
    study_id: str,
    spec: Dict[str, Any],
    modified_by: str = Query(default="api")
):
    """
    Create a new spec for a study.

    Use this when migrating from old config or creating a new study.
    Will fail if spec already exists (use PUT to replace).
    """
    manager = get_manager(study_id)
    if manager.exists():
        raise HTTPException(
            status_code=409,
            detail=f"Spec already exists for '{study_id}'. Use PUT to replace."
        )

    try:
        # Ensure meta fields are set
        meta = spec.setdefault("meta", {})
        meta["created_by"] = modified_by
        new_hash = manager.save(spec, modified_by=modified_by)
    except SpecValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))

    return {
        "success": True,
        "hash": new_hash,
        "message": f"Created spec for {study_id}"
    }

View File

@@ -3,5 +3,13 @@ Atomizer Dashboard Services
"""
from .claude_agent import AtomizerClaudeAgent
from .spec_manager import SpecManager, SpecManagerError, SpecNotFoundError, SpecConflictError, get_spec_manager
__all__ = ['AtomizerClaudeAgent']
__all__ = [
'AtomizerClaudeAgent',
'SpecManager',
'SpecManagerError',
'SpecNotFoundError',
'SpecConflictError',
'get_spec_manager',
]

File diff suppressed because it is too large Load Diff

View File

@@ -43,7 +43,11 @@ class ContextBuilder:
# Canvas context takes priority - if user is working on a canvas, include it
if canvas_state:
node_count = len(canvas_state.get("nodes", []))
print(f"[ContextBuilder] Including canvas context with {node_count} nodes")
parts.append(self._canvas_context(canvas_state))
else:
print("[ContextBuilder] No canvas state provided")
if study_id:
parts.append(self._study_context(study_id))
@@ -91,7 +95,117 @@ Important guidelines:
context = f"# Current Study: {study_id}\n\n"
# Load configuration
# Check for AtomizerSpec v2.0 first (preferred)
spec_path = study_dir / "1_setup" / "atomizer_spec.json"
if not spec_path.exists():
spec_path = study_dir / "atomizer_spec.json"
if spec_path.exists():
context += self._spec_context(spec_path)
else:
# Fall back to legacy optimization_config.json
context += self._legacy_config_context(study_dir)
# Check for results
db_path = study_dir / "3_results" / "study.db"
if db_path.exists():
try:
conn = sqlite3.connect(db_path)
count = conn.execute(
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
).fetchone()[0]
best = conn.execute("""
SELECT MIN(tv.value) FROM trial_values tv
JOIN trials t ON tv.trial_id = t.trial_id
WHERE t.state = 'COMPLETE'
""").fetchone()[0]
context += f"\n## Results Status\n\n"
context += f"- **Trials completed**: {count}\n"
if best is not None:
context += f"- **Best objective**: {best:.6f}\n"
conn.close()
except Exception:
pass
return context
def _spec_context(self, spec_path: Path) -> str:
"""Build context from AtomizerSpec v2.0 file"""
context = "**Format**: AtomizerSpec v2.0\n\n"
try:
with open(spec_path) as f:
spec = json.load(f)
context += "## Configuration\n\n"
# Design variables
dvs = spec.get("design_variables", [])
if dvs:
context += "**Design Variables:**\n"
for dv in dvs[:10]:
bounds = dv.get("bounds", {})
bound_str = f"[{bounds.get('min', '?')}, {bounds.get('max', '?')}]"
enabled = "" if dv.get("enabled", True) else ""
context += f"- {dv.get('name', 'unnamed')}: {bound_str} {enabled}\n"
if len(dvs) > 10:
context += f"- ... and {len(dvs) - 10} more\n"
# Extractors
extractors = spec.get("extractors", [])
if extractors:
context += "\n**Extractors:**\n"
for ext in extractors:
ext_type = ext.get("type", "unknown")
outputs = ext.get("outputs", [])
output_names = [o.get("name", "?") for o in outputs[:3]]
builtin = "builtin" if ext.get("builtin", True) else "custom"
context += f"- {ext.get('name', 'unnamed')} ({ext_type}, {builtin}): outputs {output_names}\n"
# Objectives
objs = spec.get("objectives", [])
if objs:
context += "\n**Objectives:**\n"
for obj in objs:
direction = obj.get("direction", "minimize")
weight = obj.get("weight", 1.0)
context += f"- {obj.get('name', 'unnamed')} ({direction}, weight={weight})\n"
# Constraints
constraints = spec.get("constraints", [])
if constraints:
context += "\n**Constraints:**\n"
for c in constraints:
op = c.get("operator", "<=")
thresh = c.get("threshold", "?")
context += f"- {c.get('name', 'unnamed')}: {op} {thresh}\n"
# Optimization settings
opt = spec.get("optimization", {})
algo = opt.get("algorithm", {})
budget = opt.get("budget", {})
method = algo.get("type", "TPE")
max_trials = budget.get("max_trials", "not set")
context += f"\n**Optimization**: {method}, max_trials: {max_trials}\n"
# Surrogate
surrogate = opt.get("surrogate", {})
if surrogate.get("enabled"):
sur_type = surrogate.get("type", "gaussian_process")
context += f"**Surrogate**: {sur_type} enabled\n"
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Spec file exists but could not be parsed: {e}*\n"
return context
def _legacy_config_context(self, study_dir: Path) -> str:
"""Build context from legacy optimization_config.json"""
context = "**Format**: Legacy optimization_config.json\n\n"
config_path = study_dir / "1_setup" / "optimization_config.json"
if not config_path.exists():
config_path = study_dir / "optimization_config.json"
@@ -135,30 +249,8 @@ Important guidelines:
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Config file exists but could not be parsed: {e}*\n"
# Check for results
db_path = study_dir / "3_results" / "study.db"
if db_path.exists():
try:
conn = sqlite3.connect(db_path)
count = conn.execute(
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
).fetchone()[0]
best = conn.execute("""
SELECT MIN(tv.value) FROM trial_values tv
JOIN trials t ON tv.trial_id = t.trial_id
WHERE t.state = 'COMPLETE'
""").fetchone()[0]
context += f"\n## Results Status\n\n"
context += f"- **Trials completed**: {count}\n"
if best is not None:
context += f"- **Best objective**: {best:.6f}\n"
conn.close()
except Exception:
pass
else:
context += "*No configuration file found.*\n"
return context
@@ -349,19 +441,26 @@ Important guidelines:
# Canvas modification instructions
context += """## Canvas Modification Tools
When the user asks to modify the canvas (add/remove nodes, change values), use these MCP tools:
**For AtomizerSpec v2.0 studies (preferred):**
Use spec tools when working with v2.0 studies (check if study uses `atomizer_spec.json`):
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
- `spec_remove_node` - Remove nodes from the spec
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
**For Legacy Canvas (optimization_config.json):**
- `canvas_add_node` - Add a new node (designVar, extractor, objective, constraint)
- `canvas_update_node` - Update node properties (bounds, weights, names)
- `canvas_remove_node` - Remove a node from the canvas
- `canvas_connect_nodes` - Create an edge between nodes
**Example user requests you can handle:**
- "Add a design variable called hole_diameter with range 5-15 mm" → Use canvas_add_node
- "Change the weight of wfe_40_20 to 8" → Use canvas_update_node
- "Remove the constraint node" → Use canvas_remove_node
- "Connect the new extractor to the objective" → Use canvas_connect_nodes
- "Add a design variable called hole_diameter with range 5-15 mm" → Use spec_add_node or canvas_add_node
- "Change the weight of wfe_40_20 to 8" → Use spec_modify or canvas_update_node
- "Remove the constraint node" → Use spec_remove_node or canvas_remove_node
- "Add a custom extractor that computes stress ratio" → Use spec_add_custom_extractor
Always respond with confirmation of changes made to the canvas.
Always respond with confirmation of changes made to the canvas/spec.
"""
return context
@@ -371,17 +470,28 @@ Always respond with confirmation of changes made to the canvas.
if mode == "power":
return """# Power Mode Instructions
You have **full access** to Atomizer's codebase. You can:
- Edit any file using `edit_file` tool
- Create new files with `create_file` tool
- Create new extractors with `create_extractor` tool
- Run shell commands with `run_shell_command` tool
- Search codebase with `search_codebase` tool
- Commit and push changes
You have **FULL ACCESS** to modify Atomizer studies. **DO NOT ASK FOR PERMISSION** - just do it.
**Use these powers responsibly.** Always explain what you're doing and why.
## Direct Actions (no confirmation needed):
- **Add design variables**: Use `canvas_add_node` or `spec_add_node` with node_type="designVar"
- **Add extractors**: Use `canvas_add_node` with node_type="extractor"
- **Add objectives**: Use `canvas_add_node` with node_type="objective"
- **Add constraints**: Use `canvas_add_node` with node_type="constraint"
- **Update node properties**: Use `canvas_update_node` or `spec_modify`
- **Remove nodes**: Use `canvas_remove_node`
- **Edit atomizer_spec.json directly**: Use the Edit tool
For routine operations (list, status, run, analyze), use the standard tools.
## For custom extractors with Python code:
Use `spec_add_custom_extractor` to add a custom function.
## IMPORTANT:
- You have --dangerously-skip-permissions enabled
- The user has explicitly granted you power mode access
- **ACT IMMEDIATELY** when asked to add/modify/remove things
- Explain what you did AFTER doing it, not before
- Do NOT say "I need permission" - you already have it
Example: If user says "add a volume extractor", immediately use canvas_add_node to add it.
"""
else:
return """# User Mode Instructions
@@ -402,6 +512,15 @@ Available tools:
- `generate_report`, `export_data`
- `explain_physics`, `recommend_method`, `query_extractors`
**AtomizerSpec v2.0 Tools (preferred for new studies):**
- `spec_get` - Get the full AtomizerSpec for a study
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
- `spec_remove_node` - Remove nodes from the spec
- `spec_validate` - Validate spec against JSON Schema
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
- `spec_create_from_description` - Create a new study from natural language description
**Canvas Tools (for visual workflow builder):**
- `validate_canvas_intent` - Validate a canvas-generated optimization intent
- `execute_canvas_intent` - Create a study from a canvas intent

View File

@@ -0,0 +1,454 @@
"""
Interview Engine - Guided Study Creation through Conversation
Provides a structured interview flow for creating optimization studies.
Claude uses this to gather information step-by-step, building a complete
atomizer_spec.json through natural conversation.
"""
from typing import Dict, Any, List, Optional, Literal
from dataclasses import dataclass, field
from enum import Enum
from datetime import datetime
import json
class InterviewState(str, Enum):
    """Current phase of the interview.

    Subclasses ``str`` so values serialize directly to JSON and compare
    equal to plain strings (e.g. ``state == "review"``). The phases advance
    in the order listed; see ``InterviewEngine.advance_state``.
    """
    NOT_STARTED = "not_started"
    GATHERING_BASICS = "gathering_basics"  # Name, description, goals
    GATHERING_MODEL = "gathering_model"  # Model file, solver type
    GATHERING_VARIABLES = "gathering_variables"  # Design variables
    GATHERING_EXTRACTORS = "gathering_extractors"  # Physics extractors
    GATHERING_OBJECTIVES = "gathering_objectives"  # Objectives
    GATHERING_CONSTRAINTS = "gathering_constraints"  # Constraints
    GATHERING_SETTINGS = "gathering_settings"  # Algorithm, trials
    REVIEW = "review"  # Review before creation
    COMPLETED = "completed"
@dataclass
class InterviewData:
    """Answers accumulated over the course of a study-creation interview."""
    # Basics
    study_name: Optional[str] = None
    category: Optional[str] = None
    description: Optional[str] = None
    goals: List[str] = field(default_factory=list)
    # Model
    sim_file: Optional[str] = None
    prt_file: Optional[str] = None
    solver_type: str = "nastran"
    # Design variables
    design_variables: List[Dict[str, Any]] = field(default_factory=list)
    # Extractors
    extractors: List[Dict[str, Any]] = field(default_factory=list)
    # Objectives
    objectives: List[Dict[str, Any]] = field(default_factory=list)
    # Constraints
    constraints: List[Dict[str, Any]] = field(default_factory=list)
    # Settings
    algorithm: str = "TPE"
    max_trials: int = 100

    def to_spec(self) -> Dict[str, Any]:
        """Convert interview data to atomizer_spec.json format."""

        def _with_ids_and_layout(items: List[Dict[str, Any]], prefix: str,
                                 x: int, y_start: int) -> List[Dict[str, Any]]:
            # Assign sequential ids (prefix_001, ...) and stack the nodes in
            # a vertical column on the canvas, 80px apart.
            placed = []
            for idx, item in enumerate(items):
                entry = item.copy()
                entry['id'] = f"{prefix}_{idx + 1:03d}"
                entry['canvas_position'] = {'x': x, 'y': y_start + idx * 80}
                placed.append(entry)
            return placed

        return {
            "meta": {
                "version": "2.0",
                "study_name": self.study_name or "untitled_study",
                "description": self.description or "",
                "created_at": datetime.now().isoformat(),
                "created_by": "interview",
                "modified_at": datetime.now().isoformat(),
                "modified_by": "interview"
            },
            "model": {
                "sim": {
                    "path": self.sim_file or "",
                    "solver": self.solver_type
                }
            },
            "design_variables": _with_ids_and_layout(self.design_variables, "dv", 50, 100),
            "extractors": _with_ids_and_layout(self.extractors, "ext", 400, 100),
            "objectives": _with_ids_and_layout(self.objectives, "obj", 750, 100),
            "constraints": _with_ids_and_layout(self.constraints, "con", 750, 400),
            "optimization": {
                "algorithm": {
                    "type": self.algorithm
                },
                "budget": {
                    "max_trials": self.max_trials
                }
            },
            "canvas": {
                "edges": [],
                "layout_version": "2.0"
            }
        }
class InterviewEngine:
    """
    Manages the interview flow for study creation.

    Usage:
        1. Create engine: engine = InterviewEngine()
        2. Start interview: engine.start()
        3. Record answers: engine.record_answer("study_name", "bracket_opt")
        4. Check progress: engine.get_progress()
        5. Generate spec: engine.finalize()
    """

    def __init__(self):
        self.state = InterviewState.NOT_STARTED
        self.data = InterviewData()
        self.questions_asked: List[str] = []
        self.errors: List[str] = []

    def start(self) -> Dict[str, Any]:
        """Start the interview process and move to the basics phase."""
        self.state = InterviewState.GATHERING_BASICS
        return {
            "state": self.state.value,
            "message": "Let's create a new optimization study! I'll guide you through the process.",
            "next_questions": self.get_current_questions()
        }

    def get_current_questions(self) -> List[Dict[str, Any]]:
        """Get the questions for the current interview state.

        Returns an empty list for states with no questions (e.g. COMPLETED).
        """
        questions = {
            InterviewState.GATHERING_BASICS: [
                {
                    "field": "study_name",
                    "question": "What would you like to name this study?",
                    "hint": "Use snake_case, e.g., 'bracket_mass_optimization'",
                    "required": True
                },
                {
                    "field": "category",
                    "question": "What category should this study be in?",
                    "hint": "e.g., 'Simple_Bracket', 'M1_Mirror', or leave blank for root",
                    "required": False
                },
                {
                    "field": "description",
                    "question": "Briefly describe what you're trying to optimize",
                    "hint": "e.g., 'Minimize bracket mass while maintaining stiffness'",
                    "required": True
                }
            ],
            InterviewState.GATHERING_MODEL: [
                {
                    "field": "sim_file",
                    "question": "What is the path to your simulation (.sim) file?",
                    "hint": "Relative path from the study folder, e.g., '1_setup/Model_sim1.sim'",
                    "required": True
                }
            ],
            InterviewState.GATHERING_VARIABLES: [
                {
                    "field": "design_variable",
                    "question": "What parameters do you want to optimize?",
                    "hint": "Tell me the NX expression names and their bounds",
                    "required": True,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_EXTRACTORS: [
                {
                    "field": "extractor",
                    "question": "What physics quantities do you want to extract from FEA?",
                    "hint": "e.g., mass, max displacement, max stress, frequency, Zernike WFE",
                    "required": True,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_OBJECTIVES: [
                {
                    "field": "objective",
                    "question": "What do you want to optimize?",
                    "hint": "Tell me which extracted quantities to minimize or maximize",
                    "required": True,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_CONSTRAINTS: [
                {
                    "field": "constraint",
                    "question": "Do you have any constraints? (e.g., max stress, min frequency)",
                    "hint": "You can say 'none' if you don't have any",
                    "required": False,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_SETTINGS: [
                {
                    "field": "algorithm",
                    "question": "Which optimization algorithm would you like to use?",
                    "hint": "Options: TPE (default), CMA-ES, NSGA-II, RandomSearch",
                    "required": False
                },
                {
                    "field": "max_trials",
                    "question": "How many trials (FEA evaluations) should we run?",
                    "hint": "Default is 100. More trials = better results but longer runtime",
                    "required": False
                }
            ],
            InterviewState.REVIEW: [
                {
                    "field": "confirm",
                    "question": "Does this configuration look correct? (yes/no)",
                    "required": True
                }
            ]
        }
        return questions.get(self.state, [])

    def record_answer(self, field: str, value: Any) -> Dict[str, Any]:
        """Record an answer and potentially advance the state.

        ``value`` may be a string, or - for the multi-entry fields
        (design_variable, extractor, objective, constraint) - a dict or a
        list of dicts. Unknown field names are recorded in
        ``questions_asked`` but otherwise ignored.
        """
        self.questions_asked.append(field)
        # Handle different field types
        if field == "study_name":
            self.data.study_name = value
        elif field == "category":
            self.data.category = value if value else None
        elif field == "description":
            self.data.description = value
        elif field == "sim_file":
            self.data.sim_file = value
        elif field == "design_variable":
            # Value should be a dict with name, min, max, etc.
            if isinstance(value, dict):
                self.data.design_variables.append(value)
            elif isinstance(value, list):
                self.data.design_variables.extend(value)
        elif field == "extractor":
            if isinstance(value, dict):
                self.data.extractors.append(value)
            elif isinstance(value, list):
                self.data.extractors.extend(value)
        elif field == "objective":
            if isinstance(value, dict):
                self.data.objectives.append(value)
            elif isinstance(value, list):
                self.data.objectives.extend(value)
        elif field == "constraint":
            # BUG FIX: the previous guard called value.lower() unconditionally,
            # which raised AttributeError for the dict/list payloads this
            # multi-field is documented to accept. Strings (e.g. "none",
            # "no", "skip") record nothing, matching the original intent.
            if isinstance(value, dict):
                self.data.constraints.append(value)
            elif isinstance(value, list):
                self.data.constraints.extend(value)
        elif field == "algorithm":
            if value in ["TPE", "CMA-ES", "NSGA-II", "RandomSearch"]:
                self.data.algorithm = value
        elif field == "max_trials":
            try:
                self.data.max_trials = int(value)
            except (ValueError, TypeError):
                pass
        elif field == "confirm":
            # Robustness: only affirmative strings confirm; a non-string
            # value is a no-op (previously it raised AttributeError).
            if isinstance(value, str) and value.lower() in ["yes", "y", "confirm", "ok"]:
                self.state = InterviewState.COMPLETED
        return {
            "state": self.state.value,
            "recorded": {field: value},
            "data_so_far": self.get_summary()
        }

    def advance_state(self) -> Dict[str, Any]:
        """Advance to the next interview state (no-op once COMPLETED)."""
        state_order = [
            InterviewState.NOT_STARTED,
            InterviewState.GATHERING_BASICS,
            InterviewState.GATHERING_MODEL,
            InterviewState.GATHERING_VARIABLES,
            InterviewState.GATHERING_EXTRACTORS,
            InterviewState.GATHERING_OBJECTIVES,
            InterviewState.GATHERING_CONSTRAINTS,
            InterviewState.GATHERING_SETTINGS,
            InterviewState.REVIEW,
            InterviewState.COMPLETED
        ]
        current_idx = state_order.index(self.state)
        if current_idx < len(state_order) - 1:
            self.state = state_order[current_idx + 1]
        return {
            "state": self.state.value,
            "next_questions": self.get_current_questions()
        }

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of collected data (counts, not full contents)."""
        return {
            "study_name": self.data.study_name,
            "category": self.data.category,
            "description": self.data.description,
            "model": self.data.sim_file,
            "design_variables": len(self.data.design_variables),
            "extractors": len(self.data.extractors),
            "objectives": len(self.data.objectives),
            "constraints": len(self.data.constraints),
            "algorithm": self.data.algorithm,
            "max_trials": self.data.max_trials
        }

    def get_progress(self) -> Dict[str, Any]:
        """Get interview progress information as a percentage plus summary."""
        state_progress = {
            InterviewState.NOT_STARTED: 0,
            InterviewState.GATHERING_BASICS: 15,
            InterviewState.GATHERING_MODEL: 25,
            InterviewState.GATHERING_VARIABLES: 40,
            InterviewState.GATHERING_EXTRACTORS: 55,
            InterviewState.GATHERING_OBJECTIVES: 70,
            InterviewState.GATHERING_CONSTRAINTS: 80,
            InterviewState.GATHERING_SETTINGS: 90,
            InterviewState.REVIEW: 95,
            InterviewState.COMPLETED: 100
        }
        return {
            "state": self.state.value,
            "progress_percent": state_progress.get(self.state, 0),
            "summary": self.get_summary(),
            "current_questions": self.get_current_questions()
        }

    def validate(self) -> Dict[str, Any]:
        """Validate the collected data before finalizing.

        Returns a dict with ``valid`` (bool), ``errors`` and ``warnings``.
        Errors block finalization; warnings do not.
        """
        errors = []
        warnings = []
        # Required fields
        if not self.data.study_name:
            errors.append("Study name is required")
        if not self.data.design_variables:
            errors.append("At least one design variable is required")
        if not self.data.extractors:
            errors.append("At least one extractor is required")
        if not self.data.objectives:
            errors.append("At least one objective is required")
        # Warnings
        if not self.data.sim_file:
            warnings.append("No simulation file specified - you'll need to add one manually")
        if not self.data.constraints:
            warnings.append("No constraints defined - optimization will be unconstrained")
        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings
        }

    def finalize(self) -> Dict[str, Any]:
        """Generate the final atomizer_spec.json (after validation)."""
        validation = self.validate()
        if not validation["valid"]:
            return {
                "success": False,
                "errors": validation["errors"]
            }
        spec = self.data.to_spec()
        return {
            "success": True,
            "spec": spec,
            "warnings": validation.get("warnings", [])
        }

    def to_dict(self) -> Dict[str, Any]:
        """Serialize engine state for persistence."""
        return {
            "state": self.state.value,
            "data": {
                "study_name": self.data.study_name,
                "category": self.data.category,
                "description": self.data.description,
                "goals": self.data.goals,
                "sim_file": self.data.sim_file,
                "prt_file": self.data.prt_file,
                "solver_type": self.data.solver_type,
                "design_variables": self.data.design_variables,
                "extractors": self.data.extractors,
                "objectives": self.data.objectives,
                "constraints": self.data.constraints,
                "algorithm": self.data.algorithm,
                "max_trials": self.data.max_trials
            },
            "questions_asked": self.questions_asked,
            "errors": self.errors
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "InterviewEngine":
        """Restore engine from serialized state (inverse of ``to_dict``)."""
        engine = cls()
        engine.state = InterviewState(data.get("state", "not_started"))
        d = data.get("data", {})
        engine.data.study_name = d.get("study_name")
        engine.data.category = d.get("category")
        engine.data.description = d.get("description")
        engine.data.goals = d.get("goals", [])
        engine.data.sim_file = d.get("sim_file")
        engine.data.prt_file = d.get("prt_file")
        engine.data.solver_type = d.get("solver_type", "nastran")
        engine.data.design_variables = d.get("design_variables", [])
        engine.data.extractors = d.get("extractors", [])
        engine.data.objectives = d.get("objectives", [])
        engine.data.constraints = d.get("constraints", [])
        engine.data.algorithm = d.get("algorithm", "TPE")
        engine.data.max_trials = d.get("max_trials", 100)
        engine.questions_asked = data.get("questions_asked", [])
        engine.errors = data.get("errors", [])
        return engine

View File

@@ -219,6 +219,18 @@ class SessionManager:
full_response = result["stdout"] or ""
if full_response:
# Check if response contains canvas modifications (from MCP tools)
import logging
logger = logging.getLogger(__name__)
modifications = self._extract_canvas_modifications(full_response)
logger.info(f"[SEND_MSG] Found {len(modifications)} canvas modifications to send")
for mod in modifications:
logger.info(f"[SEND_MSG] Sending canvas_modification: {mod.get('action')} {mod.get('nodeType')}")
yield {"type": "canvas_modification", "modification": mod}
# Always send the text response
yield {"type": "text", "content": full_response}
if result["returncode"] != 0 and result["stderr"]:
@@ -292,6 +304,90 @@ class SessionManager:
**({} if not db_record else {"db_record": db_record}),
}
def _extract_canvas_modifications(self, response: str) -> List[Dict]:
    """
    Extract canvas modification objects from Claude's response.

    MCP tools like canvas_add_node return JSON with a 'modification' field.
    This method finds and extracts those modifications so the frontend can
    apply them.

    Two passes are made over the text: first JSON inside ``` code fences,
    then inline JSON objects found via brace matching (which, unlike a
    regex, handles nested objects and braces inside string literals).

    Args:
        response: Full text of Claude's reply.

    Returns:
        List of 'modification' dicts in discovery order; inline duplicates
        are skipped, and any unexpected error yields whatever was found
        so far rather than raising.
    """
    import re
    import logging
    logger = logging.getLogger(__name__)

    modifications = []

    # Debug: log what we're searching
    logger.info(f"[CANVAS_MOD] Searching response ({len(response)} chars) for modifications")

    # Cheap pre-check: skip all parsing when the key never appears at all.
    if '"modification"' not in response:
        logger.info("[CANVAS_MOD] No 'modification' key found in response")
        return modifications

    try:
        # Method 1: Look for JSON in code fences
        code_block_pattern = r'```(?:json)?\s*([\s\S]*?)```'
        for match in re.finditer(code_block_pattern, response):
            block_content = match.group(1).strip()
            try:
                obj = json.loads(block_content)
                if isinstance(obj, dict) and 'modification' in obj:
                    logger.info(f"[CANVAS_MOD] Found modification in code fence: {obj['modification']}")
                    modifications.append(obj['modification'])
            except json.JSONDecodeError:
                continue

        # Method 2: Find JSON objects using proper brace matching
        # This handles nested objects correctly
        i = 0
        while i < len(response):
            if response[i] == '{':
                # Found a potential JSON start, find matching close
                brace_count = 1
                j = i + 1
                in_string = False    # currently inside a JSON string literal
                escape_next = False  # previous char was an unconsumed backslash
                while j < len(response) and brace_count > 0:
                    char = response[j]
                    if escape_next:
                        # Escaped char: cannot open/close a string or count
                        # as a brace.
                        escape_next = False
                    elif char == '\\':
                        escape_next = True
                    elif char == '"' and not escape_next:
                        in_string = not in_string
                    elif not in_string:
                        # Braces inside string literals must not affect depth.
                        if char == '{':
                            brace_count += 1
                        elif char == '}':
                            brace_count -= 1
                    j += 1
                if brace_count == 0:
                    potential_json = response[i:j]
                    try:
                        obj = json.loads(potential_json)
                        if isinstance(obj, dict) and 'modification' in obj:
                            mod = obj['modification']
                            # Avoid duplicates
                            if mod not in modifications:
                                logger.info(f"[CANVAS_MOD] Found inline modification: action={mod.get('action')}, nodeType={mod.get('nodeType')}")
                                modifications.append(mod)
                    except json.JSONDecodeError as e:
                        # Not valid JSON, skip
                        pass
                # Resume scanning at j: one past the matched closing brace,
                # or end-of-string when the braces never balanced.
                i = j
            else:
                i += 1
    except Exception as e:
        # Best-effort extraction: never let a parsing bug break streaming.
        logger.error(f"[CANVAS_MOD] Error extracting modifications: {e}")

    logger.info(f"[CANVAS_MOD] Extracted {len(modifications)} modification(s)")
    return modifications
def _build_mcp_config(self, mode: Literal["user", "power"]) -> dict:
"""Build MCP configuration for Claude"""
return {

View File

@@ -0,0 +1,747 @@
"""
SpecManager Service
Central service for managing AtomizerSpec v2.0.
All spec modifications flow through this service.
Features:
- Load/save specs with validation
- Atomic writes with conflict detection
- Patch operations with JSONPath support
- Node CRUD operations
- Custom function support
- WebSocket broadcast integration
"""
import hashlib
import json
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
# Make the repository root importable so `optimization_engine` resolves even
# when this module is loaded from the dashboard backend. The root is five
# directory levels above this file (five chained .parent calls below).
# NOTE(review): this mutates sys.path at import time; keep it before the
# `optimization_engine` imports that follow.
ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent
if str(ATOMIZER_ROOT) not in sys.path:
    sys.path.insert(0, str(ATOMIZER_ROOT))
from optimization_engine.config.spec_models import (
AtomizerSpec,
DesignVariable,
Extractor,
Objective,
Constraint,
CanvasPosition,
CanvasEdge,
ExtractorType,
CustomFunction,
ExtractorOutput,
ValidationReport,
)
from optimization_engine.config.spec_validator import (
SpecValidator,
SpecValidationError,
)
class SpecManagerError(Exception):
"""Base error for SpecManager operations."""
pass
class SpecNotFoundError(SpecManagerError):
    """Signals that the spec file is missing from the study directory."""
class SpecConflictError(SpecManagerError):
    """Signals that another client changed the spec since it was last read."""

    def __init__(self, message: str, current_hash: str):
        super().__init__(message)
        # Hash of the spec as it exists on disk right now, so the caller can
        # re-read and retry against the fresh state.
        self.current_hash = current_hash
class WebSocketSubscriber:
    """Duck-typed interface for anything that can receive spec change events."""

    async def send_json(self, data: Dict[str, Any]) -> None:
        """Deliver a JSON-serializable payload; concrete subscribers override this."""
        raise NotImplementedError
class SpecManager:
    """
    Central service for managing AtomizerSpec.

    All modifications go through this service to ensure:
    - Validation on every change
    - Atomic file writes
    - Conflict detection via hashing
    - WebSocket broadcast to all clients
    """

    SPEC_FILENAME = "atomizer_spec.json"

    def __init__(self, study_path: Union[str, Path]):
        """
        Initialize SpecManager for a study.

        Args:
            study_path: Path to the study directory
        """
        self.study_path = Path(study_path)
        self.spec_path = self.study_path / self.SPEC_FILENAME
        self.validator = SpecValidator()
        self._subscribers: List[WebSocketSubscriber] = []
        self._last_hash: Optional[str] = None
        # Strong references to in-flight broadcast tasks. The event loop keeps
        # only weak references to tasks, so without this set a fire-and-forget
        # broadcast task could be garbage-collected before it runs.
        self._pending_tasks: Set[Any] = set()

    # =========================================================================
    # Core CRUD Operations
    # =========================================================================

    def load(self, validate: bool = True) -> AtomizerSpec:
        """
        Load and optionally validate the spec.

        Args:
            validate: Whether to validate the spec

        Returns:
            AtomizerSpec instance

        Raises:
            SpecNotFoundError: If spec file doesn't exist
            SpecValidationError: If validation fails
        """
        if not self.spec_path.exists():
            raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
        with open(self.spec_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        if validate:
            self.validator.validate(data, strict=True)
        spec = AtomizerSpec.model_validate(data)
        self._last_hash = self._compute_hash(data)
        return spec

    def load_raw(self) -> Dict[str, Any]:
        """
        Load spec as raw dict without parsing.

        Returns:
            Raw spec dict

        Raises:
            SpecNotFoundError: If spec file doesn't exist
        """
        if not self.spec_path.exists():
            raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
        with open(self.spec_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def save(
        self,
        spec: Union[AtomizerSpec, Dict[str, Any]],
        modified_by: str = "api",
        expected_hash: Optional[str] = None
    ) -> str:
        """
        Save spec with validation and broadcast.

        Args:
            spec: Spec to save (AtomizerSpec or dict)
            modified_by: Who/what is making the change
            expected_hash: If provided, verify current file hash matches

        Returns:
            New spec hash

        Raises:
            SpecValidationError: If validation fails
            SpecConflictError: If expected_hash doesn't match current
        """
        # Convert to dict if needed
        if isinstance(spec, AtomizerSpec):
            data = spec.model_dump(mode='json')
        else:
            data = spec
        # Check for conflicts if expected_hash provided
        if expected_hash and self.spec_path.exists():
            current_hash = self.get_hash()
            if current_hash != expected_hash:
                raise SpecConflictError(
                    "Spec was modified by another client",
                    current_hash=current_hash
                )
        # Update metadata. Guard against a missing "meta" section so a
        # malformed spec surfaces as a validation error below rather than a
        # bare KeyError here.
        now = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
        data.setdefault("meta", {})
        data["meta"]["modified"] = now
        data["meta"]["modified_by"] = modified_by
        # Validate
        self.validator.validate(data, strict=True)
        # Compute new hash
        new_hash = self._compute_hash(data)
        # Atomic write (write to temp, then rename)
        temp_path = self.spec_path.with_suffix('.tmp')
        with open(temp_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
        temp_path.replace(self.spec_path)
        # Update cached hash
        self._last_hash = new_hash
        # Broadcast to subscribers
        self._broadcast({
            "type": "spec_updated",
            "hash": new_hash,
            "modified_by": modified_by,
            "timestamp": now
        })
        return new_hash

    def exists(self) -> bool:
        """Check if spec file exists."""
        return self.spec_path.exists()

    def get_hash(self) -> str:
        """Get current spec hash (empty string if the file doesn't exist)."""
        if not self.spec_path.exists():
            return ""
        with open(self.spec_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        return self._compute_hash(data)

    def validate_and_report(self) -> ValidationReport:
        """
        Run full validation and return detailed report.

        Returns:
            ValidationReport with errors, warnings, summary

        Raises:
            SpecNotFoundError: If spec file doesn't exist
        """
        if not self.spec_path.exists():
            raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
        data = self.load_raw()
        return self.validator.validate(data, strict=False)

    # =========================================================================
    # Patch Operations
    # =========================================================================

    def patch(
        self,
        path: str,
        value: Any,
        modified_by: str = "api"
    ) -> AtomizerSpec:
        """
        Apply a JSONPath-style modification.

        Args:
            path: JSONPath like "design_variables[0].bounds.max"
            value: New value to set
            modified_by: Who/what is making the change

        Returns:
            Updated AtomizerSpec

        Raises:
            SpecValidationError: If the partial update is invalid
        """
        data = self.load_raw()
        # Validate the partial update against the parsed spec before mutating
        spec = AtomizerSpec.model_validate(data)
        is_valid, errors = self.validator.validate_partial(path, value, spec)
        if not is_valid:
            raise SpecValidationError(f"Invalid update: {'; '.join(errors)}")
        # Apply the patch
        self._apply_patch(data, path, value)
        # Save (re-validates and broadcasts) and return the fresh spec
        self.save(data, modified_by)
        return self.load(validate=False)

    def _apply_patch(self, data: Dict, path: str, value: Any) -> None:
        """
        Apply a patch to the data dict.

        Supports paths like:
        - "meta.description"
        - "design_variables[0].bounds.max"
        - "objectives[1].weight"

        Raises:
            ValueError: If the path parses to nothing
        """
        parts = self._parse_path(path)
        if not parts:
            raise ValueError(f"Invalid path: {path}")
        # Navigate to the parent container of the final key
        current = data
        for part in parts[:-1]:
            if isinstance(current, list):
                idx = int(part)
                current = current[idx]
            else:
                current = current[part]
        # Set final value
        final_key = parts[-1]
        if isinstance(current, list):
            idx = int(final_key)
            current[idx] = value
        else:
            current[final_key] = value

    def _parse_path(self, path: str) -> List[str]:
        """Parse a JSONPath-style string into its parts.

        Handles both dot notation and bracket notation; empty segments from
        consecutive delimiters are dropped.
        """
        parts = []
        for part in re.split(r'\.|\[|\]', path):
            if part:
                parts.append(part)
        return parts

    # =========================================================================
    # Node Operations
    # =========================================================================

    def add_node(
        self,
        node_type: str,
        node_data: Dict[str, Any],
        modified_by: str = "canvas"
    ) -> str:
        """
        Add a new node (design var, extractor, objective, constraint).

        Args:
            node_type: One of 'designVar', 'extractor', 'objective', 'constraint'
            node_data: Node data without ID
            modified_by: Who/what is making the change

        Returns:
            Generated node ID
        """
        data = self.load_raw()
        # Generate ID
        node_id = self._generate_id(node_type, data)
        node_data["id"] = node_id
        # Add canvas position if not provided
        if "canvas_position" not in node_data:
            node_data["canvas_position"] = self._auto_position(node_type, data)
        # Add to appropriate section, creating it if absent or null
        section = self._get_section_for_type(node_type)
        if section not in data or data[section] is None:
            data[section] = []
        data[section].append(node_data)
        self.save(data, modified_by)
        # Broadcast node addition
        self._broadcast({
            "type": "node_added",
            "node_type": node_type,
            "node_id": node_id,
            "modified_by": modified_by
        })
        return node_id

    def update_node(
        self,
        node_id: str,
        updates: Dict[str, Any],
        modified_by: str = "canvas"
    ) -> None:
        """
        Update an existing node.

        Args:
            node_id: ID of the node to update
            updates: Dict of fields to update
            modified_by: Who/what is making the change

        Raises:
            SpecManagerError: If no node with node_id exists
        """
        data = self.load_raw()
        # Find and update the node in whichever section holds it
        found = False
        for section in ["design_variables", "extractors", "objectives", "constraints"]:
            if section not in data or data[section] is None:
                continue
            for node in data[section]:
                if node.get("id") == node_id:
                    node.update(updates)
                    found = True
                    break
            if found:
                break
        if not found:
            raise SpecManagerError(f"Node not found: {node_id}")
        self.save(data, modified_by)

    def remove_node(
        self,
        node_id: str,
        modified_by: str = "canvas"
    ) -> None:
        """
        Remove a node and all edges referencing it.

        Args:
            node_id: ID of the node to remove
            modified_by: Who/what is making the change

        Raises:
            SpecManagerError: If no node with node_id exists
        """
        data = self.load_raw()
        # Find and remove node
        removed = False
        for section in ["design_variables", "extractors", "objectives", "constraints"]:
            if section not in data or data[section] is None:
                continue
            original_len = len(data[section])
            data[section] = [n for n in data[section] if n.get("id") != node_id]
            if len(data[section]) < original_len:
                removed = True
                break
        if not removed:
            raise SpecManagerError(f"Node not found: {node_id}")
        # Remove edges referencing this node so the canvas stays consistent
        if "canvas" in data and data["canvas"] and "edges" in data["canvas"]:
            data["canvas"]["edges"] = [
                e for e in data["canvas"]["edges"]
                if e.get("source") != node_id and e.get("target") != node_id
            ]
        self.save(data, modified_by)
        # Broadcast node removal
        self._broadcast({
            "type": "node_removed",
            "node_id": node_id,
            "modified_by": modified_by
        })

    def update_node_position(
        self,
        node_id: str,
        position: Dict[str, float],
        modified_by: str = "canvas"
    ) -> None:
        """
        Update a node's canvas position.

        Args:
            node_id: ID of the node
            position: Dict with x, y coordinates
            modified_by: Who/what is making the change
        """
        self.update_node(node_id, {"canvas_position": position}, modified_by)

    def add_edge(
        self,
        source: str,
        target: str,
        modified_by: str = "canvas"
    ) -> None:
        """
        Add a canvas edge between nodes.

        Args:
            source: Source node ID
            target: Target node ID
            modified_by: Who/what is making the change
        """
        data = self.load_raw()
        # Initialize canvas section if needed
        if "canvas" not in data or data["canvas"] is None:
            data["canvas"] = {}
        if "edges" not in data["canvas"] or data["canvas"]["edges"] is None:
            data["canvas"]["edges"] = []
        # Check for duplicate — adding the same edge twice is a no-op
        for edge in data["canvas"]["edges"]:
            if edge.get("source") == source and edge.get("target") == target:
                return  # Already exists
        data["canvas"]["edges"].append({
            "source": source,
            "target": target
        })
        self.save(data, modified_by)

    def remove_edge(
        self,
        source: str,
        target: str,
        modified_by: str = "canvas"
    ) -> None:
        """
        Remove a canvas edge.

        Args:
            source: Source node ID
            target: Target node ID
            modified_by: Who/what is making the change
        """
        data = self.load_raw()
        if "canvas" in data and data["canvas"] and "edges" in data["canvas"]:
            data["canvas"]["edges"] = [
                e for e in data["canvas"]["edges"]
                if not (e.get("source") == source and e.get("target") == target)
            ]
        self.save(data, modified_by)

    # =========================================================================
    # Custom Function Support
    # =========================================================================

    def add_custom_function(
        self,
        name: str,
        code: str,
        outputs: List[str],
        description: Optional[str] = None,
        modified_by: str = "claude"
    ) -> str:
        """
        Add a custom extractor function.

        Args:
            name: Function name
            code: Python source code
            outputs: List of output names
            description: Optional description
            modified_by: Who/what is making the change

        Returns:
            Generated extractor ID

        Raises:
            SpecValidationError: If Python syntax is invalid
        """
        # Validate Python syntax before touching the spec
        try:
            compile(code, f"<custom:{name}>", "exec")
        except SyntaxError as e:
            raise SpecValidationError(
                f"Invalid Python syntax: {e.msg} at line {e.lineno}"
            ) from e
        data = self.load_raw()
        # Generate extractor ID
        ext_id = self._generate_id("extractor", data)
        # Create extractor
        extractor = {
            "id": ext_id,
            "name": description or f"Custom: {name}",
            "type": "custom_function",
            "builtin": False,
            "function": {
                "name": name,
                "module": "custom_extractors.dynamic",
                "source_code": code
            },
            "outputs": [{"name": o, "metric": "custom"} for o in outputs],
            "canvas_position": self._auto_position("extractor", data)
        }
        # Guard against a missing/null extractors section (same guard as
        # add_node) so a minimal spec doesn't raise KeyError here.
        if "extractors" not in data or data["extractors"] is None:
            data["extractors"] = []
        data["extractors"].append(extractor)
        self.save(data, modified_by)
        return ext_id

    def update_custom_function(
        self,
        extractor_id: str,
        code: Optional[str] = None,
        outputs: Optional[List[str]] = None,
        modified_by: str = "claude"
    ) -> None:
        """
        Update an existing custom function.

        Args:
            extractor_id: ID of the custom extractor
            code: New Python code (optional)
            outputs: New outputs (optional)
            modified_by: Who/what is making the change

        Raises:
            SpecManagerError: If the extractor is missing or not a custom function
            SpecValidationError: If the new code has invalid Python syntax
        """
        data = self.load_raw()
        # Find the extractor
        extractor = None
        for ext in data.get("extractors", []):
            if ext.get("id") == extractor_id:
                extractor = ext
                break
        if not extractor:
            raise SpecManagerError(f"Extractor not found: {extractor_id}")
        if extractor.get("type") != "custom_function":
            raise SpecManagerError(f"Extractor {extractor_id} is not a custom function")
        # Update code (syntax-checked before being stored)
        if code is not None:
            try:
                compile(code, f"<custom:{extractor_id}>", "exec")
            except SyntaxError as e:
                raise SpecValidationError(
                    f"Invalid Python syntax: {e.msg} at line {e.lineno}"
                ) from e
            if "function" not in extractor:
                extractor["function"] = {}
            extractor["function"]["source_code"] = code
        # Update outputs
        if outputs is not None:
            extractor["outputs"] = [{"name": o, "metric": "custom"} for o in outputs]
        self.save(data, modified_by)

    # =========================================================================
    # WebSocket Subscription
    # =========================================================================

    def subscribe(self, subscriber: WebSocketSubscriber) -> None:
        """Subscribe to spec changes (idempotent)."""
        if subscriber not in self._subscribers:
            self._subscribers.append(subscriber)

    def unsubscribe(self, subscriber: WebSocketSubscriber) -> None:
        """Unsubscribe from spec changes (no-op if not subscribed)."""
        if subscriber in self._subscribers:
            self._subscribers.remove(subscriber)

    def _broadcast(self, message: Dict[str, Any]) -> None:
        """Broadcast message to all subscribers.

        Best-effort: subscribers that fail or contexts without a running
        event loop are silently skipped.
        """
        import asyncio
        for subscriber in self._subscribers:
            try:
                # Handle both sync and async contexts
                try:
                    loop = asyncio.get_running_loop()
                    task = loop.create_task(subscriber.send_json(message))
                    # Hold a strong reference until the task completes;
                    # asyncio only keeps weak refs to scheduled tasks.
                    self._pending_tasks.add(task)
                    task.add_done_callback(self._pending_tasks.discard)
                except RuntimeError:
                    # No running loop, try direct call if possible
                    pass
            except Exception:
                # Subscriber may have disconnected
                pass

    # =========================================================================
    # Helper Methods
    # =========================================================================

    def _compute_hash(self, data: Dict) -> str:
        """Compute a short hash of spec data for conflict detection."""
        # Sort keys for consistent hashing regardless of insertion order
        json_str = json.dumps(data, sort_keys=True, ensure_ascii=False)
        return hashlib.sha256(json_str.encode()).hexdigest()[:16]

    def _generate_id(self, node_type: str, data: Dict) -> str:
        """Generate a unique, zero-padded ID for a node type.

        Raises:
            SpecManagerError: If all 999 IDs for the prefix are taken
        """
        prefix_map = {
            "designVar": "dv",
            "design_variable": "dv",
            "extractor": "ext",
            "objective": "obj",
            "constraint": "con"
        }
        prefix = prefix_map.get(node_type, node_type[:3])
        # Find existing IDs
        section = self._get_section_for_type(node_type)
        existing_ids: Set[str] = set()
        if section in data and data[section]:
            existing_ids = {n.get("id", "") for n in data[section]}
        # Generate next available ID
        for i in range(1, 1000):
            new_id = f"{prefix}_{i:03d}"
            if new_id not in existing_ids:
                return new_id
        raise SpecManagerError(f"Cannot generate ID for {node_type}: too many nodes")

    def _get_section_for_type(self, node_type: str) -> str:
        """Map a node type to its spec section name (falls back to naive plural)."""
        section_map = {
            "designVar": "design_variables",
            "design_variable": "design_variables",
            "extractor": "extractors",
            "objective": "objectives",
            "constraint": "constraints"
        }
        return section_map.get(node_type, node_type + "s")

    def _auto_position(self, node_type: str, data: Dict) -> Dict[str, float]:
        """Calculate an auto position for a new node.

        x is fixed per node type (canvas column); y is placed 100px below
        the lowest existing node of the same type.
        """
        # Default x positions by type
        x_positions = {
            "designVar": 50,
            "design_variable": 50,
            "extractor": 740,
            "objective": 1020,
            "constraint": 1020
        }
        x = x_positions.get(node_type, 400)
        # Find max y position for this type
        section = self._get_section_for_type(node_type)
        max_y = 0
        if section in data and data[section]:
            for node in data[section]:
                pos = node.get("canvas_position", {})
                y = pos.get("y", 0)
                if y > max_y:
                    max_y = y
        # Place below existing nodes
        y = max_y + 100 if max_y > 0 else 100
        return {"x": x, "y": y}
# =========================================================================
# Factory Function
# =========================================================================
def get_spec_manager(study_path: Union[str, Path]) -> SpecManager:
    """
    Build a SpecManager bound to the given study directory.

    Args:
        study_path: Path to the study directory

    Returns:
        SpecManager instance
    """
    return SpecManager(study_path)

View File

@@ -30,6 +30,7 @@ function App() {
{/* Canvas page - full screen, no sidebar */}
<Route path="canvas" element={<CanvasView />} />
<Route path="canvas/*" element={<CanvasView />} />
{/* Study pages - with sidebar layout */}
<Route element={<MainLayout />}>

View File

@@ -26,8 +26,8 @@ interface DesignVariable {
name: string;
parameter?: string; // Optional: the actual parameter name if different from name
unit?: string;
min: number;
max: number;
min?: number;
max?: number;
}
interface Constraint {

View File

@@ -8,14 +8,15 @@ import { ScatterChart, Scatter, Line, XAxis, YAxis, CartesianGrid, Tooltip, Cell
interface ParetoTrial {
trial_number: number;
values: [number, number];
values: number[]; // Support variable number of objectives
params: Record<string, number>;
constraint_satisfied?: boolean;
}
interface Objective {
name: string;
type: 'minimize' | 'maximize';
type?: 'minimize' | 'maximize';
direction?: 'minimize' | 'maximize'; // Alternative field used by some configs
unit?: string;
}

View File

@@ -1,5 +1,6 @@
// Main Canvas Component
export { AtomizerCanvas } from './AtomizerCanvas';
export { SpecRenderer } from './SpecRenderer';
// Palette
export { NodePalette } from './palette/NodePalette';

View File

@@ -5,7 +5,7 @@ import { ToolCallCard, ToolCall } from './ToolCallCard';
export interface Message {
id: string;
role: 'user' | 'assistant';
role: 'user' | 'assistant' | 'system';
content: string;
timestamp: Date;
isStreaming?: boolean;
@@ -18,6 +18,18 @@ interface ChatMessageProps {
export const ChatMessage: React.FC<ChatMessageProps> = ({ message }) => {
const isAssistant = message.role === 'assistant';
const isSystem = message.role === 'system';
// System messages are displayed centered with special styling
if (isSystem) {
return (
<div className="flex justify-center my-2">
<div className="px-3 py-1 bg-dark-700/50 rounded-full text-xs text-dark-400 border border-dark-600">
{message.content}
</div>
</div>
);
}
return (
<div

View File

@@ -1,4 +1,4 @@
import React, { useRef, useEffect, useState } from 'react';
import React, { useRef, useEffect, useState, useMemo } from 'react';
import {
MessageSquare,
ChevronRight,
@@ -13,8 +13,10 @@ import { ChatMessage } from './ChatMessage';
import { ChatInput } from './ChatInput';
import { ThinkingIndicator } from './ThinkingIndicator';
import { ModeToggle } from './ModeToggle';
import { useChat } from '../../hooks/useChat';
import { useChat, CanvasState, CanvasModification } from '../../hooks/useChat';
import { useStudy } from '../../context/StudyContext';
import { useCanvasStore } from '../../hooks/useCanvasStore';
import { NodeType } from '../../lib/canvas/schema';
interface ChatPaneProps {
isOpen: boolean;
@@ -31,6 +33,76 @@ export const ChatPane: React.FC<ChatPaneProps> = ({
const messagesEndRef = useRef<HTMLDivElement>(null);
const [isExpanded, setIsExpanded] = useState(false);
// Get canvas state and modification functions from the store
const { nodes, edges, addNode, updateNodeData, selectNode, deleteSelected } = useCanvasStore();
// Build canvas state for chat context
const canvasState: CanvasState | null = useMemo(() => {
if (nodes.length === 0) return null;
return {
nodes: nodes.map(n => ({
id: n.id,
type: n.type,
data: n.data,
position: n.position,
})),
edges: edges.map(e => ({
id: e.id,
source: e.source,
target: e.target,
})),
studyName: selectedStudy?.name || selectedStudy?.id,
};
}, [nodes, edges, selectedStudy]);
// Track position offset for multiple node additions
const nodeAddCountRef = useRef(0);
// Handle canvas modifications from the assistant
const handleCanvasModification = React.useCallback((modification: CanvasModification) => {
console.log('Canvas modification from assistant:', modification);
switch (modification.action) {
case 'add_node':
if (modification.nodeType) {
const nodeType = modification.nodeType as NodeType;
// Calculate position: offset each new node so they don't stack
const basePosition = modification.position || { x: 100, y: 100 };
const offset = nodeAddCountRef.current * 120;
const position = {
x: basePosition.x,
y: basePosition.y + offset,
};
nodeAddCountRef.current += 1;
// Reset counter after a delay (for batch operations)
setTimeout(() => { nodeAddCountRef.current = 0; }, 2000);
addNode(nodeType, position, modification.data);
console.log(`Added ${nodeType} node at position:`, position);
}
break;
case 'update_node':
if (modification.nodeId && modification.data) {
updateNodeData(modification.nodeId, modification.data);
}
break;
case 'remove_node':
if (modification.nodeId) {
selectNode(modification.nodeId);
deleteSelected();
}
break;
// Edge operations would need additional store methods
case 'add_edge':
case 'remove_edge':
console.warn('Edge modification not yet implemented:', modification);
break;
}
}, [addNode, updateNodeData, selectNode, deleteSelected]);
const {
messages,
isThinking,
@@ -41,22 +113,38 @@ export const ChatPane: React.FC<ChatPaneProps> = ({
sendMessage,
clearMessages,
switchMode,
updateCanvasState,
} = useChat({
studyId: selectedStudy?.id,
mode: 'user',
useWebSocket: true,
canvasState,
onError: (err) => console.error('Chat error:', err),
onCanvasModification: handleCanvasModification,
});
// Keep canvas state synced with chat
useEffect(() => {
updateCanvasState(canvasState);
}, [canvasState, updateCanvasState]);
// Auto-scroll to bottom when new messages arrive
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
}, [messages, isThinking]);
// Welcome message based on study context
const welcomeMessage = selectedStudy
? `Ready to help with **${selectedStudy.name || selectedStudy.id}**. Ask me about optimization progress, results analysis, or how to improve your design.`
: 'Select a study to get started, or ask me to help you create a new one.';
// Welcome message based on study and canvas context
const welcomeMessage = useMemo(() => {
if (selectedStudy) {
return `Ready to help with **${selectedStudy.name || selectedStudy.id}**. Ask me about optimization progress, results analysis, or how to improve your design.`;
}
if (nodes.length > 0) {
const dvCount = nodes.filter(n => n.type === 'designVar').length;
const objCount = nodes.filter(n => n.type === 'objective').length;
return `I can see your canvas with ${dvCount} design variables and ${objCount} objectives. Ask me to analyze, validate, or create a study from this setup.`;
}
return 'Select a study to get started, or build an optimization in the Canvas Builder.';
}, [selectedStudy, nodes]);
// Collapsed state - just show toggle button
if (!isOpen) {

View File

@@ -30,22 +30,25 @@ interface ToolCallCardProps {
}
// Map tool names to friendly labels and icons
const TOOL_INFO: Record<string, { label: string; icon: React.ComponentType<{ className?: string }> }> = {
const TOOL_INFO: Record<string, { label: string; icon: React.ComponentType<{ className?: string }>; color?: string }> = {
// Study tools
list_studies: { label: 'Listing Studies', icon: Database },
get_study_status: { label: 'Getting Status', icon: FileSearch },
create_study: { label: 'Creating Study', icon: Settings },
create_study: { label: 'Creating Study', icon: Settings, color: 'text-green-400' },
// Optimization tools
run_optimization: { label: 'Starting Optimization', icon: Play },
run_optimization: { label: 'Starting Optimization', icon: Play, color: 'text-blue-400' },
stop_optimization: { label: 'Stopping Optimization', icon: XCircle },
get_optimization_status: { label: 'Checking Progress', icon: BarChart2 },
// Analysis tools
get_trial_data: { label: 'Querying Trials', icon: Database },
query_trials: { label: 'Querying Trials', icon: Database },
get_trial_details: { label: 'Getting Trial Details', icon: FileSearch },
analyze_convergence: { label: 'Analyzing Convergence', icon: BarChart2 },
compare_trials: { label: 'Comparing Trials', icon: BarChart2 },
get_best_design: { label: 'Getting Best Design', icon: CheckCircle },
get_optimization_summary: { label: 'Getting Summary', icon: BarChart2 },
// Reporting tools
generate_report: { label: 'Generating Report', icon: FileText },
@@ -56,6 +59,25 @@ const TOOL_INFO: Record<string, { label: string; icon: React.ComponentType<{ cla
recommend_method: { label: 'Recommending Method', icon: Settings },
query_extractors: { label: 'Listing Extractors', icon: Database },
// Config tools (read)
read_study_config: { label: 'Reading Config', icon: FileSearch },
read_study_readme: { label: 'Reading README', icon: FileText },
// === WRITE TOOLS (Power Mode) ===
add_design_variable: { label: 'Adding Design Variable', icon: Settings, color: 'text-amber-400' },
add_extractor: { label: 'Adding Extractor', icon: Settings, color: 'text-amber-400' },
add_objective: { label: 'Adding Objective', icon: Settings, color: 'text-amber-400' },
add_constraint: { label: 'Adding Constraint', icon: Settings, color: 'text-amber-400' },
update_spec_field: { label: 'Updating Field', icon: Settings, color: 'text-amber-400' },
remove_node: { label: 'Removing Node', icon: XCircle, color: 'text-red-400' },
// === INTERVIEW TOOLS ===
start_interview: { label: 'Starting Interview', icon: HelpCircle, color: 'text-purple-400' },
interview_record: { label: 'Recording Answer', icon: CheckCircle, color: 'text-purple-400' },
interview_advance: { label: 'Advancing Interview', icon: Play, color: 'text-purple-400' },
interview_status: { label: 'Checking Progress', icon: BarChart2, color: 'text-purple-400' },
interview_finalize: { label: 'Creating Study', icon: CheckCircle, color: 'text-green-400' },
// Admin tools (power mode)
edit_file: { label: 'Editing File', icon: FileText },
create_file: { label: 'Creating File', icon: FileText },
@@ -104,7 +126,7 @@ export const ToolCallCard: React.FC<ToolCallCardProps> = ({ toolCall }) => {
)}
{/* Tool icon */}
<Icon className="w-4 h-4 text-dark-400 flex-shrink-0" />
<Icon className={`w-4 h-4 flex-shrink-0 ${info.color || 'text-dark-400'}`} />
{/* Label */}
<span className="flex-1 text-sm text-dark-200 truncate">{info.label}</span>

View File

@@ -3,3 +3,27 @@ export { useCanvasStore } from './useCanvasStore';
export type { OptimizationConfig } from './useCanvasStore';
export { useCanvasChat } from './useCanvasChat';
export { useIntentParser } from './useIntentParser';
// Spec Store (AtomizerSpec v2.0)
export {
useSpecStore,
useSpec,
useSpecLoading,
useSpecError,
useSpecValidation,
useSelectedNodeId,
useSelectedEdgeId,
useSpecHash,
useSpecIsDirty,
useDesignVariables,
useExtractors,
useObjectives,
useConstraints,
useCanvasEdges,
useSelectedNode,
} from './useSpecStore';
// WebSocket Sync
export { useSpecWebSocket } from './useSpecWebSocket';
export type { ConnectionStatus } from './useSpecWebSocket';
export { ConnectionStatusIndicator } from '../components/canvas/ConnectionStatusIndicator';

View File

@@ -11,12 +11,25 @@ export interface CanvasState {
studyPath?: string;
}
export interface CanvasModification {
action: 'add_node' | 'update_node' | 'remove_node' | 'add_edge' | 'remove_edge';
nodeType?: string;
nodeId?: string;
edgeId?: string;
data?: Record<string, any>;
source?: string;
target?: string;
position?: { x: number; y: number };
}
interface UseChatOptions {
studyId?: string | null;
mode?: ChatMode;
useWebSocket?: boolean;
canvasState?: CanvasState | null;
onError?: (error: string) => void;
onCanvasModification?: (modification: CanvasModification) => void;
onSpecUpdated?: (spec: any) => void; // Called when Claude modifies the spec
}
interface ChatState {
@@ -35,6 +48,8 @@ export function useChat({
useWebSocket = true,
canvasState: initialCanvasState,
onError,
onCanvasModification,
onSpecUpdated,
}: UseChatOptions = {}) {
const [state, setState] = useState<ChatState>({
messages: [],
@@ -49,6 +64,23 @@ export function useChat({
// Track canvas state for sending with messages
const canvasStateRef = useRef<CanvasState | null>(initialCanvasState || null);
// Sync mode prop changes to internal state (triggers WebSocket reconnect)
useEffect(() => {
if (mode !== state.mode) {
console.log(`[useChat] Mode prop changed from ${state.mode} to ${mode}, triggering reconnect`);
// Close existing WebSocket
wsRef.current?.close();
wsRef.current = null;
// Update internal state to trigger reconnect
setState((prev) => ({
...prev,
mode,
sessionId: null,
isConnected: false,
}));
}
}, [mode]);
const abortControllerRef = useRef<AbortController | null>(null);
const conversationHistoryRef = useRef<Array<{ role: string; content: string }>>([]);
const wsRef = useRef<WebSocket | null>(null);
@@ -82,9 +114,16 @@ export function useChat({
const data = await response.json();
setState((prev) => ({ ...prev, sessionId: data.session_id }));
// Connect WebSocket
// Connect WebSocket - use backend directly in dev mode
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
const wsUrl = `${protocol}//${window.location.host}/api/claude/sessions/${data.session_id}/ws`;
// Use port 8001 to match start-dashboard.bat
const backendHost = import.meta.env.DEV ? 'localhost:8001' : window.location.host;
// Both modes use the same WebSocket - mode is handled by session config
// Power mode uses --dangerously-skip-permissions in CLI
// User mode uses --allowedTools to restrict access
const wsPath = `/api/claude/sessions/${data.session_id}/ws`;
const wsUrl = `${protocol}//${backendHost}${wsPath}`;
console.log(`[useChat] Connecting to WebSocket (${state.mode} mode): ${wsUrl}`);
const ws = new WebSocket(wsUrl);
ws.onopen = () => {
@@ -126,6 +165,9 @@ export function useChat({
// Handle WebSocket messages
const handleWebSocketMessage = useCallback((data: any) => {
// Debug: log all incoming WebSocket messages
console.log('[useChat] WebSocket message received:', data.type, data);
switch (data.type) {
case 'text':
currentMessageRef.current += data.content || '';
@@ -212,11 +254,51 @@ export function useChat({
// Canvas state was updated - could show notification
break;
case 'canvas_modification':
// Assistant wants to modify the canvas (from MCP tools in user mode)
console.log('[useChat] Received canvas_modification:', data.modification);
if (onCanvasModification && data.modification) {
console.log('[useChat] Calling onCanvasModification callback');
onCanvasModification(data.modification);
} else {
console.warn('[useChat] canvas_modification received but no handler or modification:', {
hasCallback: !!onCanvasModification,
modification: data.modification
});
}
break;
case 'spec_updated':
// Assistant modified the spec - we receive the full updated spec
console.log('[useChat] Spec updated by assistant:', data.tool, data.reason);
if (onSpecUpdated && data.spec) {
// Directly update the canvas with the new spec
onSpecUpdated(data.spec);
}
break;
case 'spec_modified':
// Legacy: Assistant modified the spec directly (from power mode write tools)
console.log('[useChat] Spec was modified by assistant (legacy):', data.tool, data.changes);
// Treat this as a canvas modification to trigger reload
if (onCanvasModification) {
// Create a synthetic modification event to trigger canvas refresh
onCanvasModification({
action: 'add_node', // Use add_node as it triggers refresh
data: {
_refresh: true,
tool: data.tool,
changes: data.changes,
},
});
}
break;
case 'pong':
// Heartbeat response - ignore
break;
}
}, [onError]);
}, [onError, onCanvasModification]);
// Switch mode (requires new session)
const switchMode = useCallback(async (newMode: ChatMode) => {
@@ -462,6 +544,18 @@ export function useChat({
}
}, [useWebSocket]);
// Notify backend when user edits canvas (so Claude sees the changes)
const notifyCanvasEdit = useCallback((spec: any) => {
if (useWebSocket && wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.send(
JSON.stringify({
type: 'canvas_edit',
spec: spec,
})
);
}
}, [useWebSocket]);
return {
messages: state.messages,
isThinking: state.isThinking,
@@ -475,5 +569,6 @@ export function useChat({
cancelRequest,
switchMode,
updateCanvasState,
notifyCanvasEdit,
};
}

View File

@@ -0,0 +1,349 @@
/**
* Hook for Claude Code CLI integration
*
* Connects to backend that spawns actual Claude Code CLI processes.
* This gives full power: file editing, command execution, etc.
*
* Unlike useChat (which uses MCP tools), this hook:
* - Spawns actual Claude Code CLI in the backend
* - Has full file system access
* - Can edit files directly (not just return instructions)
* - Uses Opus 4.5 model
* - Has all Claude Code capabilities
*/
import { useState, useCallback, useRef, useEffect } from 'react';
import { Message } from '../components/chat/ChatMessage';
import { useCanvasStore } from './useCanvasStore';
/**
 * Snapshot of the canvas the user is editing, forwarded to the backend so
 * the Claude session sees the current study layout.
 */
export interface CanvasState {
  nodes: any[];        // React Flow nodes (shape owned by the canvas store)
  edges: any[];        // React Flow edges
  studyName?: string;  // display name of the loaded study, if any
  studyPath?: string;  // filesystem path of the study, if known
}
/** Configuration accepted by useClaudeCode (all optional). */
interface UseClaudeCodeOptions {
  studyId?: string | null;                    // scope the WebSocket to a study; null → generic session
  canvasState?: CanvasState | null;           // initial canvas snapshot (kept in sync via a ref)
  onError?: (error: string) => void;          // called with a human-readable message on any error
  onCanvasRefresh?: (studyId: string) => void; // called when the backend reports file changes
}
/** Internal state tracked by the hook and exposed through its return value. */
interface ClaudeCodeState {
  messages: Message[];        // full chat transcript, streamed assistant text included
  isThinking: boolean;        // true between sendMessage and the 'done'/'error' event
  error: string | null;       // last error content, if any
  sessionId: string | null;   // backend session id, set by the 'initialized' event
  isConnected: boolean;       // WebSocket open/closed flag
  workingDir: string | null;  // CLI working directory reported by the backend
}
/**
 * React hook that drives a Claude Code CLI session hosted by the backend.
 *
 * Responsibilities (all implemented below):
 *  - open a WebSocket (study-scoped when `studyId` is given) and auto-reconnect
 *    up to 3 times with linear backoff;
 *  - stream assistant text chunks into the last assistant message in state;
 *  - reload the canvas from the study config when the backend requests it
 *    (`refresh_canvas` events);
 *  - forward canvas-state snapshots to the backend (`set_canvas` / with each
 *    user message);
 *  - keep the connection alive with a 30-second ping.
 *
 * Returns the transcript and flags plus `sendMessage`, `clearMessages`,
 * `updateCanvasState`, and `reloadCanvasFromStudy`.
 */
export function useClaudeCode({
  studyId,
  canvasState: initialCanvasState,
  onError,
  onCanvasRefresh,
}: UseClaudeCodeOptions = {}) {
  const [state, setState] = useState<ClaudeCodeState>({
    messages: [],
    isThinking: false,
    error: null,
    sessionId: null,
    isConnected: false,
    workingDir: null,
  });
  // Track canvas state for sending with messages (ref, so the WebSocket
  // callbacks always read the latest snapshot without re-subscribing).
  const canvasStateRef = useRef<CanvasState | null>(initialCanvasState || null);
  const wsRef = useRef<WebSocket | null>(null);
  // Accumulates streamed text for the in-flight assistant message.
  const currentMessageRef = useRef<string>('');
  const reconnectAttempts = useRef(0);
  const maxReconnectAttempts = 3;
  // Keep canvas state in sync with prop changes
  useEffect(() => {
    if (initialCanvasState) {
      canvasStateRef.current = initialCanvasState;
    }
  }, [initialCanvasState]);
  // Get canvas store for auto-refresh
  const { loadFromConfig } = useCanvasStore();
  // Connect to Claude Code WebSocket
  useEffect(() => {
    const connect = () => {
      const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
      // In development, connect directly to backend (bypass Vite proxy for WebSockets)
      // Use port 8001 to match start-dashboard.bat
      const backendHost = import.meta.env.DEV ? 'localhost:8001' : window.location.host;
      // Use study-specific endpoint if studyId provided
      const wsUrl = studyId
        ? `${protocol}//${backendHost}/api/claude-code/ws/${encodeURIComponent(studyId)}`
        : `${protocol}//${backendHost}/api/claude-code/ws`;
      console.log('[ClaudeCode] Connecting to:', wsUrl);
      const ws = new WebSocket(wsUrl);
      ws.onopen = () => {
        console.log('[ClaudeCode] Connected');
        setState((prev) => ({ ...prev, isConnected: true, error: null }));
        reconnectAttempts.current = 0;
        // If no studyId in URL, send init message
        if (!studyId) {
          ws.send(JSON.stringify({ type: 'init', study_id: null }));
        }
      };
      ws.onclose = () => {
        console.log('[ClaudeCode] Disconnected');
        setState((prev) => ({ ...prev, isConnected: false }));
        // Attempt reconnection (linear backoff: 2s, 4s, 6s)
        if (reconnectAttempts.current < maxReconnectAttempts) {
          reconnectAttempts.current++;
          console.log(`[ClaudeCode] Reconnecting... attempt ${reconnectAttempts.current}`);
          setTimeout(connect, 2000 * reconnectAttempts.current);
        }
      };
      ws.onerror = (event) => {
        console.error('[ClaudeCode] WebSocket error:', event);
        setState((prev) => ({ ...prev, isConnected: false }));
        onError?.('Claude Code connection error');
      };
      // NOTE(review): this socket captures the handleWebSocketMessage (and
      // onError) closures from the render in which this effect ran; the
      // effect deps are [studyId] only, so later changes to the callbacks
      // are not seen by an already-open socket — confirm this is intended.
      ws.onmessage = (event) => {
        try {
          const data = JSON.parse(event.data);
          handleWebSocketMessage(data);
        } catch (e) {
          console.error('[ClaudeCode] Failed to parse message:', e);
        }
      };
      wsRef.current = ws;
    };
    connect();
    return () => {
      reconnectAttempts.current = maxReconnectAttempts; // Prevent reconnection on unmount
      wsRef.current?.close();
      wsRef.current = null;
    };
  }, [studyId]);
  // Handle WebSocket messages
  const handleWebSocketMessage = useCallback(
    (data: any) => {
      switch (data.type) {
        case 'initialized':
          // Backend has spawned the CLI session; record id + working dir.
          console.log('[ClaudeCode] Session initialized:', data.session_id);
          setState((prev) => ({
            ...prev,
            sessionId: data.session_id,
            workingDir: data.working_dir || null,
          }));
          break;
        case 'text':
          // Streamed assistant chunk: append to the accumulator and rewrite
          // the content of the trailing assistant placeholder message.
          currentMessageRef.current += data.content || '';
          setState((prev) => ({
            ...prev,
            messages: prev.messages.map((msg, idx) =>
              idx === prev.messages.length - 1 && msg.role === 'assistant'
                ? { ...msg, content: currentMessageRef.current }
                : msg
            ),
          }));
          break;
        case 'done':
          // End of stream: clear the streaming flag and reset the buffer.
          setState((prev) => ({
            ...prev,
            isThinking: false,
            messages: prev.messages.map((msg, idx) =>
              idx === prev.messages.length - 1 && msg.role === 'assistant'
                ? { ...msg, isStreaming: false }
                : msg
            ),
          }));
          currentMessageRef.current = '';
          break;
        case 'error':
          console.error('[ClaudeCode] Error:', data.content);
          setState((prev) => ({
            ...prev,
            isThinking: false,
            error: data.content || 'Unknown error',
          }));
          onError?.(data.content || 'Unknown error');
          currentMessageRef.current = '';
          break;
        case 'refresh_canvas':
          // Claude made file changes - trigger canvas refresh.
          // reloadCanvasFromStudy is declared below; safe at runtime because
          // this callback only runs after the component body has executed.
          console.log('[ClaudeCode] Canvas refresh requested:', data.reason);
          if (data.study_id) {
            onCanvasRefresh?.(data.study_id);
            reloadCanvasFromStudy(data.study_id);
          }
          break;
        case 'canvas_updated':
          console.log('[ClaudeCode] Canvas state updated');
          break;
        case 'pong':
          // Heartbeat response
          break;
        default:
          console.log('[ClaudeCode] Unknown message type:', data.type);
      }
    },
    // NOTE(review): deps omit reloadCanvasFromStudy — the callback uses the
    // closure from the defining render; verify this is acceptable.
    [onError, onCanvasRefresh]
  );
  // Reload canvas from study config
  const reloadCanvasFromStudy = useCallback(
    async (studyIdToReload: string) => {
      try {
        console.log('[ClaudeCode] Reloading canvas for study:', studyIdToReload);
        // Fetch fresh config from backend
        const response = await fetch(`/api/optimization/studies/${encodeURIComponent(studyIdToReload)}/config`);
        if (!response.ok) {
          throw new Error(`Failed to fetch config: ${response.status}`);
        }
        const data = await response.json();
        const config = data.config; // API returns { config: ..., path: ..., study_id: ... }
        // Reload canvas with new config
        loadFromConfig(config);
        // Add system message about refresh
        const refreshMessage: Message = {
          id: `msg_${Date.now()}_refresh`,
          role: 'system',
          content: `Canvas refreshed with latest changes from ${studyIdToReload}`,
          timestamp: new Date(),
        };
        setState((prev) => ({
          ...prev,
          messages: [...prev.messages, refreshMessage],
        }));
      } catch (error) {
        // Best-effort: a failed reload logs but does not surface an error.
        console.error('[ClaudeCode] Failed to reload canvas:', error);
      }
    },
    [loadFromConfig]
  );
  // Unique-enough id for transcript messages (timestamp + random suffix).
  // NOTE: String.prototype.substr is deprecated; slice(2, 11) is equivalent.
  const generateMessageId = () => {
    return `msg_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
  };
  const sendMessage = useCallback(
    async (content: string) => {
      // Ignore empty input and re-entrant sends while a reply is streaming.
      if (!content.trim() || state.isThinking) return;
      if (!wsRef.current || wsRef.current.readyState !== WebSocket.OPEN) {
        onError?.('Not connected to Claude Code');
        return;
      }
      // Add user message
      const userMessage: Message = {
        id: generateMessageId(),
        role: 'user',
        content: content.trim(),
        timestamp: new Date(),
      };
      // Add assistant message placeholder (filled in by 'text' events)
      const assistantMessage: Message = {
        id: generateMessageId(),
        role: 'assistant',
        content: '',
        timestamp: new Date(),
        isStreaming: true,
      };
      setState((prev) => ({
        ...prev,
        messages: [...prev.messages, userMessage, assistantMessage],
        isThinking: true,
        error: null,
      }));
      // Reset current message tracking
      currentMessageRef.current = '';
      // Send message via WebSocket with canvas state
      wsRef.current.send(
        JSON.stringify({
          type: 'message',
          content: content.trim(),
          canvas_state: canvasStateRef.current || undefined,
        })
      );
    },
    [state.isThinking, onError]
  );
  const clearMessages = useCallback(() => {
    setState((prev) => ({
      ...prev,
      messages: [],
      error: null,
    }));
    currentMessageRef.current = '';
  }, []);
  // Update canvas state (call this when canvas changes)
  const updateCanvasState = useCallback((newCanvasState: CanvasState | null) => {
    canvasStateRef.current = newCanvasState;
    // Also send to backend to update context
    if (wsRef.current?.readyState === WebSocket.OPEN) {
      wsRef.current.send(
        JSON.stringify({
          type: 'set_canvas',
          canvas_state: newCanvasState,
        })
      );
    }
  }, []);
  // Send ping to keep connection alive
  useEffect(() => {
    const pingInterval = setInterval(() => {
      if (wsRef.current?.readyState === WebSocket.OPEN) {
        wsRef.current.send(JSON.stringify({ type: 'ping' }));
      }
    }, 30000); // Every 30 seconds
    return () => clearInterval(pingInterval);
  }, []);
  return {
    messages: state.messages,
    isThinking: state.isThinking,
    error: state.error,
    sessionId: state.sessionId,
    isConnected: state.isConnected,
    workingDir: state.workingDir,
    sendMessage,
    clearMessages,
    updateCanvasState,
    reloadCanvasFromStudy,
  };
}

View File

@@ -0,0 +1,288 @@
/**
* useSpecWebSocket - WebSocket connection for real-time spec sync
*
* Connects to the backend WebSocket endpoint for live spec updates.
* Handles auto-reconnection, message parsing, and store updates.
*
* P2.11-P2.14: WebSocket sync implementation
*/
import { useEffect, useRef, useCallback, useState } from 'react';
import { useSpecStore } from './useSpecStore';
// ============================================================================
// Types
// ============================================================================
// Lifecycle of the spec-sync socket as exposed to the UI.
export type ConnectionStatus = 'disconnected' | 'connecting' | 'connected' | 'reconnecting';
/** Envelope for every frame exchanged on the spec-sync WebSocket. */
interface SpecWebSocketMessage {
  type: 'modification' | 'full_sync' | 'error' | 'ping'; // discriminator
  payload: unknown; // narrowed per-type below (ModificationPayload / ErrorPayload)
}
/** Payload of a 'modification' frame: one patch applied to the spec. */
interface ModificationPayload {
  operation: 'set' | 'add' | 'remove'; // patch operation kind
  path: string;                        // location in the spec the patch targets
  value?: unknown;                     // new value (absent for 'remove')
  modified_by: string;                 // client id of the originator (used to skip own edits)
  timestamp: string;
  hash: string;                        // spec hash after the modification
}
/** Payload of an 'error' frame. */
interface ErrorPayload {
  message: string; // human-readable description
  code?: string;   // optional machine-readable error code
}
interface UseSpecWebSocketOptions {
/**
* Enable auto-reconnect on disconnect (default: true)
*/
autoReconnect?: boolean;
/**
* Reconnect delay in ms (default: 3000)
*/
reconnectDelay?: number;
/**
* Max reconnect attempts (default: 10)
*/
maxReconnectAttempts?: number;
/**
* Client identifier for tracking modifications (default: 'canvas')
*/
clientId?: string;
}
interface UseSpecWebSocketReturn {
/**
* Current connection status
*/
status: ConnectionStatus;
/**
* Manually disconnect
*/
disconnect: () => void;
/**
* Manually reconnect
*/
reconnect: () => void;
/**
* Send a message to the WebSocket (for future use)
*/
send: (message: SpecWebSocketMessage) => void;
/**
* Last error message if any
*/
lastError: string | null;
}
// ============================================================================
// Hook
// ============================================================================
/**
 * React hook that keeps the spec store in sync with the backend over a
 * WebSocket.
 *
 * Behavior:
 *  - connects to `/api/studies/{id}/spec/sync` whenever `studyId` is set;
 *  - on 'modification' frames from OTHER clients (and on 'full_sync'),
 *    reloads the spec via the store's `reloadSpec`;
 *  - answers server 'ping' frames with a 'pong';
 *  - auto-reconnects with capped linear backoff (delay × min(attempt, 5)),
 *    up to `maxReconnectAttempts` tries.
 *
 * Fix vs. previous version: before closing a socket (when replacing it in
 * `connect`, in `disconnect`, and in the unmount cleanup) its handlers are
 * detached first. Otherwise the stale socket's `onclose` would still fire,
 * flip `status`, and schedule a competing reconnect — leaking duplicate
 * connections on study switches and reconnecting after unmount.
 *
 * @param studyId  study to sync, or null to stay disconnected
 * @param options  reconnect tuning and the client id reported to the server
 * @returns        connection status, manual disconnect/reconnect, a raw
 *                 `send`, and the last error message
 */
export function useSpecWebSocket(
  studyId: string | null,
  options: UseSpecWebSocketOptions = {}
): UseSpecWebSocketReturn {
  const {
    autoReconnect = true,
    reconnectDelay = 3000,
    maxReconnectAttempts = 10,
    clientId = 'canvas',
  } = options;
  const wsRef = useRef<WebSocket | null>(null);
  const reconnectAttemptsRef = useRef(0);
  const reconnectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null);
  const [status, setStatus] = useState<ConnectionStatus>('disconnected');
  const [lastError, setLastError] = useState<string | null>(null);
  // Get store actions
  const reloadSpec = useSpecStore((s) => s.reloadSpec);
  const setError = useSpecStore((s) => s.setError);
  // Build WebSocket URL
  const getWsUrl = useCallback((id: string): string => {
    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
    const host = window.location.host;
    return `${protocol}//${host}/api/studies/${encodeURIComponent(id)}/spec/sync?client_id=${clientId}`;
  }, [clientId]);
  // Handle incoming messages
  const handleMessage = useCallback((event: MessageEvent) => {
    try {
      const message: SpecWebSocketMessage = JSON.parse(event.data);
      switch (message.type) {
        case 'modification': {
          const payload = message.payload as ModificationPayload;
          // Skip if this is our own modification (echoed back by the server)
          if (payload.modified_by === clientId) {
            return;
          }
          // Reload spec to get latest state
          // In a more sophisticated implementation, we could apply the patch locally
          reloadSpec().catch((err) => {
            console.error('Failed to reload spec after modification:', err);
          });
          break;
        }
        case 'full_sync': {
          // Full spec sync requested (e.g., after reconnect)
          reloadSpec().catch((err) => {
            console.error('Failed to reload spec during full_sync:', err);
          });
          break;
        }
        case 'error': {
          const payload = message.payload as ErrorPayload;
          console.error('WebSocket error:', payload.message);
          setLastError(payload.message);
          setError(payload.message);
          break;
        }
        case 'ping': {
          // Keep-alive ping, respond with pong
          if (wsRef.current?.readyState === WebSocket.OPEN) {
            wsRef.current.send(JSON.stringify({ type: 'pong' }));
          }
          break;
        }
        default:
          console.warn('Unknown WebSocket message type:', message.type);
      }
    } catch (error) {
      console.error('Failed to parse WebSocket message:', error);
    }
  }, [clientId, reloadSpec, setError]);
  // Connect to WebSocket
  const connect = useCallback(() => {
    if (!studyId) return;
    // Replace any existing connection. Detach the old socket's handlers
    // BEFORE closing it so its onclose cannot fire and schedule a competing
    // reconnect (which would create duplicate connections).
    if (wsRef.current) {
      wsRef.current.onopen = null;
      wsRef.current.onmessage = null;
      wsRef.current.onerror = null;
      wsRef.current.onclose = null;
      wsRef.current.close();
    }
    setStatus('connecting');
    setLastError(null);
    const url = getWsUrl(studyId);
    const ws = new WebSocket(url);
    ws.onopen = () => {
      setStatus('connected');
      reconnectAttemptsRef.current = 0;
    };
    ws.onmessage = handleMessage;
    ws.onerror = (event) => {
      console.error('WebSocket error:', event);
      setLastError('WebSocket connection error');
    };
    ws.onclose = (_event) => {
      setStatus('disconnected');
      // Check if we should reconnect
      if (autoReconnect && reconnectAttemptsRef.current < maxReconnectAttempts) {
        reconnectAttemptsRef.current++;
        setStatus('reconnecting');
        // Clear any existing reconnect timeout
        if (reconnectTimeoutRef.current) {
          clearTimeout(reconnectTimeoutRef.current);
        }
        // Schedule reconnect with capped linear backoff (delay × min(attempt, 5))
        const delay = reconnectDelay * Math.min(reconnectAttemptsRef.current, 5);
        reconnectTimeoutRef.current = setTimeout(() => {
          connect();
        }, delay);
      } else if (reconnectAttemptsRef.current >= maxReconnectAttempts) {
        setLastError('Max reconnection attempts reached');
      }
    };
    wsRef.current = ws;
  }, [studyId, getWsUrl, handleMessage, autoReconnect, reconnectDelay, maxReconnectAttempts]);
  // Disconnect (manual; suppresses any further auto-reconnect)
  const disconnect = useCallback(() => {
    // Clear reconnect timeout
    if (reconnectTimeoutRef.current) {
      clearTimeout(reconnectTimeoutRef.current);
      reconnectTimeoutRef.current = null;
    }
    // Close WebSocket, detaching onclose first so closing does not trigger
    // the auto-reconnect path.
    if (wsRef.current) {
      wsRef.current.onclose = null;
      wsRef.current.close();
      wsRef.current = null;
    }
    reconnectAttemptsRef.current = maxReconnectAttempts; // Prevent auto-reconnect
    setStatus('disconnected');
  }, [maxReconnectAttempts]);
  // Reconnect (manual; resets the attempt counter)
  const reconnect = useCallback(() => {
    reconnectAttemptsRef.current = 0;
    connect();
  }, [connect]);
  // Send message (no-op with a warning when not connected)
  const send = useCallback((message: SpecWebSocketMessage) => {
    if (wsRef.current?.readyState === WebSocket.OPEN) {
      wsRef.current.send(JSON.stringify(message));
    } else {
      console.warn('WebSocket not connected, cannot send message');
    }
  }, []);
  // Connect when studyId changes
  useEffect(() => {
    if (studyId) {
      connect();
    } else {
      disconnect();
    }
    return () => {
      // Cleanup on unmount or studyId change
      if (reconnectTimeoutRef.current) {
        clearTimeout(reconnectTimeoutRef.current);
      }
      if (wsRef.current) {
        // Detach onclose so tearing down does not schedule a reconnect.
        wsRef.current.onclose = null;
        wsRef.current.close();
      }
    };
  }, [studyId, connect, disconnect]);
  return {
    status,
    disconnect,
    reconnect,
    send,
    lastError,
  };
}
export default useSpecWebSocket;

View File

@@ -18,7 +18,8 @@ export const useOptimizationWebSocket = ({ studyId, onMessage }: UseOptimization
const host = window.location.host; // This will be localhost:3000 in dev
// If using proxy in vite.config.ts, this works.
// If not, we might need to hardcode backend URL for dev:
const backendHost = import.meta.env.DEV ? 'localhost:8000' : host;
// Use port 8001 to match start-dashboard.bat
const backendHost = import.meta.env.DEV ? 'localhost:8001' : host;
setSocketUrl(`${protocol}//${backendHost}/api/ws/optimization/${studyId}`);
} else {

View File

@@ -1,4 +1,4 @@
import { useState, useEffect, lazy, Suspense, useMemo } from 'react';
import { useState, useEffect, useMemo } from 'react';
import { useNavigate } from 'react-router-dom';
import {
BarChart3,
@@ -14,25 +14,10 @@ import {
} from 'lucide-react';
import { useStudy } from '../context/StudyContext';
import { Card } from '../components/common/Card';
// Lazy load charts
const PlotlyParetoPlot = lazy(() => import('../components/plotly/PlotlyParetoPlot').then(m => ({ default: m.PlotlyParetoPlot })));
const PlotlyParallelCoordinates = lazy(() => import('../components/plotly/PlotlyParallelCoordinates').then(m => ({ default: m.PlotlyParallelCoordinates })));
const PlotlyParameterImportance = lazy(() => import('../components/plotly/PlotlyParameterImportance').then(m => ({ default: m.PlotlyParameterImportance })));
const PlotlyConvergencePlot = lazy(() => import('../components/plotly/PlotlyConvergencePlot').then(m => ({ default: m.PlotlyConvergencePlot })));
const PlotlyCorrelationHeatmap = lazy(() => import('../components/plotly/PlotlyCorrelationHeatmap').then(m => ({ default: m.PlotlyCorrelationHeatmap })));
const PlotlyFeasibilityChart = lazy(() => import('../components/plotly/PlotlyFeasibilityChart').then(m => ({ default: m.PlotlyFeasibilityChart })));
const PlotlySurrogateQuality = lazy(() => import('../components/plotly/PlotlySurrogateQuality').then(m => ({ default: m.PlotlySurrogateQuality })));
const PlotlyRunComparison = lazy(() => import('../components/plotly/PlotlyRunComparison').then(m => ({ default: m.PlotlyRunComparison })));
const ChartLoading = () => (
<div className="flex items-center justify-center h-64 text-dark-400">
<div className="flex flex-col items-center gap-2">
<div className="animate-spin w-6 h-6 border-2 border-primary-500 border-t-transparent rounded-full"></div>
<span className="text-sm animate-pulse">Loading chart...</span>
</div>
</div>
);
import { ConvergencePlot } from '../components/ConvergencePlot';
import { ParameterImportanceChart } from '../components/ParameterImportanceChart';
import { ParallelCoordinatesPlot } from '../components/ParallelCoordinatesPlot';
import { ParetoPlot } from '../components/ParetoPlot';
const NoData = ({ message = 'No data available' }: { message?: string }) => (
<div className="flex items-center justify-center h-64 text-dark-500">
@@ -383,15 +368,12 @@ export default function Analysis() {
{/* Convergence Plot */}
{trials.length > 0 && (
<Card title="Convergence Plot">
<Suspense fallback={<ChartLoading />}>
<PlotlyConvergencePlot
trials={trials}
objectiveIndex={0}
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
direction="minimize"
height={350}
/>
</Suspense>
<ConvergencePlot
trials={trials}
objectiveIndex={0}
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
direction="minimize"
/>
</Card>
)}
@@ -455,30 +437,24 @@ export default function Analysis() {
{/* Parameter Importance */}
{trials.length > 0 && metadata?.design_variables && (
<Card title="Parameter Importance">
<Suspense fallback={<ChartLoading />}>
<PlotlyParameterImportance
trials={trials}
designVariables={metadata.design_variables}
objectiveIndex={0}
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
height={400}
/>
</Suspense>
<ParameterImportanceChart
trials={trials}
designVariables={metadata.design_variables}
objectiveIndex={0}
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
/>
</Card>
)}
{/* Parallel Coordinates */}
{trials.length > 0 && metadata && (
<Card title="Parallel Coordinates">
<Suspense fallback={<ChartLoading />}>
<PlotlyParallelCoordinates
trials={trials}
objectives={metadata.objectives || []}
designVariables={metadata.design_variables || []}
paretoFront={paretoFront}
height={450}
/>
</Suspense>
<ParallelCoordinatesPlot
paretoData={trials}
objectives={metadata.objectives || []}
designVariables={metadata.design_variables || []}
paretoFront={paretoFront}
/>
</Card>
)}
</div>
@@ -508,14 +484,11 @@ export default function Analysis() {
{/* Pareto Front Plot */}
{paretoFront.length > 0 && (
<Card title="Pareto Front">
<Suspense fallback={<ChartLoading />}>
<PlotlyParetoPlot
trials={trials}
paretoFront={paretoFront}
objectives={metadata?.objectives || []}
height={500}
/>
</Suspense>
<ParetoPlot
paretoData={paretoFront}
objectives={metadata?.objectives || []}
allTrials={trials}
/>
</Card>
)}
@@ -550,16 +523,10 @@ export default function Analysis() {
{/* Correlations Tab */}
{activeTab === 'correlations' && (
<div className="space-y-6">
{/* Correlation Heatmap */}
{/* Correlation Analysis */}
{trials.length > 2 && (
<Card title="Parameter-Objective Correlation Matrix">
<Suspense fallback={<ChartLoading />}>
<PlotlyCorrelationHeatmap
trials={trials}
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
height={Math.min(500, 100 + Object.keys(trials[0]?.params || {}).length * 40)}
/>
</Suspense>
<Card title="Parameter-Objective Correlation Analysis">
<CorrelationTable trials={trials} objectiveName={metadata?.objectives?.[0]?.name || 'Objective'} />
</Card>
)}
@@ -612,11 +579,22 @@ export default function Analysis() {
</Card>
</div>
{/* Feasibility Over Time Chart */}
<Card title="Feasibility Rate Over Time">
<Suspense fallback={<ChartLoading />}>
<PlotlyFeasibilityChart trials={trials} height={350} />
</Suspense>
{/* Feasibility Summary */}
<Card title="Feasibility Analysis">
<div className="p-4">
<div className="flex items-center gap-4 mb-4">
<div className="flex-1 bg-dark-700 rounded-full h-4 overflow-hidden">
<div
className="h-full bg-green-500 transition-all duration-500"
style={{ width: `${stats.feasibilityRate}%` }}
/>
</div>
<span className="text-lg font-bold text-green-400">{stats.feasibilityRate.toFixed(1)}%</span>
</div>
<p className="text-dark-400 text-sm">
{stats.feasible} of {stats.total} trials satisfy all constraints
</p>
</div>
</Card>
{/* Infeasible Trials List */}
@@ -683,11 +661,38 @@ export default function Analysis() {
</Card>
</div>
{/* Surrogate Quality Charts */}
<Card title="Surrogate Model Analysis">
<Suspense fallback={<ChartLoading />}>
<PlotlySurrogateQuality trials={trials} height={400} />
</Suspense>
{/* Surrogate Performance Summary */}
<Card title="Surrogate Model Performance">
<div className="grid grid-cols-2 gap-6 p-4">
<div>
<h4 className="text-sm font-semibold text-dark-300 mb-3">Trial Distribution</h4>
<div className="space-y-2">
<div className="flex items-center gap-3">
<div className="w-3 h-3 bg-blue-500 rounded-full"></div>
<span className="text-dark-200">FEA: {stats.feaTrials} trials</span>
<span className="text-dark-400 ml-auto">
{((stats.feaTrials / stats.total) * 100).toFixed(0)}%
</span>
</div>
<div className="flex items-center gap-3">
<div className="w-3 h-3 bg-purple-500 rounded-full"></div>
<span className="text-dark-200">NN: {stats.nnTrials} trials</span>
<span className="text-dark-400 ml-auto">
{((stats.nnTrials / stats.total) * 100).toFixed(0)}%
</span>
</div>
</div>
</div>
<div>
<h4 className="text-sm font-semibold text-dark-300 mb-3">Efficiency Gains</h4>
<div className="text-center p-4 bg-dark-750 rounded-lg">
<div className="text-3xl font-bold text-primary-400">
{stats.feaTrials > 0 ? `${(stats.total / stats.feaTrials).toFixed(1)}x` : '1.0x'}
</div>
<div className="text-xs text-dark-400 mt-1">Effective Speedup</div>
</div>
</div>
</div>
</Card>
</div>
)}
@@ -700,9 +705,36 @@ export default function Analysis() {
Compare different optimization runs within this study. Studies with adaptive optimization
may have multiple runs (e.g., initial FEA exploration, NN-accelerated iterations).
</p>
<Suspense fallback={<ChartLoading />}>
<PlotlyRunComparison runs={runs} height={400} />
</Suspense>
<div className="overflow-x-auto">
<table className="w-full text-sm">
<thead>
<tr className="border-b border-dark-600">
<th className="text-left py-2 px-3 text-dark-400 font-medium">Run</th>
<th className="text-left py-2 px-3 text-dark-400 font-medium">Source</th>
<th className="text-left py-2 px-3 text-dark-400 font-medium">Trials</th>
<th className="text-left py-2 px-3 text-dark-400 font-medium">Best Value</th>
<th className="text-left py-2 px-3 text-dark-400 font-medium">Avg Value</th>
</tr>
</thead>
<tbody>
{runs.map((run) => (
<tr key={run.run_id} className="border-b border-dark-700">
<td className="py-2 px-3 font-mono text-white">{run.name || `Run ${run.run_id}`}</td>
<td className="py-2 px-3">
<span className={`px-2 py-0.5 rounded text-xs ${
run.source === 'NN' ? 'bg-purple-500/20 text-purple-400' : 'bg-blue-500/20 text-blue-400'
}`}>
{run.source}
</span>
</td>
<td className="py-2 px-3 text-dark-200">{run.trial_count}</td>
<td className="py-2 px-3 font-mono text-green-400">{run.best_value?.toExponential(4) || 'N/A'}</td>
<td className="py-2 px-3 font-mono text-dark-300">{run.avg_value?.toExponential(4) || 'N/A'}</td>
</tr>
))}
</tbody>
</table>
</div>
</Card>
</div>
)}

View File

@@ -1,4 +1,4 @@
import { useState, useEffect, lazy, Suspense, useRef } from 'react';
import { useState, useEffect, useRef } from 'react';
import { useNavigate } from 'react-router-dom';
import { Settings } from 'lucide-react';
import { useOptimizationWebSocket } from '../hooks/useWebSocket';
@@ -21,19 +21,6 @@ import { CurrentTrialPanel, OptimizerStatePanel } from '../components/tracker';
import { NivoParallelCoordinates } from '../components/charts';
import type { Trial } from '../types';
// Lazy load Plotly components for better initial load performance
const PlotlyParallelCoordinates = lazy(() => import('../components/plotly/PlotlyParallelCoordinates').then(m => ({ default: m.PlotlyParallelCoordinates })));
const PlotlyParetoPlot = lazy(() => import('../components/plotly/PlotlyParetoPlot').then(m => ({ default: m.PlotlyParetoPlot })));
const PlotlyConvergencePlot = lazy(() => import('../components/plotly/PlotlyConvergencePlot').then(m => ({ default: m.PlotlyConvergencePlot })));
const PlotlyParameterImportance = lazy(() => import('../components/plotly/PlotlyParameterImportance').then(m => ({ default: m.PlotlyParameterImportance })));
// Loading placeholder for lazy components
const ChartLoading = () => (
<div className="flex items-center justify-center h-64 text-dark-400">
<div className="animate-pulse">Loading chart...</div>
</div>
);
export default function Dashboard() {
const navigate = useNavigate();
const { selectedStudy, refreshStudies, isInitialized } = useStudy();
@@ -62,8 +49,8 @@ export default function Dashboard() {
const [paretoFront, setParetoFront] = useState<any[]>([]);
const [allTrialsRaw, setAllTrialsRaw] = useState<any[]>([]); // All trials for parallel coordinates
// Chart library toggle: 'nivo' (dark theme, default), 'plotly' (more interactive), or 'recharts' (simple)
const [chartLibrary, setChartLibrary] = useState<'nivo' | 'plotly' | 'recharts'>('nivo');
// Chart library toggle: 'nivo' (dark theme, default) or 'recharts' (simple)
const [chartLibrary, setChartLibrary] = useState<'nivo' | 'recharts'>('nivo');
// Process status for tracker panels
const [isRunning, setIsRunning] = useState(false);
@@ -464,18 +451,7 @@ export default function Dashboard() {
}`}
title="Modern Nivo charts with dark theme (recommended)"
>
Nivo
</button>
<button
onClick={() => setChartLibrary('plotly')}
className={`px-3 py-1.5 text-sm transition-colors ${
chartLibrary === 'plotly'
? 'bg-primary-500 text-white'
: 'bg-dark-600 text-dark-200 hover:bg-dark-500'
}`}
title="Interactive Plotly charts with zoom, pan, and export"
>
Plotly
Advanced
</button>
<button
onClick={() => setChartLibrary('recharts')}
@@ -570,22 +546,11 @@ export default function Dashboard() {
title="Pareto Front"
subtitle={`${paretoFront.length} Pareto-optimal solutions | ${studyMetadata.sampler || 'NSGA-II'} | ${studyMetadata.objectives?.length || 2} objectives`}
>
{chartLibrary === 'plotly' ? (
<Suspense fallback={<ChartLoading />}>
<PlotlyParetoPlot
trials={allTrialsRaw}
paretoFront={paretoFront}
objectives={studyMetadata.objectives}
height={300}
/>
</Suspense>
) : (
<ParetoPlot
paretoData={paretoFront}
objectives={studyMetadata.objectives}
allTrials={allTrialsRaw}
/>
)}
<ParetoPlot
paretoData={paretoFront}
objectives={studyMetadata.objectives}
allTrials={allTrialsRaw}
/>
</ExpandableChart>
</div>
)}
@@ -605,16 +570,6 @@ export default function Dashboard() {
paretoFront={paretoFront}
height={380}
/>
) : chartLibrary === 'plotly' ? (
<Suspense fallback={<ChartLoading />}>
<PlotlyParallelCoordinates
trials={allTrialsRaw}
objectives={studyMetadata.objectives}
designVariables={studyMetadata.design_variables}
paretoFront={paretoFront}
height={350}
/>
</Suspense>
) : (
<ParallelCoordinatesPlot
paretoData={allTrialsRaw}
@@ -634,24 +589,12 @@ export default function Dashboard() {
title="Convergence"
subtitle={`Best ${studyMetadata?.objectives?.[0]?.name || 'Objective'} over ${allTrialsRaw.length} trials`}
>
{chartLibrary === 'plotly' ? (
<Suspense fallback={<ChartLoading />}>
<PlotlyConvergencePlot
trials={allTrialsRaw}
objectiveIndex={0}
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
direction="minimize"
height={280}
/>
</Suspense>
) : (
<ConvergencePlot
trials={allTrialsRaw}
objectiveIndex={0}
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
direction="minimize"
/>
)}
<ConvergencePlot
trials={allTrialsRaw}
objectiveIndex={0}
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
direction="minimize"
/>
</ExpandableChart>
</div>
)}
@@ -663,32 +606,16 @@ export default function Dashboard() {
title="Parameter Importance"
subtitle={`Correlation with ${studyMetadata?.objectives?.[0]?.name || 'Objective'}`}
>
{chartLibrary === 'plotly' ? (
<Suspense fallback={<ChartLoading />}>
<PlotlyParameterImportance
trials={allTrialsRaw}
designVariables={
studyMetadata?.design_variables?.length > 0
? studyMetadata.design_variables
: Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
}
objectiveIndex={0}
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
height={280}
/>
</Suspense>
) : (
<ParameterImportanceChart
trials={allTrialsRaw}
designVariables={
studyMetadata?.design_variables?.length > 0
? studyMetadata.design_variables
: Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
}
objectiveIndex={0}
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
/>
)}
<ParameterImportanceChart
trials={allTrialsRaw}
designVariables={
studyMetadata?.design_variables?.length > 0
? studyMetadata.design_variables
: Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
}
objectiveIndex={0}
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
/>
</ExpandableChart>
</div>
)}

View File

@@ -394,18 +394,32 @@ const Home: React.FC = () => {
<p className="text-dark-400 text-sm">Study Documentation</p>
</div>
</div>
<button
onClick={() => handleSelectStudy(selectedPreview)}
className="flex items-center gap-2 px-5 py-2.5 rounded-lg transition-all font-semibold whitespace-nowrap hover:-translate-y-0.5"
style={{
background: 'linear-gradient(135deg, #00d4e6 0%, #0891b2 100%)',
color: '#000',
boxShadow: '0 4px 15px rgba(0, 212, 230, 0.3)'
}}
>
Open
<ArrowRight className="w-4 h-4" />
</button>
<div className="flex items-center gap-2">
<button
onClick={() => navigate(`/canvas/${selectedPreview.id}`)}
className="flex items-center gap-2 px-4 py-2.5 rounded-lg transition-all font-medium whitespace-nowrap hover:-translate-y-0.5"
style={{
background: 'rgba(8, 15, 26, 0.85)',
border: '1px solid rgba(0, 212, 230, 0.3)',
color: '#00d4e6'
}}
>
<Layers className="w-4 h-4" />
Canvas
</button>
<button
onClick={() => handleSelectStudy(selectedPreview)}
className="flex items-center gap-2 px-5 py-2.5 rounded-lg transition-all font-semibold whitespace-nowrap hover:-translate-y-0.5"
style={{
background: 'linear-gradient(135deg, #00d4e6 0%, #0891b2 100%)',
color: '#000',
boxShadow: '0 4px 15px rgba(0, 212, 230, 0.3)'
}}
>
Open
<ArrowRight className="w-4 h-4" />
</button>
</div>
</div>
{/* Study Quick Stats */}

View File

@@ -20,11 +20,11 @@ import {
ExternalLink,
Zap,
List,
LucideIcon
LucideIcon,
FileText
} from 'lucide-react';
import { useStudy } from '../context/StudyContext';
import { Card } from '../components/common/Card';
import Plot from 'react-plotly.js';
// ============================================================================
// Types
@@ -642,13 +642,15 @@ export default function Insights() {
Open Full View
</button>
)}
<button
onClick={() => setFullscreen(true)}
className="p-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg transition-colors"
title="Fullscreen"
>
<Maximize2 className="w-5 h-5" />
</button>
{activeInsight.html_path && (
<button
onClick={() => setFullscreen(true)}
className="p-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg transition-colors"
title="Fullscreen"
>
<Maximize2 className="w-5 h-5" />
</button>
)}
</div>
</div>
@@ -674,49 +676,43 @@ export default function Insights() {
</div>
)}
{/* Plotly Figure */}
{/* Insight Result */}
<Card className="p-0 overflow-hidden">
{activeInsight.plotly_figure ? (
<div className="bg-dark-900" style={{ height: '600px' }}>
<Plot
data={activeInsight.plotly_figure.data}
layout={{
...activeInsight.plotly_figure.layout,
autosize: true,
margin: { l: 60, r: 60, t: 60, b: 60 },
paper_bgcolor: '#111827',
plot_bgcolor: '#1f2937',
font: { color: 'white' }
}}
config={{
responsive: true,
displayModeBar: true,
displaylogo: false
}}
style={{ width: '100%', height: '100%' }}
/>
</div>
) : (
<div className="flex flex-col items-center justify-center h-64 text-dark-400 p-8">
<CheckCircle className="w-12 h-12 text-green-400 mb-4" />
<p className="text-lg font-medium text-white mb-2">Insight Generated Successfully</p>
<div className="flex flex-col items-center justify-center h-64 text-dark-400 p-8">
<CheckCircle className="w-12 h-12 text-green-400 mb-4" />
<p className="text-lg font-medium text-white mb-2">Insight Generated Successfully</p>
{activeInsight.html_path ? (
<>
<p className="text-sm text-center mb-4">
Click the button below to view the interactive visualization.
</p>
<button
onClick={() => window.open(`/api/insights/studies/${selectedStudy?.id}/view/${activeInsight.insight_type}`, '_blank')}
className="flex items-center gap-2 px-6 py-3 bg-primary-600 hover:bg-primary-500 text-white rounded-lg font-medium transition-colors"
>
<ExternalLink className="w-5 h-5" />
Open Interactive Visualization
</button>
</>
) : (
<p className="text-sm text-center">
This insight generates HTML files. Click "Open Full View" to see the visualization.
The visualization has been generated. Check the study's insights folder.
</p>
{activeInsight.summary?.html_files && (
<div className="mt-4 text-sm">
<p className="text-dark-400 mb-2">Generated files:</p>
<ul className="space-y-1">
{(activeInsight.summary.html_files as string[]).slice(0, 4).map((f: string, i: number) => (
<li key={i} className="text-dark-300">
{f.split(/[/\\]/).pop()}
</li>
))}
</ul>
</div>
)}
</div>
)}
)}
{activeInsight.summary?.html_files && (
<div className="mt-4 text-sm">
<p className="text-dark-400 mb-2">Generated files:</p>
<ul className="space-y-1">
{(activeInsight.summary.html_files as string[]).slice(0, 4).map((f: string, i: number) => (
<li key={i} className="text-dark-300 flex items-center gap-2">
<FileText className="w-3 h-3" />
{f.split(/[/\\]/).pop()}
</li>
))}
</ul>
</div>
)}
</div>
</Card>
{/* Generate Another */}
@@ -736,8 +732,8 @@ export default function Insights() {
</div>
)}
{/* Fullscreen Modal */}
{fullscreen && activeInsight?.plotly_figure && (
{/* Fullscreen Modal - now opens external HTML */}
{fullscreen && activeInsight && (
<div className="fixed inset-0 z-50 bg-dark-900 flex flex-col">
<div className="flex items-center justify-between p-4 border-b border-dark-600">
<h2 className="text-xl font-bold text-white">
@@ -750,23 +746,24 @@ export default function Insights() {
<X className="w-6 h-6" />
</button>
</div>
<div className="flex-1 p-4">
<Plot
data={activeInsight.plotly_figure.data}
layout={{
...activeInsight.plotly_figure.layout,
autosize: true,
paper_bgcolor: '#111827',
plot_bgcolor: '#1f2937',
font: { color: 'white' }
}}
config={{
responsive: true,
displayModeBar: true,
displaylogo: false
}}
style={{ width: '100%', height: '100%' }}
/>
<div className="flex-1 p-4 flex items-center justify-center">
{activeInsight.html_path ? (
<iframe
src={`/api/insights/studies/${selectedStudy?.id}/view/${activeInsight.insight_type}`}
className="w-full h-full border-0 rounded-lg"
title={activeInsight.insight_name || activeInsight.insight_type}
/>
) : (
<div className="text-center text-dark-400">
<p className="text-lg mb-4">No interactive visualization available for this insight.</p>
<button
onClick={() => setFullscreen(false)}
className="px-4 py-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg"
>
Close
</button>
</div>
)}
</div>
</div>
)}

View File

@@ -278,7 +278,7 @@ export default function Setup() {
Configuration
</button>
<button
onClick={() => setActiveTab('canvas')}
onClick={() => navigate(`/canvas/${selectedStudy?.id || ''}`)}
className="flex items-center gap-2 px-4 py-2 rounded-lg transition-colors bg-primary-600 text-white"
>
<Grid3X3 className="w-4 h-4" />
@@ -333,7 +333,7 @@ export default function Setup() {
Configuration
</button>
<button
onClick={() => setActiveTab('canvas')}
onClick={() => navigate(`/canvas/${selectedStudy?.id || ''}`)}
className="flex items-center gap-2 px-4 py-2 rounded-lg transition-colors bg-dark-800 text-dark-300 hover:text-white hover:bg-dark-700"
>
<Grid3X3 className="w-4 h-4" />

View File

@@ -1,3 +1,6 @@
// AtomizerSpec v2.0 types (unified configuration)
export * from './atomizer-spec';
// Study types
export interface Study {
id: string;

View File

@@ -17,18 +17,10 @@ export default defineConfig({
}
}
},
resolve: {
alias: {
// Use the smaller basic Plotly distribution
'plotly.js/dist/plotly': 'plotly.js-basic-dist'
}
},
build: {
rollupOptions: {
output: {
manualChunks: {
// Separate Plotly into its own chunk for better caching
plotly: ['plotly.js-basic-dist', 'react-plotly.js'],
// Separate React and core libs
vendor: ['react', 'react-dom', 'react-router-dom'],
// Recharts in its own chunk
@@ -37,8 +29,5 @@ export default defineConfig({
}
},
chunkSizeWarningLimit: 600
},
optimizeDeps: {
include: ['plotly.js-basic-dist']
}
})

View File

@@ -25,6 +25,18 @@ if not exist "%CONDA_PATH%\Scripts\activate.bat" (
exit /b 1
)
:: Stop any existing dashboard processes first
echo [0/3] Stopping existing processes...
taskkill /F /FI "WINDOWTITLE eq Atomizer Backend*" >nul 2>&1
taskkill /F /FI "WINDOWTITLE eq Atomizer Frontend*" >nul 2>&1
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :%BACKEND_PORT% ^| findstr LISTENING') do (
taskkill /F /PID %%a >nul 2>&1
)
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :%FRONTEND_PORT% ^| findstr LISTENING') do (
taskkill /F /PID %%a >nul 2>&1
)
ping 127.0.0.1 -n 2 >nul
echo [1/3] Starting Backend Server (port %BACKEND_PORT%)...
start "Atomizer Backend" cmd /k "call %CONDA_PATH%\Scripts\activate.bat %CONDA_ENV% && cd /d %SCRIPT_DIR%backend && python -m uvicorn api.main:app --reload --port %BACKEND_PORT%"

View File

@@ -10,11 +10,11 @@ echo.
taskkill /F /FI "WINDOWTITLE eq Atomizer Backend*" >nul 2>&1
taskkill /F /FI "WINDOWTITLE eq Atomizer Frontend*" >nul 2>&1
:: Kill any remaining processes on the ports
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :8000 ^| findstr LISTENING') do (
:: Kill any remaining processes on the ports (backend: 8001, frontend: 3003)
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :8001 ^| findstr LISTENING') do (
taskkill /F /PID %%a >nul 2>&1
)
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :5173 ^| findstr LISTENING') do (
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :3003 ^| findstr LISTENING') do (
taskkill /F /PID %%a >nul 2>&1
)