Compare commits
25 Commits
b05412f807
...
feature/st
| Author | SHA1 | Date | |
|---|---|---|---|
| a3f18dc377 | |||
| 2cb8dccc3a | |||
| c224b16ac3 | |||
| e1c59a51c1 | |||
| f725e75164 | |||
| e954b130f5 | |||
| 5b22439357 | |||
| 0c252e3a65 | |||
| 4749944a48 | |||
| 3229c31349 | |||
| 14354a2606 | |||
| abbc7b1b50 | |||
| 1cdcc17ffd | |||
| 5c419e2358 | |||
| 89694088a2 | |||
| 91cf9ca1fd | |||
| ced79b8d39 | |||
| 2f0f45de86 | |||
| 47f8b50112 | |||
| cf8c57fdac | |||
| 6c30224341 | |||
| 27e78d3d56 | |||
| cb6b130908 | |||
| f067497e08 | |||
| ba0b9a1fae |
@@ -1 +0,0 @@
|
||||
{"mcpServers": {"atomizer": {"command": "node", "args": ["C:\\Users\\antoi\\Atomizer\\mcp-server\\atomizer-tools\\dist\\index.js"], "env": {"ATOMIZER_MODE": "user", "ATOMIZER_ROOT": "C:\\Users\\antoi\\Atomizer"}}}}
|
||||
@@ -1 +0,0 @@
|
||||
{"mcpServers": {"atomizer": {"command": "node", "args": ["C:\\Users\\antoi\\Atomizer\\mcp-server\\atomizer-tools\\dist\\index.js"], "env": {"ATOMIZER_MODE": "user", "ATOMIZER_ROOT": "C:\\Users\\antoi\\Atomizer"}}}}
|
||||
@@ -1,45 +0,0 @@
|
||||
# Atomizer Assistant
|
||||
|
||||
You are the Atomizer Assistant - an expert system for structural optimization using FEA.
|
||||
|
||||
**Current Mode**: USER
|
||||
|
||||
Your role:
|
||||
- Help engineers with FEA optimization workflows
|
||||
- Create, configure, and run optimization studies
|
||||
- Analyze results and provide insights
|
||||
- Explain FEA concepts and methodology
|
||||
|
||||
Important guidelines:
|
||||
- Be concise and professional
|
||||
- Use technical language appropriate for engineers
|
||||
- You are "Atomizer Assistant", not a generic AI
|
||||
- Use the available MCP tools to perform actions
|
||||
- When asked about studies, use the appropriate tools to get real data
|
||||
|
||||
|
||||
---
|
||||
|
||||
# Current Study: m1_mirror_flatback_lateral
|
||||
|
||||
**Status**: Study directory not found.
|
||||
|
||||
---
|
||||
|
||||
# User Mode Instructions
|
||||
|
||||
You can help with optimization workflows:
|
||||
- Create and configure studies
|
||||
- Run optimizations
|
||||
- Analyze results
|
||||
- Generate reports
|
||||
- Explain FEA concepts
|
||||
|
||||
**For code modifications**, suggest switching to Power Mode.
|
||||
|
||||
Available tools:
|
||||
- `list_studies`, `get_study_status`, `create_study`
|
||||
- `run_optimization`, `stop_optimization`, `get_optimization_status`
|
||||
- `get_trial_data`, `analyze_convergence`, `compare_trials`, `get_best_design`
|
||||
- `generate_report`, `export_data`
|
||||
- `explain_physics`, `recommend_method`, `query_extractors`
|
||||
@@ -1,45 +0,0 @@
|
||||
# Atomizer Assistant
|
||||
|
||||
You are the Atomizer Assistant - an expert system for structural optimization using FEA.
|
||||
|
||||
**Current Mode**: USER
|
||||
|
||||
Your role:
|
||||
- Help engineers with FEA optimization workflows
|
||||
- Create, configure, and run optimization studies
|
||||
- Analyze results and provide insights
|
||||
- Explain FEA concepts and methodology
|
||||
|
||||
Important guidelines:
|
||||
- Be concise and professional
|
||||
- Use technical language appropriate for engineers
|
||||
- You are "Atomizer Assistant", not a generic AI
|
||||
- Use the available MCP tools to perform actions
|
||||
- When asked about studies, use the appropriate tools to get real data
|
||||
|
||||
|
||||
---
|
||||
|
||||
# Current Study: m1_mirror_flatback_lateral
|
||||
|
||||
**Status**: Study directory not found.
|
||||
|
||||
---
|
||||
|
||||
# User Mode Instructions
|
||||
|
||||
You can help with optimization workflows:
|
||||
- Create and configure studies
|
||||
- Run optimizations
|
||||
- Analyze results
|
||||
- Generate reports
|
||||
- Explain FEA concepts
|
||||
|
||||
**For code modifications**, suggest switching to Power Mode.
|
||||
|
||||
Available tools:
|
||||
- `list_studies`, `get_study_status`, `create_study`
|
||||
- `run_optimization`, `stop_optimization`, `get_optimization_status`
|
||||
- `get_trial_data`, `analyze_convergence`, `compare_trials`, `get_best_design`
|
||||
- `generate_report`, `export_data`
|
||||
- `explain_physics`, `recommend_method`, `query_extractors`
|
||||
@@ -62,7 +62,23 @@
|
||||
"Bash(xargs -I{} git ls-tree -r -l HEAD {})",
|
||||
"Bash(sort:*)",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe introspect_model.py)",
|
||||
"Bash(xargs:*)"
|
||||
"Bash(xargs:*)",
|
||||
"Bash(ping:*)",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe -c \"import requests; r = requests.post\\(''http://127.0.0.1:8001/api/claude/sessions'', json={''mode'': ''user''}\\); print\\(f''Status: {r.status_code}''\\); print\\(f''Response: {r.text}''\\)\")",
|
||||
"Bash(start \"Atomizer Backend\" cmd /k C:UsersantoiAtomizerrestart_backend.bat)",
|
||||
"Bash(start \"Test Backend\" cmd /c \"cd /d C:\\\\Users\\\\antoi\\\\Atomizer\\\\atomizer-dashboard\\\\backend && C:\\\\Users\\\\antoi\\\\anaconda3\\\\Scripts\\\\activate.bat atomizer && python -m uvicorn api.main:app --port 8002\")",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe C:UsersantoiAtomizertest_backend.py)",
|
||||
"Bash(start \"Backend 8002\" C:UsersantoiAtomizerstart_backend_8002.bat)",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe -c \"from api.main import app; print\\(''Import OK''\\)\")",
|
||||
"Bash(find:*)",
|
||||
"Bash(npx tailwindcss:*)",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe -c \"from pathlib import Path; p = Path\\(''C:/Users/antoi/Atomizer/studies''\\) / ''M1_Mirror/m1_mirror_cost_reduction_lateral''; print\\(''exists:'', p.exists\\(\\), ''path:'', p\\)\")",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe -c \"import sys, json; d=json.load\\(sys.stdin\\); print\\(''Study:'', d.get\\(''meta'',{}\\).get\\(''study_name'',''N/A''\\)\\); print\\(''Design Variables:''\\); [print\\(f'' - {dv[\"\"name\"\"]} \\({dv[\"\"expression_name\"\"]}\\)''\\) for dv in d.get\\(''design_variables'',[]\\)]\")",
|
||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe -m py_compile:*)",
|
||||
"Skill(ralph-loop:ralph-loop)",
|
||||
"Skill(ralph-loop:ralph-loop:*)",
|
||||
"mcp__Claude_in_Chrome__computer",
|
||||
"mcp__Claude_in_Chrome__navigate"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
|
||||
12
.gitignore
vendored
12
.gitignore
vendored
@@ -110,5 +110,17 @@ _dat_run*.dat
|
||||
.claude-mcp-*.json
|
||||
.claude-prompt-*.md
|
||||
|
||||
# Backend logs
|
||||
backend_stdout.log
|
||||
backend_stderr.log
|
||||
*.log.bak
|
||||
|
||||
# Linter/formatter caches
|
||||
.ruff_cache/
|
||||
.mypy_cache/
|
||||
|
||||
# Auto-generated documentation (regenerate with: python -m optimization_engine.auto_doc all)
|
||||
docs/generated/
|
||||
|
||||
# Malformed filenames (Windows path used as filename)
|
||||
C:*
|
||||
|
||||
58
CHANGELOG.md
58
CHANGELOG.md
@@ -6,6 +6,64 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.5.0] - 2025-01-24
|
||||
|
||||
### Project Cleanup & Organization
|
||||
- Deleted 102+ orphaned MCP session temp files
|
||||
- Removed build artifacts (htmlcov, dist, __pycache__)
|
||||
- Archived superseded plan documents (RALPH_LOOP V2/V3, CANVAS V3, etc.)
|
||||
- Moved debug/analysis scripts from tests/ to tools/analysis/
|
||||
- Updated .gitignore with missing patterns
|
||||
- Cleaned empty directories
|
||||
|
||||
## [0.4.0] - 2025-01-22
|
||||
|
||||
### Canvas UX Improvements (Phases 7-9)
|
||||
- **Resizable Panels**: Left sidebar (200-400px) and right panel (280-600px) with localStorage persistence
|
||||
- **All Palette Items Enabled**: All 8 node types now draggable (model, solver, designVar, extractor, objective, constraint, algorithm, surrogate)
|
||||
- **Solver Configuration**: Engine selection (NX Nastran, MSC Nastran, Python Script) with solution type dropdowns (SOL101-SOL200)
|
||||
|
||||
### AtomizerSpec v2.0
|
||||
- Unified JSON configuration schema for all studies
|
||||
- Added SolverEngine and NastranSolutionType types
|
||||
- Canvas position persistence for all nodes
|
||||
- Migration support from legacy optimization_config.json
|
||||
|
||||
## [0.3.0] - 2025-01-18
|
||||
|
||||
### Dashboard V3.1 - Canvas Builder
|
||||
- Visual workflow builder with 9 node types
|
||||
- Spec ↔ ReactFlow bidirectional converter
|
||||
- WebSocket real-time synchronization
|
||||
- Claude chat integration
|
||||
- Custom extractors with in-canvas code editor
|
||||
- Model introspection panel
|
||||
|
||||
### Learning Atomizer Core (LAC)
|
||||
- Persistent memory system for accumulated knowledge
|
||||
- Session insights recording (failures, workarounds, patterns)
|
||||
- Optimization outcome tracking
|
||||
|
||||
## [0.2.5] - 2025-01-16
|
||||
|
||||
### GNN Surrogate for Zernike Optimization
|
||||
- PolarMirrorGraph with fixed 3000-node polar grid
|
||||
- ZernikeGNN model with design-conditioned convolutions
|
||||
- Differentiable GPU-accelerated Zernike fitting
|
||||
- Training pipeline with multi-task loss
|
||||
|
||||
### DevLoop Automation
|
||||
- Closed-loop development system with AI agents
|
||||
- Gemini planning, Claude implementation
|
||||
- Playwright browser testing for dashboard UI
|
||||
|
||||
## [0.2.1] - 2025-01-07
|
||||
|
||||
### Optimization Engine v2.0 Restructure
|
||||
- Reorganized into modular subpackages (core/, nx/, study/, config/)
|
||||
- SpecManager for AtomizerSpec handling
|
||||
- Deprecation warnings for old import paths
|
||||
|
||||
### Phase 3.3 - Dashboard & Multi-Solution Support (November 23, 2025)
|
||||
|
||||
#### Added
|
||||
|
||||
43
CLAUDE.md
43
CLAUDE.md
@@ -55,6 +55,49 @@ If working directory is inside a study (`studies/*/`):
|
||||
- If no study context: Offer to create one or list available studies
|
||||
- After code changes: Update documentation proactively (SYS_12, cheatsheet)
|
||||
|
||||
### Step 5: Use DevLoop for Multi-Step Development Tasks
|
||||
|
||||
**CRITICAL: For any development task with 3+ steps, USE DEVLOOP instead of manual work.**
|
||||
|
||||
DevLoop is the closed-loop development system that coordinates AI agents for autonomous development:
|
||||
|
||||
```bash
|
||||
# Plan a task with Gemini
|
||||
python tools/devloop_cli.py plan "fix extractor exports"
|
||||
|
||||
# Implement with Claude
|
||||
python tools/devloop_cli.py implement
|
||||
|
||||
# Test filesystem/API
|
||||
python tools/devloop_cli.py test --study support_arm
|
||||
|
||||
# Test dashboard UI with Playwright
|
||||
python tools/devloop_cli.py browser --level full
|
||||
|
||||
# Analyze failures
|
||||
python tools/devloop_cli.py analyze
|
||||
|
||||
# Full autonomous cycle
|
||||
python tools/devloop_cli.py start "add new stress extractor"
|
||||
```
|
||||
|
||||
**When to use DevLoop:**
|
||||
- Fixing bugs that require multiple file changes
|
||||
- Adding new features or extractors
|
||||
- Debugging optimization failures
|
||||
- Testing dashboard UI changes
|
||||
- Any task that would take 3+ manual steps
|
||||
|
||||
**Browser test levels:**
|
||||
- `quick` - Smoke test (1 test)
|
||||
- `home` - Home page verification (2 tests)
|
||||
- `full` - All UI tests (5+ tests)
|
||||
- `study` - Canvas/dashboard for specific study
|
||||
|
||||
**DO NOT default to manual debugging** - use the automation we built!
|
||||
|
||||
**Full documentation**: `docs/guides/DEVLOOP.md`
|
||||
|
||||
---
|
||||
|
||||
## Quick Start - Protocol Operating System
|
||||
|
||||
1
CUsersantoiAtomizeropenapi_dump.json
Normal file
1
CUsersantoiAtomizeropenapi_dump.json
Normal file
File diff suppressed because one or more lines are too long
@@ -13,7 +13,7 @@ import sys
|
||||
# Add parent directory to path to import optimization_engine
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent.parent))
|
||||
|
||||
from api.routes import optimization, claude, terminal, insights, context, files, nx
|
||||
from api.routes import optimization, claude, terminal, insights, context, files, nx, claude_code, spec
|
||||
from api.websocket import optimization_stream
|
||||
|
||||
|
||||
@@ -60,6 +60,9 @@ app.include_router(insights.router, prefix="/api/insights", tags=["insights"])
|
||||
app.include_router(context.router, prefix="/api/context", tags=["context"])
|
||||
app.include_router(files.router, prefix="/api/files", tags=["files"])
|
||||
app.include_router(nx.router, prefix="/api/nx", tags=["nx"])
|
||||
app.include_router(claude_code.router, prefix="/api", tags=["claude-code"])
|
||||
app.include_router(spec.router, prefix="/api", tags=["spec"])
|
||||
app.include_router(spec.validate_router, prefix="/api", tags=["spec"])
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
|
||||
@@ -187,7 +187,15 @@ async def session_websocket(websocket: WebSocket, session_id: str):
|
||||
continue
|
||||
|
||||
# Get canvas state from message or use stored state
|
||||
canvas_state = data.get("canvas_state") or current_canvas_state
|
||||
msg_canvas = data.get("canvas_state")
|
||||
canvas_state = msg_canvas if msg_canvas is not None else current_canvas_state
|
||||
|
||||
# Debug logging
|
||||
if canvas_state:
|
||||
node_count = len(canvas_state.get("nodes", []))
|
||||
print(f"[Claude WS] Sending message with canvas state: {node_count} nodes")
|
||||
else:
|
||||
print("[Claude WS] Sending message WITHOUT canvas state")
|
||||
|
||||
async for chunk in manager.send_message(
|
||||
session_id,
|
||||
@@ -401,6 +409,175 @@ async def websocket_chat(websocket: WebSocket):
|
||||
pass
|
||||
|
||||
|
||||
# ========== POWER MODE: Direct API with Write Tools ==========
|
||||
|
||||
@router.websocket("/sessions/{session_id}/ws/power")
|
||||
async def power_mode_websocket(websocket: WebSocket, session_id: str):
|
||||
"""
|
||||
WebSocket for power mode chat using direct Anthropic API with write tools.
|
||||
|
||||
Unlike the regular /ws endpoint which uses Claude CLI + MCP,
|
||||
this uses AtomizerClaudeAgent directly with built-in write tools.
|
||||
This allows immediate modifications without permission prompts.
|
||||
|
||||
Message formats (client -> server):
|
||||
{"type": "message", "content": "user message"}
|
||||
{"type": "set_study", "study_id": "study_name"}
|
||||
{"type": "ping"}
|
||||
|
||||
Message formats (server -> client):
|
||||
{"type": "text", "content": "..."}
|
||||
{"type": "tool_call", "tool": "...", "input": {...}}
|
||||
{"type": "tool_result", "result": "..."}
|
||||
{"type": "done", "tool_calls": [...]}
|
||||
{"type": "error", "message": "..."}
|
||||
{"type": "spec_modified", "changes": [...]}
|
||||
{"type": "pong"}
|
||||
"""
|
||||
await websocket.accept()
|
||||
|
||||
manager = get_session_manager()
|
||||
session = manager.get_session(session_id)
|
||||
|
||||
if not session:
|
||||
await websocket.send_json({"type": "error", "message": "Session not found"})
|
||||
await websocket.close()
|
||||
return
|
||||
|
||||
# Import AtomizerClaudeAgent for direct API access
|
||||
from api.services.claude_agent import AtomizerClaudeAgent
|
||||
|
||||
# Create agent with study context
|
||||
agent = AtomizerClaudeAgent(study_id=session.study_id)
|
||||
conversation_history: List[Dict[str, Any]] = []
|
||||
|
||||
# Load initial spec and set canvas state so Claude sees current canvas
|
||||
initial_spec = agent.load_current_spec()
|
||||
if initial_spec:
|
||||
# Send initial spec to frontend
|
||||
await websocket.send_json({
|
||||
"type": "spec_updated",
|
||||
"spec": initial_spec,
|
||||
"reason": "initial_load"
|
||||
})
|
||||
|
||||
try:
|
||||
while True:
|
||||
data = await websocket.receive_json()
|
||||
|
||||
if data.get("type") == "message":
|
||||
content = data.get("content", "")
|
||||
if not content:
|
||||
continue
|
||||
|
||||
try:
|
||||
# Use streaming API with tool support for real-time response
|
||||
last_tool_calls = []
|
||||
async for event in agent.chat_stream_with_tools(content, conversation_history):
|
||||
event_type = event.get("type")
|
||||
|
||||
if event_type == "text":
|
||||
# Stream text tokens to frontend immediately
|
||||
await websocket.send_json({
|
||||
"type": "text",
|
||||
"content": event.get("content", ""),
|
||||
})
|
||||
|
||||
elif event_type == "tool_call":
|
||||
# Tool is being called
|
||||
tool_info = event.get("tool", {})
|
||||
await websocket.send_json({
|
||||
"type": "tool_call",
|
||||
"tool": tool_info,
|
||||
})
|
||||
|
||||
elif event_type == "tool_result":
|
||||
# Tool finished executing
|
||||
tool_name = event.get("tool", "")
|
||||
await websocket.send_json({
|
||||
"type": "tool_result",
|
||||
"tool": tool_name,
|
||||
"result": event.get("result", ""),
|
||||
})
|
||||
|
||||
# If it was a write tool, send full updated spec
|
||||
if tool_name in ["add_design_variable", "add_extractor",
|
||||
"add_objective", "add_constraint",
|
||||
"update_spec_field", "remove_node",
|
||||
"create_study"]:
|
||||
# Load updated spec and update agent's canvas state
|
||||
updated_spec = agent.load_current_spec()
|
||||
if updated_spec:
|
||||
await websocket.send_json({
|
||||
"type": "spec_updated",
|
||||
"tool": tool_name,
|
||||
"spec": updated_spec, # Full spec for direct canvas update
|
||||
})
|
||||
|
||||
elif event_type == "done":
|
||||
# Streaming complete
|
||||
last_tool_calls = event.get("tool_calls", [])
|
||||
await websocket.send_json({
|
||||
"type": "done",
|
||||
"tool_calls": last_tool_calls,
|
||||
})
|
||||
|
||||
# Update conversation history for next message
|
||||
# Note: For proper history tracking, we'd need to store messages properly
|
||||
# For now, we append the user message and response
|
||||
conversation_history.append({"role": "user", "content": content})
|
||||
conversation_history.append({"role": "assistant", "content": event.get("response", "")})
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
await websocket.send_json({
|
||||
"type": "error",
|
||||
"message": str(e),
|
||||
})
|
||||
|
||||
elif data.get("type") == "canvas_edit":
|
||||
# User made a manual edit to the canvas - update Claude's context
|
||||
spec = data.get("spec")
|
||||
if spec:
|
||||
agent.set_canvas_state(spec)
|
||||
await websocket.send_json({
|
||||
"type": "canvas_edit_received",
|
||||
"acknowledged": True
|
||||
})
|
||||
|
||||
elif data.get("type") == "set_study":
|
||||
study_id = data.get("study_id")
|
||||
if study_id:
|
||||
await manager.set_study_context(session_id, study_id)
|
||||
# Recreate agent with new study context
|
||||
agent = AtomizerClaudeAgent(study_id=study_id)
|
||||
conversation_history = [] # Clear history on study change
|
||||
# Load spec for new study
|
||||
new_spec = agent.load_current_spec()
|
||||
await websocket.send_json({
|
||||
"type": "context_updated",
|
||||
"study_id": study_id,
|
||||
})
|
||||
if new_spec:
|
||||
await websocket.send_json({
|
||||
"type": "spec_updated",
|
||||
"spec": new_spec,
|
||||
"reason": "study_change"
|
||||
})
|
||||
|
||||
elif data.get("type") == "ping":
|
||||
await websocket.send_json({"type": "pong"})
|
||||
|
||||
except WebSocketDisconnect:
|
||||
pass
|
||||
except Exception as e:
|
||||
try:
|
||||
await websocket.send_json({"type": "error", "message": str(e)})
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
@router.get("/suggestions")
|
||||
async def get_chat_suggestions(study_id: Optional[str] = None):
|
||||
"""
|
||||
|
||||
@@ -83,23 +83,49 @@ async def generate_extractor_code(request: ExtractorGenerationRequest):
|
||||
# Build focused system prompt for extractor generation
|
||||
system_prompt = """You are generating a Python custom extractor function for Atomizer FEA optimization.
|
||||
|
||||
The function MUST:
|
||||
1. Have signature: def extract(op2_path: str, fem_path: str, params: dict, subcase_id: int = 1) -> dict
|
||||
2. Return a dict with extracted values (e.g., {"max_stress": 150.5, "mass": 2.3})
|
||||
3. Use pyNastran.op2.op2.OP2 for reading OP2 results
|
||||
4. Handle missing data gracefully with try/except blocks
|
||||
IMPORTANT: Choose the appropriate function signature based on what data is needed:
|
||||
|
||||
Available imports (already available, just use them):
|
||||
- from pyNastran.op2.op2 import OP2
|
||||
- import numpy as np
|
||||
- from pathlib import Path
|
||||
## Option 1: FEA Results (OP2) - Use for stresses, displacements, frequencies, forces
|
||||
```python
|
||||
def extract(op2_path: str, fem_path: str, params: dict, subcase_id: int = 1) -> dict:
|
||||
from pyNastran.op2.op2 import OP2
|
||||
op2 = OP2()
|
||||
op2.read_op2(op2_path)
|
||||
# Access: op2.displacements[subcase_id], op2.cquad4_stress[subcase_id], etc.
|
||||
return {"max_stress": value}
|
||||
```
|
||||
|
||||
Common patterns:
|
||||
- Displacement: op2.displacements[subcase_id].data[0, :, 1:4] (x,y,z components)
|
||||
## Option 2: Expression/Computed Values (no FEA needed) - Use for dimensions, volumes, derived values
|
||||
```python
|
||||
def extract(trial_dir: str, config: dict, context: dict) -> dict:
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
# Read mass properties (if available from model introspection)
|
||||
mass_file = Path(trial_dir) / "mass_properties.json"
|
||||
if mass_file.exists():
|
||||
with open(mass_file) as f:
|
||||
props = json.load(f)
|
||||
mass = props.get("mass_kg", 0)
|
||||
|
||||
# Or use config values directly (e.g., expression values)
|
||||
length_mm = config.get("length_expression", 100)
|
||||
|
||||
# context has results from other extractors
|
||||
other_value = context.get("other_extractor_output", 0)
|
||||
|
||||
return {"computed_value": length_mm * 2}
|
||||
```
|
||||
|
||||
Available imports: pyNastran.op2.op2.OP2, numpy, pathlib.Path, json
|
||||
|
||||
Common OP2 patterns:
|
||||
- Displacement: op2.displacements[subcase_id].data[0, :, 1:4] (x,y,z)
|
||||
- Stress: op2.cquad4_stress[subcase_id] or op2.ctria3_stress[subcase_id]
|
||||
- Eigenvalues: op2.eigenvalues[subcase_id]
|
||||
- Mass: op2.grid_point_weight (if available)
|
||||
|
||||
Return ONLY the complete Python code wrapped in ```python ... ```. No explanations outside the code block."""
|
||||
Return ONLY the complete Python code wrapped in ```python ... ```. No explanations."""
|
||||
|
||||
# Build user prompt with context
|
||||
user_prompt = f"Generate a custom extractor that: {request.prompt}"
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
646
atomizer-dashboard/backend/api/routes/spec.py
Normal file
646
atomizer-dashboard/backend/api/routes/spec.py
Normal file
@@ -0,0 +1,646 @@
|
||||
"""
|
||||
AtomizerSpec v2.0 API Endpoints
|
||||
|
||||
REST API for managing AtomizerSpec configurations.
|
||||
All spec modifications flow through these endpoints.
|
||||
|
||||
Endpoints:
|
||||
- GET /studies/{study_id}/spec - Get full spec
|
||||
- PUT /studies/{study_id}/spec - Replace entire spec
|
||||
- PATCH /studies/{study_id}/spec - Partial update
|
||||
- POST /studies/{study_id}/spec/validate - Validate spec
|
||||
- POST /studies/{study_id}/spec/nodes - Add node
|
||||
- PATCH /studies/{study_id}/spec/nodes/{node_id} - Update node
|
||||
- DELETE /studies/{study_id}/spec/nodes/{node_id} - Delete node
|
||||
- POST /studies/{study_id}/spec/custom-functions - Add custom extractor
|
||||
- WebSocket /studies/{study_id}/spec/sync - Real-time sync
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect, Query
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel, Field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
import json
|
||||
import sys
|
||||
import asyncio
|
||||
|
||||
# Add project root to path
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
|
||||
|
||||
from api.services.spec_manager import (
|
||||
SpecManager,
|
||||
SpecManagerError,
|
||||
SpecNotFoundError,
|
||||
SpecConflictError,
|
||||
get_spec_manager,
|
||||
)
|
||||
from optimization_engine.config.spec_models import (
|
||||
AtomizerSpec,
|
||||
ValidationReport,
|
||||
)
|
||||
from optimization_engine.config.spec_validator import SpecValidationError
|
||||
|
||||
router = APIRouter(prefix="/studies/{study_id:path}/spec", tags=["spec"])
|
||||
|
||||
# Base studies directory
|
||||
STUDIES_DIR = Path(__file__).parent.parent.parent.parent.parent / "studies"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Request/Response Models
|
||||
# ============================================================================
|
||||
|
||||
class SpecPatchRequest(BaseModel):
|
||||
"""Request for patching a spec field."""
|
||||
path: str = Field(..., description="JSONPath to the field (e.g., 'objectives[0].weight')")
|
||||
value: Any = Field(..., description="New value")
|
||||
modified_by: str = Field(default="api", description="Who is making the change")
|
||||
|
||||
|
||||
class NodeAddRequest(BaseModel):
|
||||
"""Request for adding a node."""
|
||||
type: str = Field(..., description="Node type: designVar, extractor, objective, constraint")
|
||||
data: Dict[str, Any] = Field(..., description="Node data")
|
||||
modified_by: str = Field(default="canvas", description="Who is making the change")
|
||||
|
||||
|
||||
class NodeUpdateRequest(BaseModel):
|
||||
"""Request for updating a node."""
|
||||
updates: Dict[str, Any] = Field(..., description="Fields to update")
|
||||
modified_by: str = Field(default="canvas", description="Who is making the change")
|
||||
|
||||
|
||||
class CustomFunctionRequest(BaseModel):
|
||||
"""Request for adding a custom extractor function."""
|
||||
name: str = Field(..., description="Function name")
|
||||
code: str = Field(..., description="Python source code")
|
||||
outputs: List[str] = Field(..., description="Output names")
|
||||
description: Optional[str] = Field(default=None, description="Human-readable description")
|
||||
modified_by: str = Field(default="claude", description="Who is making the change")
|
||||
|
||||
|
||||
class ExtractorValidationRequest(BaseModel):
|
||||
"""Request for validating custom extractor code."""
|
||||
function_name: str = Field(default="extract", description="Expected function name")
|
||||
source: str = Field(..., description="Python source code to validate")
|
||||
|
||||
|
||||
class SpecUpdateResponse(BaseModel):
|
||||
"""Response for spec modification operations."""
|
||||
success: bool
|
||||
hash: str
|
||||
modified: str
|
||||
modified_by: str
|
||||
|
||||
|
||||
class NodeAddResponse(BaseModel):
|
||||
"""Response for node add operation."""
|
||||
success: bool
|
||||
node_id: str
|
||||
message: str
|
||||
|
||||
|
||||
class ValidationResponse(BaseModel):
|
||||
"""Response for validation endpoint."""
|
||||
valid: bool
|
||||
errors: List[Dict[str, Any]]
|
||||
warnings: List[Dict[str, Any]]
|
||||
summary: Dict[str, int]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
def resolve_study_path(study_id: str) -> Path:
|
||||
"""Find study folder by scanning all topic directories.
|
||||
|
||||
Supports both formats:
|
||||
- "study_name" - Will scan topic folders to find it
|
||||
- "Topic/study_name" - Direct nested path (e.g., "M1_Mirror/m1_mirror_v1")
|
||||
"""
|
||||
# Handle nested paths (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
|
||||
if "/" in study_id:
|
||||
nested_path = STUDIES_DIR / study_id.replace("/", "\\") # Handle Windows paths
|
||||
if nested_path.exists() and nested_path.is_dir():
|
||||
return nested_path
|
||||
# Also try with forward slashes (Path handles both)
|
||||
nested_path = STUDIES_DIR / study_id
|
||||
if nested_path.exists() and nested_path.is_dir():
|
||||
return nested_path
|
||||
|
||||
# Direct path (flat structure)
|
||||
direct_path = STUDIES_DIR / study_id
|
||||
if direct_path.exists() and direct_path.is_dir():
|
||||
return direct_path
|
||||
|
||||
# Scan topic folders (nested structure)
|
||||
for topic_dir in STUDIES_DIR.iterdir():
|
||||
if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
|
||||
study_dir = topic_dir / study_id
|
||||
if study_dir.exists() and study_dir.is_dir():
|
||||
return study_dir
|
||||
|
||||
raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
|
||||
|
||||
|
||||
def get_manager(study_id: str) -> SpecManager:
|
||||
"""Get SpecManager for a study."""
|
||||
study_path = resolve_study_path(study_id)
|
||||
return get_spec_manager(study_path)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# REST Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.get("", response_model=None)
|
||||
async def get_spec(study_id: str):
|
||||
"""
|
||||
Get the full AtomizerSpec for a study.
|
||||
|
||||
Returns the complete spec JSON with all design variables, extractors,
|
||||
objectives, constraints, and canvas state.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"No AtomizerSpec found for study '{study_id}'. Use migration or create new spec."
|
||||
)
|
||||
|
||||
try:
|
||||
spec = manager.load()
|
||||
return spec.model_dump(mode='json')
|
||||
except SpecValidationError as e:
|
||||
# Return spec even if invalid, but include validation info
|
||||
raw = manager.load_raw()
|
||||
return JSONResponse(
|
||||
status_code=200,
|
||||
content={
|
||||
**raw,
|
||||
"_validation_error": str(e)
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.get("/raw")
|
||||
async def get_spec_raw(study_id: str):
|
||||
"""
|
||||
Get the raw spec JSON without validation.
|
||||
|
||||
Useful for debugging or when spec is invalid.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
return manager.load_raw()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.get("/hash")
|
||||
async def get_spec_hash(study_id: str):
|
||||
"""Get the current spec hash for conflict detection."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
return {"hash": manager.get_hash()}
|
||||
|
||||
|
||||
@router.put("", response_model=SpecUpdateResponse)
|
||||
async def replace_spec(
|
||||
study_id: str,
|
||||
spec: Dict[str, Any],
|
||||
modified_by: str = Query(default="api"),
|
||||
expected_hash: Optional[str] = Query(default=None)
|
||||
):
|
||||
"""
|
||||
Replace the entire spec.
|
||||
|
||||
Validates the new spec before saving. Optionally check for conflicts
|
||||
using expected_hash parameter.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
try:
|
||||
new_hash = manager.save(spec, modified_by=modified_by, expected_hash=expected_hash)
|
||||
reloaded = manager.load()
|
||||
return SpecUpdateResponse(
|
||||
success=True,
|
||||
hash=new_hash,
|
||||
modified=reloaded.meta.modified or "",
|
||||
modified_by=modified_by
|
||||
)
|
||||
except SpecConflictError as e:
|
||||
raise HTTPException(
|
||||
status_code=409,
|
||||
detail={
|
||||
"message": str(e),
|
||||
"current_hash": e.current_hash
|
||||
}
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.patch("", response_model=SpecUpdateResponse)
|
||||
async def patch_spec(study_id: str, request: SpecPatchRequest):
|
||||
"""
|
||||
Partial update to spec using JSONPath.
|
||||
|
||||
Example paths:
|
||||
- "objectives[0].weight" - Update objective weight
|
||||
- "design_variables[1].bounds.max" - Update DV bound
|
||||
- "meta.description" - Update description
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
spec = manager.patch(request.path, request.value, modified_by=request.modified_by)
|
||||
return SpecUpdateResponse(
|
||||
success=True,
|
||||
hash=manager.get_hash(),
|
||||
modified=spec.meta.modified or "",
|
||||
modified_by=request.modified_by
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.post("/validate", response_model=ValidationResponse)
|
||||
async def validate_spec(study_id: str):
|
||||
"""
|
||||
Validate the spec and return detailed report.
|
||||
|
||||
Returns errors, warnings, and summary of the spec contents.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
report = manager.validate_and_report()
|
||||
return ValidationResponse(
|
||||
valid=report.valid,
|
||||
errors=[e.model_dump() for e in report.errors],
|
||||
warnings=[w.model_dump() for w in report.warnings],
|
||||
summary=report.summary.model_dump()
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Node CRUD Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/nodes", response_model=NodeAddResponse)
|
||||
async def add_node(study_id: str, request: NodeAddRequest):
|
||||
"""
|
||||
Add a new node to the spec.
|
||||
|
||||
Supported types: designVar, extractor, objective, constraint
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
valid_types = ["designVar", "extractor", "objective", "constraint"]
|
||||
if request.type not in valid_types:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid node type '{request.type}'. Valid: {valid_types}"
|
||||
)
|
||||
|
||||
try:
|
||||
node_id = manager.add_node(request.type, request.data, modified_by=request.modified_by)
|
||||
return NodeAddResponse(
|
||||
success=True,
|
||||
node_id=node_id,
|
||||
message=f"Added {request.type} node: {node_id}"
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.patch("/nodes/{node_id}")
|
||||
async def update_node(study_id: str, node_id: str, request: NodeUpdateRequest):
|
||||
"""Update an existing node's properties."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.update_node(node_id, request.updates, modified_by=request.modified_by)
|
||||
return {"success": True, "message": f"Updated node {node_id}"}
|
||||
except SpecManagerError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.delete("/nodes/{node_id}")
|
||||
async def delete_node(
|
||||
study_id: str,
|
||||
node_id: str,
|
||||
modified_by: str = Query(default="canvas")
|
||||
):
|
||||
"""
|
||||
Delete a node and all edges referencing it.
|
||||
|
||||
Use with caution - this will also remove any objectives or constraints
|
||||
that reference a deleted extractor.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.remove_node(node_id, modified_by=modified_by)
|
||||
return {"success": True, "message": f"Removed node {node_id}"}
|
||||
except SpecManagerError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Custom Function Endpoint
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/custom-functions", response_model=NodeAddResponse)
|
||||
async def add_custom_function(study_id: str, request: CustomFunctionRequest):
|
||||
"""
|
||||
Add a custom Python function as an extractor.
|
||||
|
||||
The function will be available in the optimization workflow.
|
||||
Claude can use this to add new physics extraction logic.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
extractor_id = manager.add_custom_function(
|
||||
name=request.name,
|
||||
code=request.code,
|
||||
outputs=request.outputs,
|
||||
description=request.description,
|
||||
modified_by=request.modified_by
|
||||
)
|
||||
return NodeAddResponse(
|
||||
success=True,
|
||||
node_id=extractor_id,
|
||||
message=f"Added custom extractor: {request.name}"
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# Separate router for non-study-specific endpoints: these routes take no
# study_id path parameter, so they cannot live on the study-scoped router.
validate_router = APIRouter(prefix="/spec", tags=["spec"])
|
||||
|
||||
|
||||
@validate_router.post("/validate-extractor")
async def validate_custom_extractor(request: ExtractorValidationRequest):
    """Validate custom extractor Python code.

    Checks syntax, security patterns, and function signature. Requires no
    study, so it can be called before the code is added to a spec.
    """
    try:
        from optimization_engine.extractors.custom_extractor_loader import (
            validate_extractor_code,
            ExtractorSecurityError,
        )

        try:
            ok, problems = validate_extractor_code(request.source, request.function_name)
            return {"valid": ok, "errors": problems}
        except ExtractorSecurityError as sec_err:
            # Security rejection is a normal validation outcome, not a 500.
            return {"valid": False, "errors": [str(sec_err)]}

    except ImportError as e:
        raise HTTPException(
            status_code=500,
            detail=f"Custom extractor loader not available: {e}"
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Edge Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/edges")
|
||||
async def add_edge(
|
||||
study_id: str,
|
||||
source: str = Query(..., description="Source node ID"),
|
||||
target: str = Query(..., description="Target node ID"),
|
||||
modified_by: str = Query(default="canvas")
|
||||
):
|
||||
"""Add a canvas edge between two nodes."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.add_edge(source, target, modified_by=modified_by)
|
||||
return {"success": True, "message": f"Added edge {source} -> {target}"}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.delete("/edges")
|
||||
async def delete_edge(
|
||||
study_id: str,
|
||||
source: str = Query(..., description="Source node ID"),
|
||||
target: str = Query(..., description="Target node ID"),
|
||||
modified_by: str = Query(default="canvas")
|
||||
):
|
||||
"""Remove a canvas edge."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.remove_edge(source, target, modified_by=modified_by)
|
||||
return {"success": True, "message": f"Removed edge {source} -> {target}"}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# WebSocket Sync Endpoint
|
||||
# ============================================================================
|
||||
|
||||
class WebSocketSubscriber:
    """WebSocket subscriber adapter.

    Wraps a raw WebSocket behind the async ``send_json`` interface that
    ``manager.subscribe()`` appears to expect, so spec-change notifications
    can be forwarded straight to the connected client.
    """

    def __init__(self, websocket: WebSocket):
        # The underlying connection; payloads are forwarded to it verbatim.
        self.websocket = websocket

    async def send_json(self, data: Dict[str, Any]) -> None:
        """Forward a JSON-serializable payload to the connected client."""
        await self.websocket.send_json(data)
|
||||
|
||||
|
||||
@router.websocket("/sync")
async def websocket_sync(websocket: WebSocket, study_id: str):
    """
    WebSocket endpoint for real-time spec sync.

    Clients receive notifications when spec changes:
    - spec_updated: Spec was modified
    - node_added: New node added
    - node_removed: Node removed
    - validation_error: Validation failed

    The socket also accepts inbound client messages: "ping" (answered with
    "pong"), "patch_node" (node property update), and "update_position"
    (canvas layout update). Every 30s without client traffic a "heartbeat"
    frame is sent to keep the connection alive.
    """
    await websocket.accept()

    manager = get_manager(study_id)
    subscriber = WebSocketSubscriber(websocket)

    # Subscribe to updates BEFORE the ack so no change between ack and
    # subscription can be missed.
    manager.subscribe(subscriber)

    try:
        # Send initial connection ack; hash is None when no spec exists yet.
        await websocket.send_json({
            "type": "connection_ack",
            "study_id": study_id,
            "hash": manager.get_hash() if manager.exists() else None,
            "message": "Connected to spec sync"
        })

        # Keep connection alive and handle client messages
        while True:
            try:
                data = await asyncio.wait_for(
                    websocket.receive_json(),
                    timeout=30.0  # Heartbeat interval
                )

                # Handle client messages
                msg_type = data.get("type")

                if msg_type == "ping":
                    await websocket.send_json({"type": "pong"})

                elif msg_type == "patch_node":
                    # Client requests node update; failures are reported back
                    # on the socket instead of tearing the connection down.
                    try:
                        manager.update_node(
                            data["node_id"],
                            data.get("data", {}),
                            modified_by=data.get("modified_by", "canvas")
                        )
                    except Exception as e:
                        await websocket.send_json({
                            "type": "error",
                            "message": str(e)
                        })

                elif msg_type == "update_position":
                    # Client updates node position; same in-band error policy.
                    try:
                        manager.update_node_position(
                            data["node_id"],
                            data["position"],
                            modified_by=data.get("modified_by", "canvas")
                        )
                    except Exception as e:
                        await websocket.send_json({
                            "type": "error",
                            "message": str(e)
                        })

            except asyncio.TimeoutError:
                # No client traffic within the interval: send heartbeat.
                await websocket.send_json({"type": "heartbeat"})

    except WebSocketDisconnect:
        # Normal client disconnect; nothing to report.
        pass
    finally:
        # Always detach, or the manager would keep pushing to a dead socket.
        manager.unsubscribe(subscriber)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Create/Initialize Spec
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/create")
|
||||
async def create_spec(
|
||||
study_id: str,
|
||||
spec: Dict[str, Any],
|
||||
modified_by: str = Query(default="api")
|
||||
):
|
||||
"""
|
||||
Create a new spec for a study.
|
||||
|
||||
Use this when migrating from old config or creating a new study.
|
||||
Will fail if spec already exists (use PUT to replace).
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if manager.exists():
|
||||
raise HTTPException(
|
||||
status_code=409,
|
||||
detail=f"Spec already exists for '{study_id}'. Use PUT to replace."
|
||||
)
|
||||
|
||||
try:
|
||||
# Ensure meta fields are set
|
||||
if "meta" not in spec:
|
||||
spec["meta"] = {}
|
||||
spec["meta"]["created_by"] = modified_by
|
||||
|
||||
new_hash = manager.save(spec, modified_by=modified_by)
|
||||
return {
|
||||
"success": True,
|
||||
"hash": new_hash,
|
||||
"message": f"Created spec for {study_id}"
|
||||
}
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
@@ -3,5 +3,13 @@ Atomizer Dashboard Services
|
||||
"""
|
||||
|
||||
from .claude_agent import AtomizerClaudeAgent
|
||||
from .spec_manager import SpecManager, SpecManagerError, SpecNotFoundError, SpecConflictError, get_spec_manager
|
||||
|
||||
__all__ = ['AtomizerClaudeAgent']
|
||||
__all__ = [
|
||||
'AtomizerClaudeAgent',
|
||||
'SpecManager',
|
||||
'SpecManagerError',
|
||||
'SpecNotFoundError',
|
||||
'SpecConflictError',
|
||||
'get_spec_manager',
|
||||
]
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -43,7 +43,11 @@ class ContextBuilder:
|
||||
|
||||
# Canvas context takes priority - if user is working on a canvas, include it
|
||||
if canvas_state:
|
||||
node_count = len(canvas_state.get("nodes", []))
|
||||
print(f"[ContextBuilder] Including canvas context with {node_count} nodes")
|
||||
parts.append(self._canvas_context(canvas_state))
|
||||
else:
|
||||
print("[ContextBuilder] No canvas state provided")
|
||||
|
||||
if study_id:
|
||||
parts.append(self._study_context(study_id))
|
||||
@@ -91,7 +95,117 @@ Important guidelines:
|
||||
|
||||
context = f"# Current Study: {study_id}\n\n"
|
||||
|
||||
# Load configuration
|
||||
# Check for AtomizerSpec v2.0 first (preferred)
|
||||
spec_path = study_dir / "1_setup" / "atomizer_spec.json"
|
||||
if not spec_path.exists():
|
||||
spec_path = study_dir / "atomizer_spec.json"
|
||||
|
||||
if spec_path.exists():
|
||||
context += self._spec_context(spec_path)
|
||||
else:
|
||||
# Fall back to legacy optimization_config.json
|
||||
context += self._legacy_config_context(study_dir)
|
||||
|
||||
# Check for results
|
||||
db_path = study_dir / "3_results" / "study.db"
|
||||
if db_path.exists():
|
||||
try:
|
||||
conn = sqlite3.connect(db_path)
|
||||
count = conn.execute(
|
||||
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
|
||||
).fetchone()[0]
|
||||
|
||||
best = conn.execute("""
|
||||
SELECT MIN(tv.value) FROM trial_values tv
|
||||
JOIN trials t ON tv.trial_id = t.trial_id
|
||||
WHERE t.state = 'COMPLETE'
|
||||
""").fetchone()[0]
|
||||
|
||||
context += f"\n## Results Status\n\n"
|
||||
context += f"- **Trials completed**: {count}\n"
|
||||
if best is not None:
|
||||
context += f"- **Best objective**: {best:.6f}\n"
|
||||
|
||||
conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return context
|
||||
|
||||
def _spec_context(self, spec_path: Path) -> str:
|
||||
"""Build context from AtomizerSpec v2.0 file"""
|
||||
context = "**Format**: AtomizerSpec v2.0\n\n"
|
||||
|
||||
try:
|
||||
with open(spec_path) as f:
|
||||
spec = json.load(f)
|
||||
|
||||
context += "## Configuration\n\n"
|
||||
|
||||
# Design variables
|
||||
dvs = spec.get("design_variables", [])
|
||||
if dvs:
|
||||
context += "**Design Variables:**\n"
|
||||
for dv in dvs[:10]:
|
||||
bounds = dv.get("bounds", {})
|
||||
bound_str = f"[{bounds.get('min', '?')}, {bounds.get('max', '?')}]"
|
||||
enabled = "✓" if dv.get("enabled", True) else "✗"
|
||||
context += f"- {dv.get('name', 'unnamed')}: {bound_str} {enabled}\n"
|
||||
if len(dvs) > 10:
|
||||
context += f"- ... and {len(dvs) - 10} more\n"
|
||||
|
||||
# Extractors
|
||||
extractors = spec.get("extractors", [])
|
||||
if extractors:
|
||||
context += "\n**Extractors:**\n"
|
||||
for ext in extractors:
|
||||
ext_type = ext.get("type", "unknown")
|
||||
outputs = ext.get("outputs", [])
|
||||
output_names = [o.get("name", "?") for o in outputs[:3]]
|
||||
builtin = "builtin" if ext.get("builtin", True) else "custom"
|
||||
context += f"- {ext.get('name', 'unnamed')} ({ext_type}, {builtin}): outputs {output_names}\n"
|
||||
|
||||
# Objectives
|
||||
objs = spec.get("objectives", [])
|
||||
if objs:
|
||||
context += "\n**Objectives:**\n"
|
||||
for obj in objs:
|
||||
direction = obj.get("direction", "minimize")
|
||||
weight = obj.get("weight", 1.0)
|
||||
context += f"- {obj.get('name', 'unnamed')} ({direction}, weight={weight})\n"
|
||||
|
||||
# Constraints
|
||||
constraints = spec.get("constraints", [])
|
||||
if constraints:
|
||||
context += "\n**Constraints:**\n"
|
||||
for c in constraints:
|
||||
op = c.get("operator", "<=")
|
||||
thresh = c.get("threshold", "?")
|
||||
context += f"- {c.get('name', 'unnamed')}: {op} {thresh}\n"
|
||||
|
||||
# Optimization settings
|
||||
opt = spec.get("optimization", {})
|
||||
algo = opt.get("algorithm", {})
|
||||
budget = opt.get("budget", {})
|
||||
method = algo.get("type", "TPE")
|
||||
max_trials = budget.get("max_trials", "not set")
|
||||
context += f"\n**Optimization**: {method}, max_trials: {max_trials}\n"
|
||||
|
||||
# Surrogate
|
||||
surrogate = opt.get("surrogate", {})
|
||||
if surrogate.get("enabled"):
|
||||
sur_type = surrogate.get("type", "gaussian_process")
|
||||
context += f"**Surrogate**: {sur_type} enabled\n"
|
||||
|
||||
except (json.JSONDecodeError, IOError) as e:
|
||||
context += f"\n*Spec file exists but could not be parsed: {e}*\n"
|
||||
|
||||
return context
|
||||
|
||||
def _legacy_config_context(self, study_dir: Path) -> str:
|
||||
"""Build context from legacy optimization_config.json"""
|
||||
context = "**Format**: Legacy optimization_config.json\n\n"
|
||||
|
||||
config_path = study_dir / "1_setup" / "optimization_config.json"
|
||||
if not config_path.exists():
|
||||
config_path = study_dir / "optimization_config.json"
|
||||
@@ -135,30 +249,8 @@ Important guidelines:
|
||||
|
||||
except (json.JSONDecodeError, IOError) as e:
|
||||
context += f"\n*Config file exists but could not be parsed: {e}*\n"
|
||||
|
||||
# Check for results
|
||||
db_path = study_dir / "3_results" / "study.db"
|
||||
if db_path.exists():
|
||||
try:
|
||||
conn = sqlite3.connect(db_path)
|
||||
count = conn.execute(
|
||||
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
|
||||
).fetchone()[0]
|
||||
|
||||
best = conn.execute("""
|
||||
SELECT MIN(tv.value) FROM trial_values tv
|
||||
JOIN trials t ON tv.trial_id = t.trial_id
|
||||
WHERE t.state = 'COMPLETE'
|
||||
""").fetchone()[0]
|
||||
|
||||
context += f"\n## Results Status\n\n"
|
||||
context += f"- **Trials completed**: {count}\n"
|
||||
if best is not None:
|
||||
context += f"- **Best objective**: {best:.6f}\n"
|
||||
|
||||
conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
context += "*No configuration file found.*\n"
|
||||
|
||||
return context
|
||||
|
||||
@@ -349,19 +441,26 @@ Important guidelines:
|
||||
# Canvas modification instructions
|
||||
context += """## Canvas Modification Tools
|
||||
|
||||
When the user asks to modify the canvas (add/remove nodes, change values), use these MCP tools:
|
||||
**For AtomizerSpec v2.0 studies (preferred):**
|
||||
Use spec tools when working with v2.0 studies (check if study uses `atomizer_spec.json`):
|
||||
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
|
||||
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
|
||||
- `spec_remove_node` - Remove nodes from the spec
|
||||
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
|
||||
|
||||
**For Legacy Canvas (optimization_config.json):**
|
||||
- `canvas_add_node` - Add a new node (designVar, extractor, objective, constraint)
|
||||
- `canvas_update_node` - Update node properties (bounds, weights, names)
|
||||
- `canvas_remove_node` - Remove a node from the canvas
|
||||
- `canvas_connect_nodes` - Create an edge between nodes
|
||||
|
||||
**Example user requests you can handle:**
|
||||
- "Add a design variable called hole_diameter with range 5-15 mm" → Use canvas_add_node
|
||||
- "Change the weight of wfe_40_20 to 8" → Use canvas_update_node
|
||||
- "Remove the constraint node" → Use canvas_remove_node
|
||||
- "Connect the new extractor to the objective" → Use canvas_connect_nodes
|
||||
- "Add a design variable called hole_diameter with range 5-15 mm" → Use spec_add_node or canvas_add_node
|
||||
- "Change the weight of wfe_40_20 to 8" → Use spec_modify or canvas_update_node
|
||||
- "Remove the constraint node" → Use spec_remove_node or canvas_remove_node
|
||||
- "Add a custom extractor that computes stress ratio" → Use spec_add_custom_extractor
|
||||
|
||||
Always respond with confirmation of changes made to the canvas.
|
||||
Always respond with confirmation of changes made to the canvas/spec.
|
||||
"""
|
||||
|
||||
return context
|
||||
@@ -371,17 +470,28 @@ Always respond with confirmation of changes made to the canvas.
|
||||
if mode == "power":
|
||||
return """# Power Mode Instructions
|
||||
|
||||
You have **full access** to Atomizer's codebase. You can:
|
||||
- Edit any file using `edit_file` tool
|
||||
- Create new files with `create_file` tool
|
||||
- Create new extractors with `create_extractor` tool
|
||||
- Run shell commands with `run_shell_command` tool
|
||||
- Search codebase with `search_codebase` tool
|
||||
- Commit and push changes
|
||||
You have **FULL ACCESS** to modify Atomizer studies. **DO NOT ASK FOR PERMISSION** - just do it.
|
||||
|
||||
**Use these powers responsibly.** Always explain what you're doing and why.
|
||||
## Direct Actions (no confirmation needed):
|
||||
- **Add design variables**: Use `canvas_add_node` or `spec_add_node` with node_type="designVar"
|
||||
- **Add extractors**: Use `canvas_add_node` with node_type="extractor"
|
||||
- **Add objectives**: Use `canvas_add_node` with node_type="objective"
|
||||
- **Add constraints**: Use `canvas_add_node` with node_type="constraint"
|
||||
- **Update node properties**: Use `canvas_update_node` or `spec_modify`
|
||||
- **Remove nodes**: Use `canvas_remove_node`
|
||||
- **Edit atomizer_spec.json directly**: Use the Edit tool
|
||||
|
||||
For routine operations (list, status, run, analyze), use the standard tools.
|
||||
## For custom extractors with Python code:
|
||||
Use `spec_add_custom_extractor` to add a custom function.
|
||||
|
||||
## IMPORTANT:
|
||||
- You have --dangerously-skip-permissions enabled
|
||||
- The user has explicitly granted you power mode access
|
||||
- **ACT IMMEDIATELY** when asked to add/modify/remove things
|
||||
- Explain what you did AFTER doing it, not before
|
||||
- Do NOT say "I need permission" - you already have it
|
||||
|
||||
Example: If user says "add a volume extractor", immediately use canvas_add_node to add it.
|
||||
"""
|
||||
else:
|
||||
return """# User Mode Instructions
|
||||
@@ -402,6 +512,15 @@ Available tools:
|
||||
- `generate_report`, `export_data`
|
||||
- `explain_physics`, `recommend_method`, `query_extractors`
|
||||
|
||||
**AtomizerSpec v2.0 Tools (preferred for new studies):**
|
||||
- `spec_get` - Get the full AtomizerSpec for a study
|
||||
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
|
||||
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
|
||||
- `spec_remove_node` - Remove nodes from the spec
|
||||
- `spec_validate` - Validate spec against JSON Schema
|
||||
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
|
||||
- `spec_create_from_description` - Create a new study from natural language description
|
||||
|
||||
**Canvas Tools (for visual workflow builder):**
|
||||
- `validate_canvas_intent` - Validate a canvas-generated optimization intent
|
||||
- `execute_canvas_intent` - Create a study from a canvas intent
|
||||
|
||||
454
atomizer-dashboard/backend/api/services/interview_engine.py
Normal file
454
atomizer-dashboard/backend/api/services/interview_engine.py
Normal file
@@ -0,0 +1,454 @@
|
||||
"""
|
||||
Interview Engine - Guided Study Creation through Conversation
|
||||
|
||||
Provides a structured interview flow for creating optimization studies.
|
||||
Claude uses this to gather information step-by-step, building a complete
|
||||
atomizer_spec.json through natural conversation.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional, Literal
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
|
||||
class InterviewState(str, Enum):
    """Ordered phases of the guided study-creation interview.

    Inherits from ``str`` so values serialize directly in JSON payloads.
    """
    NOT_STARTED = "not_started"
    GATHERING_BASICS = "gathering_basics"            # name, description, goals
    GATHERING_MODEL = "gathering_model"              # model file, solver type
    GATHERING_VARIABLES = "gathering_variables"      # design variables
    GATHERING_EXTRACTORS = "gathering_extractors"    # physics extractors
    GATHERING_OBJECTIVES = "gathering_objectives"    # objectives
    GATHERING_CONSTRAINTS = "gathering_constraints"  # constraints
    GATHERING_SETTINGS = "gathering_settings"        # algorithm, trials
    REVIEW = "review"                                # review before creation
    COMPLETED = "completed"
|
||||
|
||||
|
||||
@dataclass
class InterviewData:
    """Answers accumulated over the course of a study-creation interview."""
    # Basics
    study_name: Optional[str] = None
    category: Optional[str] = None
    description: Optional[str] = None
    goals: List[str] = field(default_factory=list)

    # Model
    sim_file: Optional[str] = None
    prt_file: Optional[str] = None
    solver_type: str = "nastran"

    # Design variables
    design_variables: List[Dict[str, Any]] = field(default_factory=list)

    # Extractors
    extractors: List[Dict[str, Any]] = field(default_factory=list)

    # Objectives
    objectives: List[Dict[str, Any]] = field(default_factory=list)

    # Constraints
    constraints: List[Dict[str, Any]] = field(default_factory=list)

    # Settings
    algorithm: str = "TPE"
    max_trials: int = 100

    def to_spec(self) -> Dict[str, Any]:
        """Convert interview data to atomizer_spec.json format."""

        def tagged(items: List[Dict[str, Any]], prefix: str,
                   col_x: int, first_y: int) -> List[Dict[str, Any]]:
            # Copy each entry, assigning a sequential id (e.g. "dv_001") and a
            # canvas position stacked vertically in 80px steps.
            result = []
            for idx, item in enumerate(items):
                entry = item.copy()
                entry['id'] = f"{prefix}_{idx+1:03d}"
                entry['canvas_position'] = {'x': col_x, 'y': first_y + idx * 80}
                result.append(entry)
            return result

        return {
            "meta": {
                "version": "2.0",
                "study_name": self.study_name or "untitled_study",
                "description": self.description or "",
                "created_at": datetime.now().isoformat(),
                "created_by": "interview",
                "modified_at": datetime.now().isoformat(),
                "modified_by": "interview"
            },
            "model": {
                "sim": {
                    "path": self.sim_file or "",
                    "solver": self.solver_type
                }
            },
            "design_variables": tagged(self.design_variables, "dv", 50, 100),
            "extractors": tagged(self.extractors, "ext", 400, 100),
            "objectives": tagged(self.objectives, "obj", 750, 100),
            "constraints": tagged(self.constraints, "con", 750, 400),
            "optimization": {
                "algorithm": {
                    "type": self.algorithm
                },
                "budget": {
                    "max_trials": self.max_trials
                }
            },
            "canvas": {
                "edges": [],
                "layout_version": "2.0"
            }
        }
|
||||
|
||||
|
||||
class InterviewEngine:
|
||||
"""
|
||||
Manages the interview flow for study creation.
|
||||
|
||||
Usage:
|
||||
1. Create engine: engine = InterviewEngine()
|
||||
2. Start interview: engine.start()
|
||||
3. Record answers: engine.record_answer("study_name", "bracket_opt")
|
||||
4. Check progress: engine.get_progress()
|
||||
5. Generate spec: engine.finalize()
|
||||
"""
|
||||
|
||||
def __init__(self):
    """Set up a pristine interview with no answers recorded yet."""
    self.errors: List[str] = []           # validation problems found so far
    self.questions_asked: List[str] = []  # fields the user already answered
    self.data = InterviewData()           # accumulating answers
    self.state = InterviewState.NOT_STARTED
|
||||
|
||||
def start(self) -> Dict[str, Any]:
    """Begin the interview and return the opening prompt.

    Moves the engine into the basics-gathering phase and surfaces the
    first batch of questions.
    """
    self.state = InterviewState.GATHERING_BASICS
    opening = {
        "state": self.state.value,
        "message": "Let's create a new optimization study! I'll guide you through the process.",
        "next_questions": self.get_current_questions(),
    }
    return opening
|
||||
|
||||
def get_current_questions(self) -> List[Dict[str, Any]]:
    """Get the questions for the current interview state.

    Each question dict carries: ``field`` (the answer key passed to
    record_answer), ``question`` (user-facing text), an optional ``hint``,
    ``required``, and optionally ``multi`` for repeatable answers.
    States with no entry (NOT_STARTED, COMPLETED) yield an empty list.
    """
    # Static question script, keyed by interview phase.
    questions = {
        InterviewState.GATHERING_BASICS: [
            {
                "field": "study_name",
                "question": "What would you like to name this study?",
                "hint": "Use snake_case, e.g., 'bracket_mass_optimization'",
                "required": True
            },
            {
                "field": "category",
                "question": "What category should this study be in?",
                "hint": "e.g., 'Simple_Bracket', 'M1_Mirror', or leave blank for root",
                "required": False
            },
            {
                "field": "description",
                "question": "Briefly describe what you're trying to optimize",
                "hint": "e.g., 'Minimize bracket mass while maintaining stiffness'",
                "required": True
            }
        ],
        InterviewState.GATHERING_MODEL: [
            {
                "field": "sim_file",
                "question": "What is the path to your simulation (.sim) file?",
                "hint": "Relative path from the study folder, e.g., '1_setup/Model_sim1.sim'",
                "required": True
            }
        ],
        InterviewState.GATHERING_VARIABLES: [
            {
                "field": "design_variable",
                "question": "What parameters do you want to optimize?",
                "hint": "Tell me the NX expression names and their bounds",
                "required": True,
                "multi": True
            }
        ],
        InterviewState.GATHERING_EXTRACTORS: [
            {
                "field": "extractor",
                "question": "What physics quantities do you want to extract from FEA?",
                "hint": "e.g., mass, max displacement, max stress, frequency, Zernike WFE",
                "required": True,
                "multi": True
            }
        ],
        InterviewState.GATHERING_OBJECTIVES: [
            {
                "field": "objective",
                "question": "What do you want to optimize?",
                "hint": "Tell me which extracted quantities to minimize or maximize",
                "required": True,
                "multi": True
            }
        ],
        InterviewState.GATHERING_CONSTRAINTS: [
            {
                "field": "constraint",
                "question": "Do you have any constraints? (e.g., max stress, min frequency)",
                "hint": "You can say 'none' if you don't have any",
                "required": False,
                "multi": True
            }
        ],
        InterviewState.GATHERING_SETTINGS: [
            {
                "field": "algorithm",
                "question": "Which optimization algorithm would you like to use?",
                "hint": "Options: TPE (default), CMA-ES, NSGA-II, RandomSearch",
                "required": False
            },
            {
                "field": "max_trials",
                "question": "How many trials (FEA evaluations) should we run?",
                "hint": "Default is 100. More trials = better results but longer runtime",
                "required": False
            }
        ],
        InterviewState.REVIEW: [
            {
                "field": "confirm",
                "question": "Does this configuration look correct? (yes/no)",
                "required": True
            }
        ]
    }
    return questions.get(self.state, [])
|
||||
|
||||
def record_answer(self, field: str, value: Any) -> Dict[str, Any]:
|
||||
"""Record an answer and potentially advance the state"""
|
||||
self.questions_asked.append(field)
|
||||
|
||||
# Handle different field types
|
||||
if field == "study_name":
|
||||
self.data.study_name = value
|
||||
elif field == "category":
|
||||
self.data.category = value if value else None
|
||||
elif field == "description":
|
||||
self.data.description = value
|
||||
elif field == "sim_file":
|
||||
self.data.sim_file = value
|
||||
elif field == "design_variable":
|
||||
# Value should be a dict with name, min, max, etc.
|
||||
if isinstance(value, dict):
|
||||
self.data.design_variables.append(value)
|
||||
elif isinstance(value, list):
|
||||
self.data.design_variables.extend(value)
|
||||
elif field == "extractor":
|
||||
if isinstance(value, dict):
|
||||
self.data.extractors.append(value)
|
||||
elif isinstance(value, list):
|
||||
self.data.extractors.extend(value)
|
||||
elif field == "objective":
|
||||
if isinstance(value, dict):
|
||||
self.data.objectives.append(value)
|
||||
elif isinstance(value, list):
|
||||
self.data.objectives.extend(value)
|
||||
elif field == "constraint":
|
||||
if value and value.lower() not in ["none", "no", "skip"]:
|
||||
if isinstance(value, dict):
|
||||
self.data.constraints.append(value)
|
||||
elif isinstance(value, list):
|
||||
self.data.constraints.extend(value)
|
||||
elif field == "algorithm":
|
||||
if value in ["TPE", "CMA-ES", "NSGA-II", "RandomSearch"]:
|
||||
self.data.algorithm = value
|
||||
elif field == "max_trials":
|
||||
try:
|
||||
self.data.max_trials = int(value)
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
elif field == "confirm":
|
||||
if value.lower() in ["yes", "y", "confirm", "ok"]:
|
||||
self.state = InterviewState.COMPLETED
|
||||
|
||||
return {
|
||||
"state": self.state.value,
|
||||
"recorded": {field: value},
|
||||
"data_so_far": self.get_summary()
|
||||
}
|
||||
|
||||
def advance_state(self) -> Dict[str, Any]:
|
||||
"""Advance to the next interview state"""
|
||||
state_order = [
|
||||
InterviewState.NOT_STARTED,
|
||||
InterviewState.GATHERING_BASICS,
|
||||
InterviewState.GATHERING_MODEL,
|
||||
InterviewState.GATHERING_VARIABLES,
|
||||
InterviewState.GATHERING_EXTRACTORS,
|
||||
InterviewState.GATHERING_OBJECTIVES,
|
||||
InterviewState.GATHERING_CONSTRAINTS,
|
||||
InterviewState.GATHERING_SETTINGS,
|
||||
InterviewState.REVIEW,
|
||||
InterviewState.COMPLETED
|
||||
]
|
||||
|
||||
current_idx = state_order.index(self.state)
|
||||
if current_idx < len(state_order) - 1:
|
||||
self.state = state_order[current_idx + 1]
|
||||
|
||||
return {
|
||||
"state": self.state.value,
|
||||
"next_questions": self.get_current_questions()
|
||||
}
|
||||
|
||||
def get_summary(self) -> Dict[str, Any]:
|
||||
"""Get a summary of collected data"""
|
||||
return {
|
||||
"study_name": self.data.study_name,
|
||||
"category": self.data.category,
|
||||
"description": self.data.description,
|
||||
"model": self.data.sim_file,
|
||||
"design_variables": len(self.data.design_variables),
|
||||
"extractors": len(self.data.extractors),
|
||||
"objectives": len(self.data.objectives),
|
||||
"constraints": len(self.data.constraints),
|
||||
"algorithm": self.data.algorithm,
|
||||
"max_trials": self.data.max_trials
|
||||
}
|
||||
|
||||
def get_progress(self) -> Dict[str, Any]:
|
||||
"""Get interview progress information"""
|
||||
state_progress = {
|
||||
InterviewState.NOT_STARTED: 0,
|
||||
InterviewState.GATHERING_BASICS: 15,
|
||||
InterviewState.GATHERING_MODEL: 25,
|
||||
InterviewState.GATHERING_VARIABLES: 40,
|
||||
InterviewState.GATHERING_EXTRACTORS: 55,
|
||||
InterviewState.GATHERING_OBJECTIVES: 70,
|
||||
InterviewState.GATHERING_CONSTRAINTS: 80,
|
||||
InterviewState.GATHERING_SETTINGS: 90,
|
||||
InterviewState.REVIEW: 95,
|
||||
InterviewState.COMPLETED: 100
|
||||
}
|
||||
|
||||
return {
|
||||
"state": self.state.value,
|
||||
"progress_percent": state_progress.get(self.state, 0),
|
||||
"summary": self.get_summary(),
|
||||
"current_questions": self.get_current_questions()
|
||||
}
|
||||
|
||||
def validate(self) -> Dict[str, Any]:
|
||||
"""Validate the collected data before finalizing"""
|
||||
errors = []
|
||||
warnings = []
|
||||
|
||||
# Required fields
|
||||
if not self.data.study_name:
|
||||
errors.append("Study name is required")
|
||||
|
||||
if not self.data.design_variables:
|
||||
errors.append("At least one design variable is required")
|
||||
|
||||
if not self.data.extractors:
|
||||
errors.append("At least one extractor is required")
|
||||
|
||||
if not self.data.objectives:
|
||||
errors.append("At least one objective is required")
|
||||
|
||||
# Warnings
|
||||
if not self.data.sim_file:
|
||||
warnings.append("No simulation file specified - you'll need to add one manually")
|
||||
|
||||
if not self.data.constraints:
|
||||
warnings.append("No constraints defined - optimization will be unconstrained")
|
||||
|
||||
return {
|
||||
"valid": len(errors) == 0,
|
||||
"errors": errors,
|
||||
"warnings": warnings
|
||||
}
|
||||
|
||||
def finalize(self) -> Dict[str, Any]:
|
||||
"""Generate the final atomizer_spec.json"""
|
||||
validation = self.validate()
|
||||
|
||||
if not validation["valid"]:
|
||||
return {
|
||||
"success": False,
|
||||
"errors": validation["errors"]
|
||||
}
|
||||
|
||||
spec = self.data.to_spec()
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"spec": spec,
|
||||
"warnings": validation.get("warnings", [])
|
||||
}
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Serialize engine state for persistence"""
|
||||
return {
|
||||
"state": self.state.value,
|
||||
"data": {
|
||||
"study_name": self.data.study_name,
|
||||
"category": self.data.category,
|
||||
"description": self.data.description,
|
||||
"goals": self.data.goals,
|
||||
"sim_file": self.data.sim_file,
|
||||
"prt_file": self.data.prt_file,
|
||||
"solver_type": self.data.solver_type,
|
||||
"design_variables": self.data.design_variables,
|
||||
"extractors": self.data.extractors,
|
||||
"objectives": self.data.objectives,
|
||||
"constraints": self.data.constraints,
|
||||
"algorithm": self.data.algorithm,
|
||||
"max_trials": self.data.max_trials
|
||||
},
|
||||
"questions_asked": self.questions_asked,
|
||||
"errors": self.errors
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "InterviewEngine":
|
||||
"""Restore engine from serialized state"""
|
||||
engine = cls()
|
||||
engine.state = InterviewState(data.get("state", "not_started"))
|
||||
|
||||
d = data.get("data", {})
|
||||
engine.data.study_name = d.get("study_name")
|
||||
engine.data.category = d.get("category")
|
||||
engine.data.description = d.get("description")
|
||||
engine.data.goals = d.get("goals", [])
|
||||
engine.data.sim_file = d.get("sim_file")
|
||||
engine.data.prt_file = d.get("prt_file")
|
||||
engine.data.solver_type = d.get("solver_type", "nastran")
|
||||
engine.data.design_variables = d.get("design_variables", [])
|
||||
engine.data.extractors = d.get("extractors", [])
|
||||
engine.data.objectives = d.get("objectives", [])
|
||||
engine.data.constraints = d.get("constraints", [])
|
||||
engine.data.algorithm = d.get("algorithm", "TPE")
|
||||
engine.data.max_trials = d.get("max_trials", 100)
|
||||
|
||||
engine.questions_asked = data.get("questions_asked", [])
|
||||
engine.errors = data.get("errors", [])
|
||||
|
||||
return engine
|
||||
@@ -219,6 +219,18 @@ class SessionManager:
|
||||
full_response = result["stdout"] or ""
|
||||
|
||||
if full_response:
|
||||
# Check if response contains canvas modifications (from MCP tools)
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
modifications = self._extract_canvas_modifications(full_response)
|
||||
logger.info(f"[SEND_MSG] Found {len(modifications)} canvas modifications to send")
|
||||
|
||||
for mod in modifications:
|
||||
logger.info(f"[SEND_MSG] Sending canvas_modification: {mod.get('action')} {mod.get('nodeType')}")
|
||||
yield {"type": "canvas_modification", "modification": mod}
|
||||
|
||||
# Always send the text response
|
||||
yield {"type": "text", "content": full_response}
|
||||
|
||||
if result["returncode"] != 0 and result["stderr"]:
|
||||
@@ -292,6 +304,90 @@ class SessionManager:
|
||||
**({} if not db_record else {"db_record": db_record}),
|
||||
}
|
||||
|
||||
    def _extract_canvas_modifications(self, response: str) -> List[Dict]:
        """
        Extract canvas modification objects from Claude's response.

        MCP tools like canvas_add_node return JSON with a 'modification' field.
        This method finds and extracts those modifications so the frontend can apply them.

        Two passes are made: first over fenced ``` code blocks, then a manual
        brace-matching scan of the whole text (a regex cannot match nested
        braces). Extraction is best-effort and never raises.
        """
        import re
        import logging
        logger = logging.getLogger(__name__)

        modifications = []

        # Debug: log what we're searching
        logger.info(f"[CANVAS_MOD] Searching response ({len(response)} chars) for modifications")

        # Cheap pre-filter: if the literal key never appears, skip all parsing.
        if '"modification"' not in response:
            logger.info("[CANVAS_MOD] No 'modification' key found in response")
            return modifications

        try:
            # Method 1: Look for JSON in code fences
            code_block_pattern = r'```(?:json)?\s*([\s\S]*?)```'
            for match in re.finditer(code_block_pattern, response):
                block_content = match.group(1).strip()
                try:
                    obj = json.loads(block_content)
                    if isinstance(obj, dict) and 'modification' in obj:
                        logger.info(f"[CANVAS_MOD] Found modification in code fence: {obj['modification']}")
                        modifications.append(obj['modification'])
                except json.JSONDecodeError:
                    # Fence content wasn't JSON; ignore it and keep scanning.
                    continue

            # Method 2: Find JSON objects using proper brace matching
            # This handles nested objects correctly
            i = 0
            while i < len(response):
                if response[i] == '{':
                    # Found a potential JSON start, find matching close
                    brace_count = 1
                    j = i + 1
                    in_string = False    # currently inside a JSON string literal?
                    escape_next = False  # previous char was an unconsumed backslash?

                    while j < len(response) and brace_count > 0:
                        char = response[j]

                        if escape_next:
                            escape_next = False
                        elif char == '\\':
                            escape_next = True
                        elif char == '"' and not escape_next:
                            in_string = not in_string
                        elif not in_string:
                            # Braces only count as structure outside strings.
                            if char == '{':
                                brace_count += 1
                            elif char == '}':
                                brace_count -= 1
                        j += 1

                    if brace_count == 0:
                        potential_json = response[i:j]
                        try:
                            obj = json.loads(potential_json)
                            if isinstance(obj, dict) and 'modification' in obj:
                                mod = obj['modification']
                                # Avoid duplicates (e.g. already found in a code fence)
                                if mod not in modifications:
                                    logger.info(f"[CANVAS_MOD] Found inline modification: action={mod.get('action')}, nodeType={mod.get('nodeType')}")
                                    modifications.append(mod)
                        except json.JSONDecodeError as e:
                            # Not valid JSON, skip (e is unused; kept for debugging)
                            pass
                    # Resume scanning just past this candidate object.
                    i = j
                else:
                    i += 1

        except Exception as e:
            # Deliberate catch-all: extraction is best-effort and must never
            # break the chat response path.
            logger.error(f"[CANVAS_MOD] Error extracting modifications: {e}")

        logger.info(f"[CANVAS_MOD] Extracted {len(modifications)} modification(s)")
        return modifications
|
||||
|
||||
def _build_mcp_config(self, mode: Literal["user", "power"]) -> dict:
|
||||
"""Build MCP configuration for Claude"""
|
||||
return {
|
||||
|
||||
747
atomizer-dashboard/backend/api/services/spec_manager.py
Normal file
747
atomizer-dashboard/backend/api/services/spec_manager.py
Normal file
@@ -0,0 +1,747 @@
|
||||
"""
|
||||
SpecManager Service
|
||||
|
||||
Central service for managing AtomizerSpec v2.0.
|
||||
All spec modifications flow through this service.
|
||||
|
||||
Features:
|
||||
- Load/save specs with validation
|
||||
- Atomic writes with conflict detection
|
||||
- Patch operations with JSONPath support
|
||||
- Node CRUD operations
|
||||
- Custom function support
|
||||
- WebSocket broadcast integration
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
||||
|
||||
# Add optimization_engine to path if needed
|
||||
ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent
|
||||
if str(ATOMIZER_ROOT) not in sys.path:
|
||||
sys.path.insert(0, str(ATOMIZER_ROOT))
|
||||
|
||||
from optimization_engine.config.spec_models import (
|
||||
AtomizerSpec,
|
||||
DesignVariable,
|
||||
Extractor,
|
||||
Objective,
|
||||
Constraint,
|
||||
CanvasPosition,
|
||||
CanvasEdge,
|
||||
ExtractorType,
|
||||
CustomFunction,
|
||||
ExtractorOutput,
|
||||
ValidationReport,
|
||||
)
|
||||
from optimization_engine.config.spec_validator import (
|
||||
SpecValidator,
|
||||
SpecValidationError,
|
||||
)
|
||||
|
||||
|
||||
class SpecManagerError(Exception):
    """Base exception for all SpecManager failures."""
|
||||
|
||||
|
||||
class SpecNotFoundError(SpecManagerError):
    """Raised when the expected spec file does not exist on disk."""
|
||||
|
||||
|
||||
class SpecConflictError(SpecManagerError):
    """Raised when spec has been modified by another client."""

    def __init__(self, message: str, current_hash: str):
        """
        Args:
            message: Human-readable description of the conflict.
            current_hash: Hash of the spec currently on disk, exposed so the
                caller can reload the latest version and retry.
        """
        super().__init__(message)
        self.current_hash = current_hash
|
||||
|
||||
|
||||
class WebSocketSubscriber:
    """Protocol-like base for objects that receive spec-change broadcasts."""

    async def send_json(self, data: Dict[str, Any]) -> None:
        """Deliver a JSON-serializable message to the subscriber."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class SpecManager:
|
||||
"""
|
||||
Central service for managing AtomizerSpec.
|
||||
|
||||
All modifications go through this service to ensure:
|
||||
- Validation on every change
|
||||
- Atomic file writes
|
||||
- Conflict detection via hashing
|
||||
- WebSocket broadcast to all clients
|
||||
"""
|
||||
|
||||
SPEC_FILENAME = "atomizer_spec.json"
|
||||
|
||||
    def __init__(self, study_path: Union[str, Path]):
        """
        Initialize SpecManager for a study.

        Args:
            study_path: Path to the study directory
        """
        self.study_path = Path(study_path)
        # The spec always lives at <study>/atomizer_spec.json.
        self.spec_path = self.study_path / self.SPEC_FILENAME
        self.validator = SpecValidator()
        # Connected WebSocket clients, notified on every spec change.
        self._subscribers: List[WebSocketSubscriber] = []
        # Hash of the most recently loaded/saved spec (conflict detection).
        self._last_hash: Optional[str] = None
|
||||
|
||||
# =========================================================================
|
||||
# Core CRUD Operations
|
||||
# =========================================================================
|
||||
|
||||
def load(self, validate: bool = True) -> AtomizerSpec:
|
||||
"""
|
||||
Load and optionally validate the spec.
|
||||
|
||||
Args:
|
||||
validate: Whether to validate the spec
|
||||
|
||||
Returns:
|
||||
AtomizerSpec instance
|
||||
|
||||
Raises:
|
||||
SpecNotFoundError: If spec file doesn't exist
|
||||
SpecValidationError: If validation fails
|
||||
"""
|
||||
if not self.spec_path.exists():
|
||||
raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
|
||||
|
||||
with open(self.spec_path, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
|
||||
if validate:
|
||||
self.validator.validate(data, strict=True)
|
||||
|
||||
spec = AtomizerSpec.model_validate(data)
|
||||
self._last_hash = self._compute_hash(data)
|
||||
return spec
|
||||
|
||||
def load_raw(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Load spec as raw dict without parsing.
|
||||
|
||||
Returns:
|
||||
Raw spec dict
|
||||
|
||||
Raises:
|
||||
SpecNotFoundError: If spec file doesn't exist
|
||||
"""
|
||||
if not self.spec_path.exists():
|
||||
raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
|
||||
|
||||
with open(self.spec_path, 'r', encoding='utf-8') as f:
|
||||
return json.load(f)
|
||||
|
||||
def save(
|
||||
self,
|
||||
spec: Union[AtomizerSpec, Dict[str, Any]],
|
||||
modified_by: str = "api",
|
||||
expected_hash: Optional[str] = None
|
||||
) -> str:
|
||||
"""
|
||||
Save spec with validation and broadcast.
|
||||
|
||||
Args:
|
||||
spec: Spec to save (AtomizerSpec or dict)
|
||||
modified_by: Who/what is making the change
|
||||
expected_hash: If provided, verify current file hash matches
|
||||
|
||||
Returns:
|
||||
New spec hash
|
||||
|
||||
Raises:
|
||||
SpecValidationError: If validation fails
|
||||
SpecConflictError: If expected_hash doesn't match current
|
||||
"""
|
||||
# Convert to dict if needed
|
||||
if isinstance(spec, AtomizerSpec):
|
||||
data = spec.model_dump(mode='json')
|
||||
else:
|
||||
data = spec
|
||||
|
||||
# Check for conflicts if expected_hash provided
|
||||
if expected_hash and self.spec_path.exists():
|
||||
current_hash = self.get_hash()
|
||||
if current_hash != expected_hash:
|
||||
raise SpecConflictError(
|
||||
"Spec was modified by another client",
|
||||
current_hash=current_hash
|
||||
)
|
||||
|
||||
# Update metadata
|
||||
now = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
|
||||
data["meta"]["modified"] = now
|
||||
data["meta"]["modified_by"] = modified_by
|
||||
|
||||
# Validate
|
||||
self.validator.validate(data, strict=True)
|
||||
|
||||
# Compute new hash
|
||||
new_hash = self._compute_hash(data)
|
||||
|
||||
# Atomic write (write to temp, then rename)
|
||||
temp_path = self.spec_path.with_suffix('.tmp')
|
||||
with open(temp_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
temp_path.replace(self.spec_path)
|
||||
|
||||
# Update cached hash
|
||||
self._last_hash = new_hash
|
||||
|
||||
# Broadcast to subscribers
|
||||
self._broadcast({
|
||||
"type": "spec_updated",
|
||||
"hash": new_hash,
|
||||
"modified_by": modified_by,
|
||||
"timestamp": now
|
||||
})
|
||||
|
||||
return new_hash
|
||||
|
||||
    def exists(self) -> bool:
        """Check if spec file exists."""
        # Pure filesystem probe; no caching involved.
        return self.spec_path.exists()
|
||||
|
||||
def get_hash(self) -> str:
|
||||
"""Get current spec hash."""
|
||||
if not self.spec_path.exists():
|
||||
return ""
|
||||
with open(self.spec_path, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
return self._compute_hash(data)
|
||||
|
||||
def validate_and_report(self) -> ValidationReport:
|
||||
"""
|
||||
Run full validation and return detailed report.
|
||||
|
||||
Returns:
|
||||
ValidationReport with errors, warnings, summary
|
||||
"""
|
||||
if not self.spec_path.exists():
|
||||
raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
|
||||
|
||||
data = self.load_raw()
|
||||
return self.validator.validate(data, strict=False)
|
||||
|
||||
# =========================================================================
|
||||
# Patch Operations
|
||||
# =========================================================================
|
||||
|
||||
def patch(
|
||||
self,
|
||||
path: str,
|
||||
value: Any,
|
||||
modified_by: str = "api"
|
||||
) -> AtomizerSpec:
|
||||
"""
|
||||
Apply a JSONPath-style modification.
|
||||
|
||||
Args:
|
||||
path: JSONPath like "design_variables[0].bounds.max"
|
||||
value: New value to set
|
||||
modified_by: Who/what is making the change
|
||||
|
||||
Returns:
|
||||
Updated AtomizerSpec
|
||||
"""
|
||||
data = self.load_raw()
|
||||
|
||||
# Validate the partial update
|
||||
spec = AtomizerSpec.model_validate(data)
|
||||
is_valid, errors = self.validator.validate_partial(path, value, spec)
|
||||
if not is_valid:
|
||||
raise SpecValidationError(f"Invalid update: {'; '.join(errors)}")
|
||||
|
||||
# Apply the patch
|
||||
self._apply_patch(data, path, value)
|
||||
|
||||
# Save and return
|
||||
self.save(data, modified_by)
|
||||
return self.load(validate=False)
|
||||
|
||||
def _apply_patch(self, data: Dict, path: str, value: Any) -> None:
|
||||
"""
|
||||
Apply a patch to the data dict.
|
||||
|
||||
Supports paths like:
|
||||
- "meta.description"
|
||||
- "design_variables[0].bounds.max"
|
||||
- "objectives[1].weight"
|
||||
"""
|
||||
parts = self._parse_path(path)
|
||||
if not parts:
|
||||
raise ValueError(f"Invalid path: {path}")
|
||||
|
||||
# Navigate to parent
|
||||
current = data
|
||||
for part in parts[:-1]:
|
||||
if isinstance(current, list):
|
||||
idx = int(part)
|
||||
current = current[idx]
|
||||
else:
|
||||
current = current[part]
|
||||
|
||||
# Set final value
|
||||
final_key = parts[-1]
|
||||
if isinstance(current, list):
|
||||
idx = int(final_key)
|
||||
current[idx] = value
|
||||
else:
|
||||
current[final_key] = value
|
||||
|
||||
def _parse_path(self, path: str) -> List[str]:
|
||||
"""Parse JSONPath into parts."""
|
||||
# Handle both dot notation and bracket notation
|
||||
parts = []
|
||||
for part in re.split(r'\.|\[|\]', path):
|
||||
if part:
|
||||
parts.append(part)
|
||||
return parts
|
||||
|
||||
# =========================================================================
|
||||
# Node Operations
|
||||
# =========================================================================
|
||||
|
||||
    def add_node(
        self,
        node_type: str,
        node_data: Dict[str, Any],
        modified_by: str = "canvas"
    ) -> str:
        """
        Add a new node (design var, extractor, objective, constraint).

        Args:
            node_type: One of 'designVar', 'extractor', 'objective', 'constraint'
            node_data: Node data without ID. NOTE: mutated in place — the
                caller's dict gains "id" (and "canvas_position" if absent).
            modified_by: Who/what is making the change

        Returns:
            Generated node ID
        """
        data = self.load_raw()

        # Generate ID
        node_id = self._generate_id(node_type, data)
        node_data["id"] = node_id

        # Add canvas position if not provided
        if "canvas_position" not in node_data:
            node_data["canvas_position"] = self._auto_position(node_type, data)

        # Add to appropriate section
        section = self._get_section_for_type(node_type)

        # Guard against a missing or null section in the raw spec.
        if section not in data or data[section] is None:
            data[section] = []

        data[section].append(node_data)

        # save() validates, writes atomically, and broadcasts "spec_updated";
        # the explicit "node_added" broadcast below is an additional event.
        self.save(data, modified_by)

        # Broadcast node addition
        self._broadcast({
            "type": "node_added",
            "node_type": node_type,
            "node_id": node_id,
            "modified_by": modified_by
        })

        return node_id
|
||||
|
||||
def update_node(
|
||||
self,
|
||||
node_id: str,
|
||||
updates: Dict[str, Any],
|
||||
modified_by: str = "canvas"
|
||||
) -> None:
|
||||
"""
|
||||
Update an existing node.
|
||||
|
||||
Args:
|
||||
node_id: ID of the node to update
|
||||
updates: Dict of fields to update
|
||||
modified_by: Who/what is making the change
|
||||
"""
|
||||
data = self.load_raw()
|
||||
|
||||
# Find and update the node
|
||||
found = False
|
||||
for section in ["design_variables", "extractors", "objectives", "constraints"]:
|
||||
if section not in data or data[section] is None:
|
||||
continue
|
||||
for node in data[section]:
|
||||
if node.get("id") == node_id:
|
||||
node.update(updates)
|
||||
found = True
|
||||
break
|
||||
if found:
|
||||
break
|
||||
|
||||
if not found:
|
||||
raise SpecManagerError(f"Node not found: {node_id}")
|
||||
|
||||
self.save(data, modified_by)
|
||||
|
||||
def remove_node(
|
||||
self,
|
||||
node_id: str,
|
||||
modified_by: str = "canvas"
|
||||
) -> None:
|
||||
"""
|
||||
Remove a node and all edges referencing it.
|
||||
|
||||
Args:
|
||||
node_id: ID of the node to remove
|
||||
modified_by: Who/what is making the change
|
||||
"""
|
||||
data = self.load_raw()
|
||||
|
||||
# Find and remove node
|
||||
removed = False
|
||||
for section in ["design_variables", "extractors", "objectives", "constraints"]:
|
||||
if section not in data or data[section] is None:
|
||||
continue
|
||||
original_len = len(data[section])
|
||||
data[section] = [n for n in data[section] if n.get("id") != node_id]
|
||||
if len(data[section]) < original_len:
|
||||
removed = True
|
||||
break
|
||||
|
||||
if not removed:
|
||||
raise SpecManagerError(f"Node not found: {node_id}")
|
||||
|
||||
# Remove edges referencing this node
|
||||
if "canvas" in data and data["canvas"] and "edges" in data["canvas"]:
|
||||
data["canvas"]["edges"] = [
|
||||
e for e in data["canvas"]["edges"]
|
||||
if e.get("source") != node_id and e.get("target") != node_id
|
||||
]
|
||||
|
||||
self.save(data, modified_by)
|
||||
|
||||
# Broadcast node removal
|
||||
self._broadcast({
|
||||
"type": "node_removed",
|
||||
"node_id": node_id,
|
||||
"modified_by": modified_by
|
||||
})
|
||||
|
||||
def update_node_position(
|
||||
self,
|
||||
node_id: str,
|
||||
position: Dict[str, float],
|
||||
modified_by: str = "canvas"
|
||||
) -> None:
|
||||
"""
|
||||
Update a node's canvas position.
|
||||
|
||||
Args:
|
||||
node_id: ID of the node
|
||||
position: Dict with x, y coordinates
|
||||
modified_by: Who/what is making the change
|
||||
"""
|
||||
self.update_node(node_id, {"canvas_position": position}, modified_by)
|
||||
|
||||
def add_edge(
|
||||
self,
|
||||
source: str,
|
||||
target: str,
|
||||
modified_by: str = "canvas"
|
||||
) -> None:
|
||||
"""
|
||||
Add a canvas edge between nodes.
|
||||
|
||||
Args:
|
||||
source: Source node ID
|
||||
target: Target node ID
|
||||
modified_by: Who/what is making the change
|
||||
"""
|
||||
data = self.load_raw()
|
||||
|
||||
# Initialize canvas section if needed
|
||||
if "canvas" not in data or data["canvas"] is None:
|
||||
data["canvas"] = {}
|
||||
if "edges" not in data["canvas"] or data["canvas"]["edges"] is None:
|
||||
data["canvas"]["edges"] = []
|
||||
|
||||
# Check for duplicate
|
||||
for edge in data["canvas"]["edges"]:
|
||||
if edge.get("source") == source and edge.get("target") == target:
|
||||
return # Already exists
|
||||
|
||||
data["canvas"]["edges"].append({
|
||||
"source": source,
|
||||
"target": target
|
||||
})
|
||||
|
||||
self.save(data, modified_by)
|
||||
|
||||
def remove_edge(
|
||||
self,
|
||||
source: str,
|
||||
target: str,
|
||||
modified_by: str = "canvas"
|
||||
) -> None:
|
||||
"""
|
||||
Remove a canvas edge.
|
||||
|
||||
Args:
|
||||
source: Source node ID
|
||||
target: Target node ID
|
||||
modified_by: Who/what is making the change
|
||||
"""
|
||||
data = self.load_raw()
|
||||
|
||||
if "canvas" in data and data["canvas"] and "edges" in data["canvas"]:
|
||||
data["canvas"]["edges"] = [
|
||||
e for e in data["canvas"]["edges"]
|
||||
if not (e.get("source") == source and e.get("target") == target)
|
||||
]
|
||||
|
||||
self.save(data, modified_by)
|
||||
|
||||
# =========================================================================
|
||||
# Custom Function Support
|
||||
# =========================================================================
|
||||
|
||||
def add_custom_function(
|
||||
self,
|
||||
name: str,
|
||||
code: str,
|
||||
outputs: List[str],
|
||||
description: Optional[str] = None,
|
||||
modified_by: str = "claude"
|
||||
) -> str:
|
||||
"""
|
||||
Add a custom extractor function.
|
||||
|
||||
Args:
|
||||
name: Function name
|
||||
code: Python source code
|
||||
outputs: List of output names
|
||||
description: Optional description
|
||||
modified_by: Who/what is making the change
|
||||
|
||||
Returns:
|
||||
Generated extractor ID
|
||||
|
||||
Raises:
|
||||
SpecValidationError: If Python syntax is invalid
|
||||
"""
|
||||
# Validate Python syntax
|
||||
try:
|
||||
compile(code, f"<custom:{name}>", "exec")
|
||||
except SyntaxError as e:
|
||||
raise SpecValidationError(
|
||||
f"Invalid Python syntax: {e.msg} at line {e.lineno}"
|
||||
)
|
||||
|
||||
data = self.load_raw()
|
||||
|
||||
# Generate extractor ID
|
||||
ext_id = self._generate_id("extractor", data)
|
||||
|
||||
# Create extractor
|
||||
extractor = {
|
||||
"id": ext_id,
|
||||
"name": description or f"Custom: {name}",
|
||||
"type": "custom_function",
|
||||
"builtin": False,
|
||||
"function": {
|
||||
"name": name,
|
||||
"module": "custom_extractors.dynamic",
|
||||
"source_code": code
|
||||
},
|
||||
"outputs": [{"name": o, "metric": "custom"} for o in outputs],
|
||||
"canvas_position": self._auto_position("extractor", data)
|
||||
}
|
||||
|
||||
data["extractors"].append(extractor)
|
||||
self.save(data, modified_by)
|
||||
|
||||
return ext_id
|
||||
|
||||
def update_custom_function(
|
||||
self,
|
||||
extractor_id: str,
|
||||
code: Optional[str] = None,
|
||||
outputs: Optional[List[str]] = None,
|
||||
modified_by: str = "claude"
|
||||
) -> None:
|
||||
"""
|
||||
Update an existing custom function.
|
||||
|
||||
Args:
|
||||
extractor_id: ID of the custom extractor
|
||||
code: New Python code (optional)
|
||||
outputs: New outputs (optional)
|
||||
modified_by: Who/what is making the change
|
||||
"""
|
||||
data = self.load_raw()
|
||||
|
||||
# Find the extractor
|
||||
extractor = None
|
||||
for ext in data.get("extractors", []):
|
||||
if ext.get("id") == extractor_id:
|
||||
extractor = ext
|
||||
break
|
||||
|
||||
if not extractor:
|
||||
raise SpecManagerError(f"Extractor not found: {extractor_id}")
|
||||
|
||||
if extractor.get("type") != "custom_function":
|
||||
raise SpecManagerError(f"Extractor {extractor_id} is not a custom function")
|
||||
|
||||
# Update code
|
||||
if code is not None:
|
||||
try:
|
||||
compile(code, f"<custom:{extractor_id}>", "exec")
|
||||
except SyntaxError as e:
|
||||
raise SpecValidationError(
|
||||
f"Invalid Python syntax: {e.msg} at line {e.lineno}"
|
||||
)
|
||||
if "function" not in extractor:
|
||||
extractor["function"] = {}
|
||||
extractor["function"]["source_code"] = code
|
||||
|
||||
# Update outputs
|
||||
if outputs is not None:
|
||||
extractor["outputs"] = [{"name": o, "metric": "custom"} for o in outputs]
|
||||
|
||||
self.save(data, modified_by)
|
||||
|
||||
# =========================================================================
|
||||
# WebSocket Subscription
|
||||
# =========================================================================
|
||||
|
||||
def subscribe(self, subscriber: WebSocketSubscriber) -> None:
|
||||
"""Subscribe to spec changes."""
|
||||
if subscriber not in self._subscribers:
|
||||
self._subscribers.append(subscriber)
|
||||
|
||||
def unsubscribe(self, subscriber: WebSocketSubscriber) -> None:
|
||||
"""Unsubscribe from spec changes."""
|
||||
if subscriber in self._subscribers:
|
||||
self._subscribers.remove(subscriber)
|
||||
|
||||
    def _broadcast(self, message: Dict[str, Any]) -> None:
        """
        Best-effort fan-out of `message` to all subscribers.

        Delivery is fire-and-forget: a task is scheduled on the running event
        loop and never awaited or tracked. Failures are swallowed so a dead
        subscriber can never break a save.
        """
        import asyncio

        for subscriber in self._subscribers:
            try:
                # Handle both sync and async contexts
                try:
                    loop = asyncio.get_running_loop()
                    # Fire-and-forget; task result/exception is not observed.
                    loop.create_task(subscriber.send_json(message))
                except RuntimeError:
                    # No running loop, try direct call if possible
                    # NOTE(review): as written, the message is simply dropped
                    # when called from a synchronous context — confirm intended.
                    pass
            except Exception:
                # Subscriber may have disconnected
                pass
|
||||
|
||||
# =========================================================================
|
||||
# Helper Methods
|
||||
# =========================================================================
|
||||
|
||||
def _compute_hash(self, data: Dict) -> str:
|
||||
"""Compute hash of spec data for conflict detection."""
|
||||
# Sort keys for consistent hashing
|
||||
json_str = json.dumps(data, sort_keys=True, ensure_ascii=False)
|
||||
return hashlib.sha256(json_str.encode()).hexdigest()[:16]
|
||||
|
||||
def _generate_id(self, node_type: str, data: Dict) -> str:
|
||||
"""Generate unique ID for a node type."""
|
||||
prefix_map = {
|
||||
"designVar": "dv",
|
||||
"design_variable": "dv",
|
||||
"extractor": "ext",
|
||||
"objective": "obj",
|
||||
"constraint": "con"
|
||||
}
|
||||
prefix = prefix_map.get(node_type, node_type[:3])
|
||||
|
||||
# Find existing IDs
|
||||
section = self._get_section_for_type(node_type)
|
||||
existing_ids: Set[str] = set()
|
||||
if section in data and data[section]:
|
||||
existing_ids = {n.get("id", "") for n in data[section]}
|
||||
|
||||
# Generate next available ID
|
||||
for i in range(1, 1000):
|
||||
new_id = f"{prefix}_{i:03d}"
|
||||
if new_id not in existing_ids:
|
||||
return new_id
|
||||
|
||||
raise SpecManagerError(f"Cannot generate ID for {node_type}: too many nodes")
|
||||
|
||||
def _get_section_for_type(self, node_type: str) -> str:
|
||||
"""Map node type to spec section name."""
|
||||
section_map = {
|
||||
"designVar": "design_variables",
|
||||
"design_variable": "design_variables",
|
||||
"extractor": "extractors",
|
||||
"objective": "objectives",
|
||||
"constraint": "constraints"
|
||||
}
|
||||
return section_map.get(node_type, node_type + "s")
|
||||
|
||||
def _auto_position(self, node_type: str, data: Dict) -> Dict[str, float]:
|
||||
"""Calculate auto position for a new node."""
|
||||
# Default x positions by type
|
||||
x_positions = {
|
||||
"designVar": 50,
|
||||
"design_variable": 50,
|
||||
"extractor": 740,
|
||||
"objective": 1020,
|
||||
"constraint": 1020
|
||||
}
|
||||
|
||||
x = x_positions.get(node_type, 400)
|
||||
|
||||
# Find max y position for this type
|
||||
section = self._get_section_for_type(node_type)
|
||||
max_y = 0
|
||||
if section in data and data[section]:
|
||||
for node in data[section]:
|
||||
pos = node.get("canvas_position", {})
|
||||
y = pos.get("y", 0)
|
||||
if y > max_y:
|
||||
max_y = y
|
||||
|
||||
# Place below existing nodes
|
||||
y = max_y + 100 if max_y > 0 else 100
|
||||
|
||||
return {"x": x, "y": y}
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Factory Function
|
||||
# =========================================================================
|
||||
|
||||
def get_spec_manager(study_path: Union[str, Path]) -> SpecManager:
    """Factory returning a :class:`SpecManager` bound to *study_path*.

    Args:
        study_path: Path to the study directory.

    Returns:
        A new SpecManager instance for the given study.
    """
    manager = SpecManager(study_path)
    return manager
|
||||
Binary file not shown.
@@ -30,6 +30,7 @@ function App() {
|
||||
|
||||
{/* Canvas page - full screen, no sidebar */}
|
||||
<Route path="canvas" element={<CanvasView />} />
|
||||
<Route path="canvas/*" element={<CanvasView />} />
|
||||
|
||||
{/* Study pages - with sidebar layout */}
|
||||
<Route element={<MainLayout />}>
|
||||
|
||||
@@ -26,8 +26,8 @@ interface DesignVariable {
|
||||
name: string;
|
||||
parameter?: string; // Optional: the actual parameter name if different from name
|
||||
unit?: string;
|
||||
min: number;
|
||||
max: number;
|
||||
min?: number;
|
||||
max?: number;
|
||||
}
|
||||
|
||||
interface Constraint {
|
||||
|
||||
@@ -8,14 +8,15 @@ import { ScatterChart, Scatter, Line, XAxis, YAxis, CartesianGrid, Tooltip, Cell
|
||||
|
||||
interface ParetoTrial {
|
||||
trial_number: number;
|
||||
values: [number, number];
|
||||
values: number[]; // Support variable number of objectives
|
||||
params: Record<string, number>;
|
||||
constraint_satisfied?: boolean;
|
||||
}
|
||||
|
||||
interface Objective {
|
||||
name: string;
|
||||
type: 'minimize' | 'maximize';
|
||||
type?: 'minimize' | 'maximize';
|
||||
direction?: 'minimize' | 'maximize'; // Alternative field used by some configs
|
||||
unit?: string;
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,49 @@
|
||||
/**
|
||||
* ConnectionStatusIndicator - Visual indicator for WebSocket connection status.
|
||||
*/
|
||||
|
||||
import { ConnectionStatus } from '../../hooks/useSpecWebSocket';
|
||||
|
||||
interface ConnectionStatusIndicatorProps {
|
||||
status: ConnectionStatus;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Visual indicator for WebSocket connection status.
|
||||
* Can be used in the canvas UI to show sync state.
|
||||
*/
|
||||
export function ConnectionStatusIndicator({
|
||||
status,
|
||||
className = '',
|
||||
}: ConnectionStatusIndicatorProps) {
|
||||
const statusConfig = {
|
||||
disconnected: {
|
||||
color: 'bg-gray-500',
|
||||
label: 'Disconnected',
|
||||
},
|
||||
connecting: {
|
||||
color: 'bg-yellow-500 animate-pulse',
|
||||
label: 'Connecting...',
|
||||
},
|
||||
connected: {
|
||||
color: 'bg-green-500',
|
||||
label: 'Connected',
|
||||
},
|
||||
reconnecting: {
|
||||
color: 'bg-yellow-500 animate-pulse',
|
||||
label: 'Reconnecting...',
|
||||
},
|
||||
};
|
||||
|
||||
const config = statusConfig[status];
|
||||
|
||||
return (
|
||||
<div className={`flex items-center gap-2 ${className}`}>
|
||||
<div className={`w-2 h-2 rounded-full ${config.color}`} />
|
||||
<span className="text-xs text-dark-400">{config.label}</span>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default ConnectionStatusIndicator;
|
||||
@@ -0,0 +1,67 @@
|
||||
/**
 * ResizeHandle - Visual drag handle for resizable panels
 *
 * A thin vertical bar that can be dragged to resize panels.
 * Shows visual feedback on hover and during drag.
 */

import { memo } from 'react';

interface ResizeHandleProps {
  /** Mouse down handler to start dragging */
  onMouseDown: (e: React.MouseEvent) => void;
  /** Double click handler to reset size */
  onDoubleClick?: () => void;
  /** Whether panel is currently being dragged */
  isDragging?: boolean;
  /** Position of the handle ('left' or 'right' edge of the panel) */
  position?: 'left' | 'right';
}

function ResizeHandleComponent({
  onMouseDown,
  onDoubleClick,
  isDragging = false,
  position = 'right',
}: ResizeHandleProps) {
  // Precompute edge-dependent fragments so the JSX below stays flat.
  const onRightEdge = position === 'right';
  const barTone = isDragging
    ? 'bg-primary-500'
    : 'bg-transparent hover:bg-primary-500/50';

  return (
    <div
      className={`
        absolute top-0 bottom-0 w-1 z-30
        cursor-col-resize
        transition-colors duration-150
        ${onRightEdge ? 'right-0' : 'left-0'}
        ${barTone}
      `}
      onMouseDown={onMouseDown}
      onDoubleClick={onDoubleClick}
      title="Drag to resize, double-click to reset"
    >
      {/* Wider hit area for easier grabbing */}
      <div
        className={`
          absolute top-0 bottom-0 w-3
          ${onRightEdge ? '-left-1' : '-right-1'}
        `}
      />

      {/* Visual indicator dots (shown on hover via CSS) */}
      <div className={`
        absolute top-1/2 -translate-y-1/2
        ${onRightEdge ? '-left-0.5' : '-right-0.5'}
        flex flex-col gap-1 opacity-0 hover:opacity-100 transition-opacity
        ${isDragging ? 'opacity-100' : ''}
      `}>
        <div className="w-1 h-1 rounded-full bg-dark-400" />
        <div className="w-1 h-1 rounded-full bg-dark-400" />
        <div className="w-1 h-1 rounded-full bg-dark-400" />
      </div>
    </div>
  );
}

export const ResizeHandle = memo(ResizeHandleComponent);
export default ResizeHandle;
|
||||
@@ -10,7 +10,8 @@
|
||||
* P2.7-P2.10: SpecRenderer component with node/edge/selection handling
|
||||
*/
|
||||
|
||||
import { useCallback, useRef, useEffect, useMemo, DragEvent } from 'react';
|
||||
import { useCallback, useRef, useEffect, useMemo, useState, DragEvent } from 'react';
|
||||
import { Play, Square, Loader2, Eye, EyeOff, CheckCircle, AlertCircle } from 'lucide-react';
|
||||
import ReactFlow, {
|
||||
Background,
|
||||
Controls,
|
||||
@@ -22,6 +23,7 @@ import ReactFlow, {
|
||||
NodeChange,
|
||||
EdgeChange,
|
||||
Connection,
|
||||
applyNodeChanges,
|
||||
} from 'reactflow';
|
||||
import 'reactflow/dist/style.css';
|
||||
|
||||
@@ -36,23 +38,34 @@ import {
|
||||
useSelectedEdgeId,
|
||||
} from '../../hooks/useSpecStore';
|
||||
import { useSpecWebSocket } from '../../hooks/useSpecWebSocket';
|
||||
import { usePanelStore } from '../../hooks/usePanelStore';
|
||||
import { useOptimizationStream } from '../../hooks/useOptimizationStream';
|
||||
import { ConnectionStatusIndicator } from './ConnectionStatusIndicator';
|
||||
import { ProgressRing } from './visualization/ConvergenceSparkline';
|
||||
import { CanvasNodeData } from '../../lib/canvas/schema';
|
||||
import { validateSpec, canRunOptimization } from '../../lib/validation/specValidator';
|
||||
|
||||
// ============================================================================
|
||||
// Drag-Drop Helpers
|
||||
// ============================================================================
|
||||
|
||||
/** Addable node types via drag-drop */
|
||||
const ADDABLE_NODE_TYPES = ['designVar', 'extractor', 'objective', 'constraint'] as const;
|
||||
import { SINGLETON_TYPES } from './palette/NodePalette';
|
||||
|
||||
/** All node types that can be added via drag-drop */
|
||||
const ADDABLE_NODE_TYPES = ['model', 'solver', 'designVar', 'extractor', 'objective', 'constraint', 'algorithm', 'surrogate'] as const;
|
||||
type AddableNodeType = typeof ADDABLE_NODE_TYPES[number];
|
||||
|
||||
function isAddableNodeType(type: string): type is AddableNodeType {
|
||||
return ADDABLE_NODE_TYPES.includes(type as AddableNodeType);
|
||||
}
|
||||
|
||||
/** Check if a node type is a singleton (only one allowed) */
|
||||
function isSingletonType(type: string): boolean {
|
||||
return SINGLETON_TYPES.includes(type as typeof SINGLETON_TYPES[number]);
|
||||
}
|
||||
|
||||
/** Maps canvas NodeType to spec API type */
|
||||
function mapNodeTypeToSpecType(type: AddableNodeType): 'designVar' | 'extractor' | 'objective' | 'constraint' {
|
||||
function mapNodeTypeToSpecType(type: AddableNodeType): 'designVar' | 'extractor' | 'objective' | 'constraint' | 'model' | 'solver' | 'algorithm' | 'surrogate' {
|
||||
return type;
|
||||
}
|
||||
|
||||
@@ -61,6 +74,22 @@ function getDefaultNodeData(type: AddableNodeType, position: { x: number; y: num
|
||||
const timestamp = Date.now();
|
||||
|
||||
switch (type) {
|
||||
case 'model':
|
||||
return {
|
||||
name: 'Model',
|
||||
sim: {
|
||||
path: '',
|
||||
solver: 'nastran',
|
||||
},
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'solver':
|
||||
return {
|
||||
name: 'Solver',
|
||||
engine: 'nxnastran',
|
||||
solution_type: 'SOL101',
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'designVar':
|
||||
return {
|
||||
name: `variable_${timestamp}`,
|
||||
@@ -74,8 +103,28 @@ function getDefaultNodeData(type: AddableNodeType, position: { x: number; y: num
|
||||
case 'extractor':
|
||||
return {
|
||||
name: `extractor_${timestamp}`,
|
||||
type: 'custom',
|
||||
type: 'custom_function', // Must be valid ExtractorType
|
||||
builtin: false,
|
||||
enabled: true,
|
||||
// Custom function extractors need a function definition
|
||||
function: {
|
||||
name: 'extract',
|
||||
source_code: `def extract(op2_path: str, config: dict = None) -> dict:
|
||||
"""
|
||||
Custom extractor function.
|
||||
|
||||
Args:
|
||||
op2_path: Path to the OP2 results file
|
||||
config: Optional configuration dict
|
||||
|
||||
Returns:
|
||||
Dictionary with extracted values
|
||||
"""
|
||||
# TODO: Implement extraction logic
|
||||
return {'value': 0.0}
|
||||
`,
|
||||
},
|
||||
outputs: [{ name: 'value', metric: 'custom' }],
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'objective':
|
||||
@@ -83,20 +132,44 @@ function getDefaultNodeData(type: AddableNodeType, position: { x: number; y: num
|
||||
name: `objective_${timestamp}`,
|
||||
direction: 'minimize',
|
||||
weight: 1.0,
|
||||
source_extractor_id: null,
|
||||
source_output: null,
|
||||
// Source is required - use placeholder that user must configure
|
||||
source: {
|
||||
extractor_id: 'ext_001', // Placeholder - user needs to configure
|
||||
output_name: 'value',
|
||||
},
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'constraint':
|
||||
return {
|
||||
name: `constraint_${timestamp}`,
|
||||
type: 'upper',
|
||||
limit: 1.0,
|
||||
source_extractor_id: null,
|
||||
source_output: null,
|
||||
type: 'hard', // Must be 'hard' or 'soft' (field is 'type' not 'constraint_type')
|
||||
operator: '<=',
|
||||
threshold: 1.0, // Field is 'threshold' not 'limit'
|
||||
// Source is required
|
||||
source: {
|
||||
extractor_id: 'ext_001', // Placeholder - user needs to configure
|
||||
output_name: 'value',
|
||||
},
|
||||
enabled: true,
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'algorithm':
|
||||
return {
|
||||
name: 'Algorithm',
|
||||
type: 'TPE',
|
||||
budget: {
|
||||
max_trials: 100,
|
||||
},
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'surrogate':
|
||||
return {
|
||||
name: 'Surrogate',
|
||||
enabled: false,
|
||||
model_type: 'MLP',
|
||||
min_trials: 20,
|
||||
canvas_position: position,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -173,6 +246,161 @@ function SpecRendererInner({
|
||||
const wsStudyId = enableWebSocket ? storeStudyId : null;
|
||||
const { status: wsStatus } = useSpecWebSocket(wsStudyId);
|
||||
|
||||
// Panel store for validation and error panels
|
||||
const { setValidationData, addError, openPanel } = usePanelStore();
|
||||
|
||||
// Optimization WebSocket stream for real-time updates
|
||||
const {
|
||||
status: optimizationStatus,
|
||||
progress: wsProgress,
|
||||
bestTrial: wsBestTrial,
|
||||
recentTrials,
|
||||
} = useOptimizationStream(studyId, {
|
||||
autoReportErrors: true,
|
||||
onTrialComplete: (trial) => {
|
||||
console.log('[SpecRenderer] Trial completed:', trial.trial_number);
|
||||
},
|
||||
onNewBest: (best) => {
|
||||
console.log('[SpecRenderer] New best found:', best.value);
|
||||
setShowResults(true); // Auto-show results when new best found
|
||||
},
|
||||
});
|
||||
|
||||
// Optimization execution state
|
||||
const isRunning = optimizationStatus === 'running';
|
||||
const [isStarting, setIsStarting] = useState(false);
|
||||
const [showResults, setShowResults] = useState(false);
|
||||
const [validationStatus, setValidationStatus] = useState<'valid' | 'invalid' | 'unchecked'>('unchecked');
|
||||
|
||||
// Build trial history for sparklines (extract objective values from recent trials)
|
||||
const trialHistory = useMemo(() => {
|
||||
const history: Record<string, number[]> = {};
|
||||
for (const trial of recentTrials) {
|
||||
// Map objective values - assumes single objective for now
|
||||
if (trial.objective !== null) {
|
||||
const key = 'primary';
|
||||
if (!history[key]) history[key] = [];
|
||||
history[key].push(trial.objective);
|
||||
}
|
||||
// Could also extract individual params/results for multi-objective
|
||||
}
|
||||
// Reverse so oldest is first (for sparkline)
|
||||
for (const key of Object.keys(history)) {
|
||||
history[key].reverse();
|
||||
}
|
||||
return history;
|
||||
}, [recentTrials]);
|
||||
|
||||
// Build best trial data for node display
|
||||
const bestTrial = useMemo((): {
|
||||
trial_number: number;
|
||||
objective: number;
|
||||
design_variables: Record<string, number>;
|
||||
results: Record<string, number>;
|
||||
} | null => {
|
||||
if (!wsBestTrial) return null;
|
||||
return {
|
||||
trial_number: wsBestTrial.trial_number,
|
||||
objective: wsBestTrial.value,
|
||||
design_variables: wsBestTrial.params,
|
||||
results: { primary: wsBestTrial.value, ...wsBestTrial.params },
|
||||
};
|
||||
}, [wsBestTrial]);
|
||||
|
||||
// Note: Polling removed - now using WebSocket via useOptimizationStream hook
|
||||
// The hook handles: status updates, best trial updates, error reporting
|
||||
|
||||
// Validate the spec and show results in panel
|
||||
const handleValidate = useCallback(() => {
|
||||
if (!spec) return;
|
||||
|
||||
const result = validateSpec(spec);
|
||||
setValidationData(result);
|
||||
setValidationStatus(result.valid ? 'valid' : 'invalid');
|
||||
|
||||
// Auto-open validation panel if there are issues
|
||||
if (!result.valid || result.warnings.length > 0) {
|
||||
openPanel('validation');
|
||||
}
|
||||
|
||||
return result;
|
||||
}, [spec, setValidationData, openPanel]);
|
||||
|
||||
const handleRun = async () => {
|
||||
if (!studyId || !spec) return;
|
||||
|
||||
// Validate before running
|
||||
const validation = handleValidate();
|
||||
if (!validation || !validation.valid) {
|
||||
// Show validation panel with errors
|
||||
return;
|
||||
}
|
||||
|
||||
// Also do a quick sanity check
|
||||
const { canRun, reason } = canRunOptimization(spec);
|
||||
if (!canRun) {
|
||||
addError({
|
||||
type: 'config_error',
|
||||
message: reason || 'Cannot run optimization',
|
||||
recoverable: false,
|
||||
suggestions: ['Check the validation panel for details'],
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
setIsStarting(true);
|
||||
try {
|
||||
const res = await fetch(`/api/optimization/studies/${studyId}/run`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ trials: spec?.optimization?.budget?.max_trials || 50 })
|
||||
});
|
||||
if (!res.ok) {
|
||||
const err = await res.json();
|
||||
throw new Error(err.detail || 'Failed to start');
|
||||
}
|
||||
// isRunning is now derived from WebSocket state (optimizationStatus === 'running')
|
||||
setValidationStatus('unchecked'); // Clear validation status when running
|
||||
} catch (e) {
|
||||
const errorMessage = e instanceof Error ? e.message : 'Failed to start optimization';
|
||||
setError(errorMessage);
|
||||
|
||||
// Also add to error panel for persistence
|
||||
addError({
|
||||
type: 'system_error',
|
||||
message: errorMessage,
|
||||
recoverable: true,
|
||||
suggestions: ['Check if the backend is running', 'Verify the study configuration'],
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
} finally {
|
||||
setIsStarting(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleStop = async () => {
|
||||
if (!studyId) return;
|
||||
try {
|
||||
const res = await fetch(`/api/optimization/studies/${studyId}/stop`, { method: 'POST' });
|
||||
if (!res.ok) {
|
||||
const err = await res.json().catch(() => ({}));
|
||||
throw new Error(err.detail || 'Failed to stop');
|
||||
}
|
||||
// isRunning will update via WebSocket when optimization actually stops
|
||||
} catch (e) {
|
||||
const errorMessage = e instanceof Error ? e.message : 'Failed to stop optimization';
|
||||
setError(errorMessage);
|
||||
addError({
|
||||
type: 'system_error',
|
||||
message: errorMessage,
|
||||
recoverable: false,
|
||||
suggestions: ['The optimization may still be running in the background'],
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// Load spec on mount if studyId provided
|
||||
useEffect(() => {
|
||||
if (studyId) {
|
||||
@@ -186,8 +414,58 @@ function SpecRendererInner({
|
||||
|
||||
// Convert spec to ReactFlow nodes
|
||||
const nodes = useMemo(() => {
|
||||
return specToNodes(spec);
|
||||
}, [spec]);
|
||||
const baseNodes = specToNodes(spec);
|
||||
|
||||
// Always map nodes to include history for sparklines (even if not showing results)
|
||||
return baseNodes.map(node => {
|
||||
// Create a mutable copy with explicit any type for dynamic property assignment
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const newData: any = { ...node.data };
|
||||
|
||||
// Add history for sparklines on objective nodes
|
||||
if (node.type === 'objective') {
|
||||
newData.history = trialHistory['primary'] || [];
|
||||
}
|
||||
|
||||
// Map results to nodes when showing results
|
||||
if (showResults && bestTrial) {
|
||||
if (node.type === 'designVar' && newData.expressionName) {
|
||||
const val = bestTrial.design_variables?.[newData.expressionName];
|
||||
if (val !== undefined) newData.resultValue = val;
|
||||
} else if (node.type === 'objective') {
|
||||
const outputName = newData.outputName;
|
||||
if (outputName && bestTrial.results?.[outputName] !== undefined) {
|
||||
newData.resultValue = bestTrial.results[outputName];
|
||||
}
|
||||
} else if (node.type === 'constraint') {
|
||||
const outputName = newData.outputName;
|
||||
if (outputName && bestTrial.results?.[outputName] !== undefined) {
|
||||
const val = bestTrial.results[outputName];
|
||||
newData.resultValue = val;
|
||||
|
||||
// Check feasibility
|
||||
const op = newData.operator;
|
||||
const threshold = newData.value;
|
||||
if (op === '<=' && threshold !== undefined) newData.isFeasible = val <= threshold;
|
||||
else if (op === '>=' && threshold !== undefined) newData.isFeasible = val >= threshold;
|
||||
else if (op === '<' && threshold !== undefined) newData.isFeasible = val < threshold;
|
||||
else if (op === '>' && threshold !== undefined) newData.isFeasible = val > threshold;
|
||||
else if (op === '==' && threshold !== undefined) newData.isFeasible = Math.abs(val - threshold) < 1e-6;
|
||||
}
|
||||
} else if (node.type === 'extractor') {
|
||||
const outputNames = newData.outputNames;
|
||||
if (outputNames && outputNames.length > 0 && bestTrial.results) {
|
||||
const firstOut = outputNames[0];
|
||||
if (bestTrial.results[firstOut] !== undefined) {
|
||||
newData.resultValue = bestTrial.results[firstOut];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { ...node, data: newData };
|
||||
});
|
||||
}, [spec, showResults, bestTrial, trialHistory]);
|
||||
|
||||
// Convert spec to ReactFlow edges with selection styling
|
||||
const edges = useMemo(() => {
|
||||
@@ -208,12 +486,23 @@ function SpecRendererInner({
|
||||
nodesRef.current = nodes;
|
||||
}, [nodes]);
|
||||
|
||||
// Track local node state for smooth dragging
|
||||
const [localNodes, setLocalNodes] = useState(nodes);
|
||||
|
||||
// Sync local nodes with spec-derived nodes when spec changes
|
||||
useEffect(() => {
|
||||
setLocalNodes(nodes);
|
||||
}, [nodes]);
|
||||
|
||||
// Handle node position changes
|
||||
const onNodesChange = useCallback(
|
||||
(changes: NodeChange[]) => {
|
||||
if (!editable) return;
|
||||
|
||||
// Handle position changes
|
||||
// Apply changes to local state for smooth dragging
|
||||
setLocalNodes((nds) => applyNodeChanges(changes, nds));
|
||||
|
||||
// Handle position changes - save to spec when drag ends
|
||||
for (const change of changes) {
|
||||
if (change.type === 'position' && change.position && change.dragging === false) {
|
||||
// Dragging ended - update spec
|
||||
@@ -353,6 +642,18 @@ function SpecRendererInner({
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if this is a singleton type that already exists
|
||||
if (isSingletonType(type)) {
|
||||
const existingNode = localNodes.find(n => n.type === type);
|
||||
if (existingNode) {
|
||||
// Select the existing node instead of creating a duplicate
|
||||
selectNode(existingNode.id);
|
||||
// Show a toast notification would be nice here
|
||||
console.log(`${type} already exists - selected existing node`);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Convert screen position to flow position
|
||||
const position = reactFlowInstance.current.screenToFlowPosition({
|
||||
x: event.clientX,
|
||||
@@ -363,8 +664,19 @@ function SpecRendererInner({
|
||||
const nodeData = getDefaultNodeData(type, position);
|
||||
const specType = mapNodeTypeToSpecType(type);
|
||||
|
||||
// For structural types (model, solver, algorithm, surrogate), these are
|
||||
// part of the spec structure rather than array items. Handle differently.
|
||||
const structuralTypes = ['model', 'solver', 'algorithm', 'surrogate'];
|
||||
if (structuralTypes.includes(type)) {
|
||||
// These nodes are derived from spec structure - they shouldn't be "added"
|
||||
// They already exist if the spec has that section configured
|
||||
console.log(`${type} is a structural node - configure via spec directly`);
|
||||
setError(`${type} nodes are configured via the spec. Use the config panel to edit.`);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const nodeId = await addNode(specType, nodeData);
|
||||
const nodeId = await addNode(specType as 'designVar' | 'extractor' | 'objective' | 'constraint', nodeData);
|
||||
// Select the newly created node
|
||||
selectNode(nodeId);
|
||||
} catch (err) {
|
||||
@@ -372,7 +684,7 @@ function SpecRendererInner({
|
||||
setError(err instanceof Error ? err.message : 'Failed to add node');
|
||||
}
|
||||
},
|
||||
[editable, addNode, selectNode, setError]
|
||||
[editable, addNode, selectNode, setError, localNodes]
|
||||
);
|
||||
|
||||
// Loading state
|
||||
@@ -458,7 +770,7 @@ function SpecRendererInner({
|
||||
)}
|
||||
|
||||
<ReactFlow
|
||||
nodes={nodes}
|
||||
nodes={localNodes}
|
||||
edges={edges}
|
||||
onNodesChange={onNodesChange}
|
||||
onEdgesChange={onEdgesChange}
|
||||
@@ -488,10 +800,113 @@ function SpecRendererInner({
|
||||
/>
|
||||
</ReactFlow>
|
||||
|
||||
{/* Action Buttons */}
|
||||
<div className="absolute bottom-4 right-4 z-10 flex gap-2">
|
||||
{/* Results toggle */}
|
||||
{bestTrial && (
|
||||
<button
|
||||
onClick={() => setShowResults(!showResults)}
|
||||
className={`flex items-center gap-2 px-3 py-2 rounded-lg transition-colors border ${
|
||||
showResults
|
||||
? 'bg-primary-600/90 text-white border-primary-500 hover:bg-primary-500'
|
||||
: 'bg-dark-800 text-dark-300 border-dark-600 hover:text-white hover:border-dark-500'
|
||||
}`}
|
||||
title={showResults ? "Hide Results" : "Show Best Trial Results"}
|
||||
>
|
||||
{showResults ? <Eye size={16} /> : <EyeOff size={16} />}
|
||||
<span className="text-sm font-medium">Results</span>
|
||||
</button>
|
||||
)}
|
||||
|
||||
{/* Validate button - shows validation status */}
|
||||
<button
|
||||
onClick={handleValidate}
|
||||
className={`flex items-center gap-2 px-3 py-2 rounded-lg transition-colors border ${
|
||||
validationStatus === 'valid'
|
||||
? 'bg-green-600/20 text-green-400 border-green-500/50 hover:bg-green-600/30'
|
||||
: validationStatus === 'invalid'
|
||||
? 'bg-red-600/20 text-red-400 border-red-500/50 hover:bg-red-600/30'
|
||||
: 'bg-dark-800 text-dark-300 border-dark-600 hover:text-white hover:border-dark-500'
|
||||
}`}
|
||||
title="Validate spec before running"
|
||||
>
|
||||
{validationStatus === 'valid' ? (
|
||||
<CheckCircle size={16} />
|
||||
) : validationStatus === 'invalid' ? (
|
||||
<AlertCircle size={16} />
|
||||
) : (
|
||||
<CheckCircle size={16} />
|
||||
)}
|
||||
<span className="text-sm font-medium">Validate</span>
|
||||
</button>
|
||||
|
||||
{/* Run/Stop button */}
|
||||
{isRunning ? (
|
||||
<button
|
||||
onClick={handleStop}
|
||||
className="flex items-center gap-2 px-4 py-2 bg-red-600 text-white rounded-lg hover:bg-red-500 shadow-lg transition-colors font-medium"
|
||||
>
|
||||
<Square size={16} fill="currentColor" />
|
||||
Stop
|
||||
</button>
|
||||
) : (
|
||||
<button
|
||||
onClick={handleRun}
|
||||
disabled={isStarting || validationStatus === 'invalid'}
|
||||
className={`flex items-center gap-2 px-4 py-2 rounded-lg shadow-lg transition-colors font-medium ${
|
||||
validationStatus === 'invalid'
|
||||
? 'bg-dark-700 text-dark-400 cursor-not-allowed'
|
||||
: 'bg-emerald-600 text-white hover:bg-emerald-500 disabled:opacity-50 disabled:cursor-not-allowed'
|
||||
}`}
|
||||
title={validationStatus === 'invalid' ? 'Fix validation errors first' : 'Start optimization'}
|
||||
>
|
||||
{isStarting ? (
|
||||
<Loader2 size={16} className="animate-spin" />
|
||||
) : (
|
||||
<Play size={16} fill="currentColor" />
|
||||
)}
|
||||
Run
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Study name badge */}
|
||||
<div className="absolute bottom-4 left-4 z-10 px-3 py-1.5 bg-dark-800/90 backdrop-blur rounded-lg border border-dark-600">
|
||||
<span className="text-sm text-dark-300">{spec.meta.study_name}</span>
|
||||
</div>
|
||||
|
||||
{/* Progress indicator when running */}
|
||||
{isRunning && wsProgress && (
|
||||
<div className="absolute bottom-4 left-1/2 -translate-x-1/2 z-10 flex items-center gap-3 px-4 py-2 bg-dark-800/95 backdrop-blur rounded-lg border border-dark-600 shadow-lg">
|
||||
<ProgressRing
|
||||
progress={wsProgress.percentage}
|
||||
size={36}
|
||||
strokeWidth={3}
|
||||
color="#10b981"
|
||||
/>
|
||||
<div className="flex flex-col">
|
||||
<span className="text-sm font-medium text-white">
|
||||
Trial {wsProgress.current} / {wsProgress.total}
|
||||
</span>
|
||||
<span className="text-xs text-dark-400">
|
||||
{wsProgress.fea_count > 0 && `${wsProgress.fea_count} FEA`}
|
||||
{wsProgress.fea_count > 0 && wsProgress.nn_count > 0 && ' + '}
|
||||
{wsProgress.nn_count > 0 && `${wsProgress.nn_count} NN`}
|
||||
{wsProgress.fea_count === 0 && wsProgress.nn_count === 0 && 'Running...'}
|
||||
</span>
|
||||
</div>
|
||||
{wsBestTrial && (
|
||||
<div className="flex flex-col border-l border-dark-600 pl-3 ml-1">
|
||||
<span className="text-xs text-dark-400">Best</span>
|
||||
<span className="text-sm font-medium text-emerald-400">
|
||||
{typeof wsBestTrial.value === 'number'
|
||||
? wsBestTrial.value.toFixed(4)
|
||||
: wsBestTrial.value}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
// Main Canvas Component
|
||||
export { AtomizerCanvas } from './AtomizerCanvas';
|
||||
export { SpecRenderer } from './SpecRenderer';
|
||||
|
||||
// Palette
|
||||
export { NodePalette } from './palette/NodePalette';
|
||||
|
||||
@@ -2,12 +2,14 @@ import { memo } from 'react';
|
||||
import { NodeProps } from 'reactflow';
|
||||
import { ShieldAlert } from 'lucide-react';
|
||||
import { BaseNode } from './BaseNode';
|
||||
import { ResultBadge } from './ResultBadge';
|
||||
import { ConstraintNodeData } from '../../../lib/canvas/schema';
|
||||
|
||||
function ConstraintNodeComponent(props: NodeProps<ConstraintNodeData>) {
|
||||
const { data } = props;
|
||||
return (
|
||||
<BaseNode {...props} icon={<ShieldAlert size={16} />} iconColor="text-amber-400">
|
||||
<ResultBadge value={data.resultValue} isFeasible={data.isFeasible} />
|
||||
{data.name && data.operator && data.value !== undefined
|
||||
? `${data.name} ${data.operator} ${data.value}`
|
||||
: 'Set constraint'}
|
||||
|
||||
@@ -0,0 +1,58 @@
|
||||
/**
 * CustomExtractorNode - Canvas node for custom Python extractors
 *
 * Displays custom extractors defined with inline Python code.
 * Visually distinct from builtin extractors with a code icon.
 *
 * P3.11: Custom extractor UI component
 */

import { memo } from 'react';
import { NodeProps } from 'reactflow';
import { Code2 } from 'lucide-react';
import { BaseNode } from './BaseNode';

export interface CustomExtractorNodeData {
  type: 'customExtractor';
  label: string;
  configured: boolean;
  extractorId?: string;
  extractorName?: string;
  functionName?: string;
  functionSource?: string;
  outputs?: Array<{ name: string; units?: string }>;
  dependencies?: string[];
}

function CustomExtractorNodeComponent(props: NodeProps<CustomExtractorNodeData>) {
  const { data } = props;

  // A node counts as configured once it has both source code and outputs.
  const codePresent = Boolean(data.functionSource?.trim());
  const outputCount = data.outputs?.length ?? 0;
  const isConfigured = codePresent && outputCount > 0;

  const title = data.extractorName || data.functionName || 'Custom Extractor';

  return (
    <BaseNode
      {...props}
      icon={<Code2 size={16} />}
      iconColor={isConfigured ? 'text-violet-400' : 'text-dark-500'}
    >
      <div className="flex flex-col">
        <span className={isConfigured ? 'text-white' : 'text-dark-400'}>
          {title}
        </span>
        {!isConfigured && (
          <span className="text-xs text-amber-400">Needs configuration</span>
        )}
        {isConfigured && data.outputs && (
          <span className="text-xs text-dark-400">
            {data.outputs.length} output{data.outputs.length !== 1 ? 's' : ''}
          </span>
        )}
      </div>
    </BaseNode>
  );
}

export const CustomExtractorNode = memo(CustomExtractorNodeComponent);
|
||||
@@ -2,12 +2,14 @@ import { memo } from 'react';
|
||||
import { NodeProps } from 'reactflow';
|
||||
import { SlidersHorizontal } from 'lucide-react';
|
||||
import { BaseNode } from './BaseNode';
|
||||
import { ResultBadge } from './ResultBadge';
|
||||
import { DesignVarNodeData } from '../../../lib/canvas/schema';
|
||||
|
||||
function DesignVarNodeComponent(props: NodeProps<DesignVarNodeData>) {
|
||||
const { data } = props;
|
||||
return (
|
||||
<BaseNode {...props} icon={<SlidersHorizontal size={16} />} iconColor="text-emerald-400" inputs={0} outputs={1}>
|
||||
<ResultBadge value={data.resultValue} unit={data.unit} />
|
||||
{data.expressionName ? (
|
||||
<span className="font-mono">{data.expressionName}</span>
|
||||
) : (
|
||||
|
||||
@@ -2,12 +2,14 @@ import { memo } from 'react';
|
||||
import { NodeProps } from 'reactflow';
|
||||
import { FlaskConical } from 'lucide-react';
|
||||
import { BaseNode } from './BaseNode';
|
||||
import { ResultBadge } from './ResultBadge';
|
||||
import { ExtractorNodeData } from '../../../lib/canvas/schema';
|
||||
|
||||
function ExtractorNodeComponent(props: NodeProps<ExtractorNodeData>) {
|
||||
const { data } = props;
|
||||
return (
|
||||
<BaseNode {...props} icon={<FlaskConical size={16} />} iconColor="text-cyan-400">
|
||||
<ResultBadge value={data.resultValue} />
|
||||
{data.extractorName || 'Select extractor'}
|
||||
</BaseNode>
|
||||
);
|
||||
|
||||
@@ -2,13 +2,38 @@ import { memo } from 'react';
|
||||
import { NodeProps } from 'reactflow';
|
||||
import { Target } from 'lucide-react';
|
||||
import { BaseNode } from './BaseNode';
|
||||
import { ResultBadge } from './ResultBadge';
|
||||
import { ConvergenceSparkline } from '../visualization/ConvergenceSparkline';
|
||||
import { ObjectiveNodeData } from '../../../lib/canvas/schema';
|
||||
|
||||
function ObjectiveNodeComponent(props: NodeProps<ObjectiveNodeData>) {
|
||||
const { data } = props;
|
||||
const hasHistory = data.history && data.history.length > 1;
|
||||
|
||||
return (
|
||||
<BaseNode {...props} icon={<Target size={16} />} iconColor="text-rose-400">
|
||||
{data.name ? `${data.direction === 'maximize' ? '↑' : '↓'} ${data.name}` : 'Set objective'}
|
||||
<div className="flex flex-col gap-1">
|
||||
<div className="flex items-center justify-between">
|
||||
<span className="text-sm">
|
||||
{data.name ? `${data.direction === 'maximize' ? '↑' : '↓'} ${data.name}` : 'Set objective'}
|
||||
</span>
|
||||
<ResultBadge value={data.resultValue} label="Best" />
|
||||
</div>
|
||||
|
||||
{/* Convergence sparkline */}
|
||||
{hasHistory && (
|
||||
<div className="mt-1 -mb-1">
|
||||
<ConvergenceSparkline
|
||||
values={data.history!}
|
||||
width={120}
|
||||
height={20}
|
||||
direction={data.direction || 'minimize'}
|
||||
color={data.direction === 'maximize' ? '#34d399' : '#60a5fa'}
|
||||
showBest={true}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</BaseNode>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,39 @@
|
||||
import { memo } from 'react';
|
||||
|
||||
interface ResultBadgeProps {
|
||||
value: number | string | null | undefined;
|
||||
unit?: string;
|
||||
isFeasible?: boolean; // For constraints
|
||||
label?: string;
|
||||
}
|
||||
|
||||
export const ResultBadge = memo(function ResultBadge({ value, unit, isFeasible, label }: ResultBadgeProps) {
|
||||
if (value === null || value === undefined) return null;
|
||||
|
||||
const displayValue = typeof value === 'number'
|
||||
? value.toLocaleString(undefined, { maximumFractionDigits: 4 })
|
||||
: value;
|
||||
|
||||
// Determine color based on feasibility (if provided)
|
||||
let bgColor = 'bg-primary-500/20';
|
||||
let textColor = 'text-primary-300';
|
||||
let borderColor = 'border-primary-500/30';
|
||||
|
||||
if (isFeasible === true) {
|
||||
bgColor = 'bg-emerald-500/20';
|
||||
textColor = 'text-emerald-300';
|
||||
borderColor = 'border-emerald-500/30';
|
||||
} else if (isFeasible === false) {
|
||||
bgColor = 'bg-red-500/20';
|
||||
textColor = 'text-red-300';
|
||||
borderColor = 'border-red-500/30';
|
||||
}
|
||||
|
||||
return (
|
||||
<div className={`absolute -top-3 -right-2 px-2 py-0.5 rounded-full border ${bgColor} ${borderColor} ${textColor} text-xs font-mono shadow-lg backdrop-blur-sm z-10 flex items-center gap-1`}>
|
||||
{label && <span className="opacity-70 mr-1">{label}:</span>}
|
||||
<span className="font-bold">{displayValue}</span>
|
||||
{unit && <span className="opacity-70 text-[10px] ml-0.5">{unit}</span>}
|
||||
</div>
|
||||
);
|
||||
});
|
||||
@@ -1,14 +1,44 @@
|
||||
import { memo } from 'react';
|
||||
import { NodeProps } from 'reactflow';
|
||||
import { Cpu } from 'lucide-react';
|
||||
import { Cpu, Terminal } from 'lucide-react';
|
||||
import { BaseNode } from './BaseNode';
|
||||
import { SolverNodeData } from '../../../lib/canvas/schema';
|
||||
import { SolverNodeData, SolverEngine } from '../../../lib/canvas/schema';
|
||||
|
||||
// Human-readable engine names
|
||||
const ENGINE_LABELS: Record<SolverEngine, string> = {
|
||||
nxnastran: 'NX Nastran',
|
||||
mscnastran: 'MSC Nastran',
|
||||
python: 'Python Script',
|
||||
abaqus: 'Abaqus',
|
||||
ansys: 'ANSYS',
|
||||
};
|
||||
|
||||
function SolverNodeComponent(props: NodeProps<SolverNodeData>) {
|
||||
const { data } = props;
|
||||
|
||||
// Build display string: "Engine - SolutionType" or just one
|
||||
const engineLabel = data.engine ? ENGINE_LABELS[data.engine] : null;
|
||||
const solverTypeLabel = data.solverType || null;
|
||||
|
||||
let displayText: string;
|
||||
if (engineLabel && solverTypeLabel) {
|
||||
displayText = `${engineLabel} (${solverTypeLabel})`;
|
||||
} else if (engineLabel) {
|
||||
displayText = engineLabel;
|
||||
} else if (solverTypeLabel) {
|
||||
displayText = solverTypeLabel;
|
||||
} else {
|
||||
displayText = 'Configure solver';
|
||||
}
|
||||
|
||||
// Use Terminal icon for Python, Cpu for others
|
||||
const icon = data.engine === 'python'
|
||||
? <Terminal size={16} />
|
||||
: <Cpu size={16} />;
|
||||
|
||||
return (
|
||||
<BaseNode {...props} icon={<Cpu size={16} />} iconColor="text-violet-400">
|
||||
{data.solverType || 'Select solution'}
|
||||
<BaseNode {...props} icon={icon} iconColor="text-violet-400">
|
||||
{displayText}
|
||||
</BaseNode>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -54,6 +54,9 @@ export interface NodePaletteProps {
|
||||
// Constants
|
||||
// ============================================================================
|
||||
|
||||
/** Singleton node types - only one of each allowed on canvas */
|
||||
export const SINGLETON_TYPES: NodeType[] = ['model', 'solver', 'algorithm', 'surrogate'];
|
||||
|
||||
export const PALETTE_ITEMS: PaletteItem[] = [
|
||||
{
|
||||
type: 'model',
|
||||
@@ -61,15 +64,15 @@ export const PALETTE_ITEMS: PaletteItem[] = [
|
||||
icon: Box,
|
||||
description: 'NX model file (.prt, .sim)',
|
||||
color: 'text-blue-400',
|
||||
canAdd: false, // Synthetic - derived from spec
|
||||
canAdd: true, // Singleton - only one allowed
|
||||
},
|
||||
{
|
||||
type: 'solver',
|
||||
label: 'Solver',
|
||||
icon: Cpu,
|
||||
description: 'Nastran solution type',
|
||||
description: 'Analysis solver config',
|
||||
color: 'text-violet-400',
|
||||
canAdd: false, // Synthetic - derived from model
|
||||
canAdd: true, // Singleton - only one allowed
|
||||
},
|
||||
{
|
||||
type: 'designVar',
|
||||
@@ -109,7 +112,7 @@ export const PALETTE_ITEMS: PaletteItem[] = [
|
||||
icon: BrainCircuit,
|
||||
description: 'Optimization method',
|
||||
color: 'text-indigo-400',
|
||||
canAdd: false, // Synthetic - derived from spec.optimization
|
||||
canAdd: true, // Singleton - only one allowed
|
||||
},
|
||||
{
|
||||
type: 'surrogate',
|
||||
@@ -117,7 +120,7 @@ export const PALETTE_ITEMS: PaletteItem[] = [
|
||||
icon: Rocket,
|
||||
description: 'Neural acceleration',
|
||||
color: 'text-pink-400',
|
||||
canAdd: false, // Synthetic - derived from spec.optimization.surrogate
|
||||
canAdd: true, // Singleton - only one allowed
|
||||
},
|
||||
];
|
||||
|
||||
|
||||
@@ -0,0 +1,360 @@
|
||||
/**
|
||||
* CustomExtractorPanel - Panel for editing custom Python extractors
|
||||
*
|
||||
* Provides a code editor for writing custom extraction functions,
|
||||
* output definitions, and validation.
|
||||
*
|
||||
* P3.12: Custom extractor UI component
|
||||
*/
|
||||
|
||||
import { useState, useCallback } from 'react';
|
||||
import { X, Play, AlertCircle, CheckCircle, Plus, Trash2, HelpCircle } from 'lucide-react';
|
||||
|
||||
interface CustomExtractorOutput {
|
||||
name: string;
|
||||
units?: string;
|
||||
description?: string;
|
||||
}
|
||||
|
||||
interface CustomExtractorPanelProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
initialName?: string;
|
||||
initialFunctionName?: string;
|
||||
initialSource?: string;
|
||||
initialOutputs?: CustomExtractorOutput[];
|
||||
initialDependencies?: string[];
|
||||
onSave: (data: {
|
||||
name: string;
|
||||
functionName: string;
|
||||
source: string;
|
||||
outputs: CustomExtractorOutput[];
|
||||
dependencies: string[];
|
||||
}) => void;
|
||||
}
|
||||
|
||||
// Common styling classes
|
||||
const inputClass =
|
||||
'w-full px-3 py-2 bg-dark-800 border border-dark-600 text-white placeholder-dark-400 rounded-lg focus:border-primary-500 focus:outline-none transition-colors';
|
||||
const labelClass = 'block text-sm font-medium text-dark-300 mb-1';
|
||||
|
||||
// Default extractor template
|
||||
const DEFAULT_SOURCE = `def extract(op2_path, bdf_path=None, params=None, working_dir=None):
|
||||
"""
|
||||
Custom extractor function.
|
||||
|
||||
Args:
|
||||
op2_path: Path to the OP2 results file
|
||||
bdf_path: Optional path to the BDF model file
|
||||
params: Dictionary of current design parameters
|
||||
working_dir: Path to the current trial directory
|
||||
|
||||
Returns:
|
||||
Dictionary of output_name -> value
|
||||
OR a single float value
|
||||
OR a list/tuple of values (mapped to outputs in order)
|
||||
"""
|
||||
import numpy as np
|
||||
from pyNastran.op2.op2 import OP2
|
||||
|
||||
# Load OP2 results
|
||||
op2 = OP2(op2_path, debug=False)
|
||||
|
||||
# Example: compute custom metric
|
||||
# ... your extraction logic here ...
|
||||
|
||||
result = 0.0
|
||||
|
||||
return {"custom_output": result}
|
||||
`;
|
||||
|
||||
export function CustomExtractorPanel({
|
||||
isOpen,
|
||||
onClose,
|
||||
initialName = '',
|
||||
initialFunctionName = 'extract',
|
||||
initialSource = DEFAULT_SOURCE,
|
||||
initialOutputs = [{ name: 'custom_output', units: '' }],
|
||||
initialDependencies = [],
|
||||
onSave,
|
||||
}: CustomExtractorPanelProps) {
|
||||
const [name, setName] = useState(initialName);
|
||||
const [functionName, setFunctionName] = useState(initialFunctionName);
|
||||
const [source, setSource] = useState(initialSource);
|
||||
const [outputs, setOutputs] = useState<CustomExtractorOutput[]>(initialOutputs);
|
||||
const [dependencies] = useState<string[]>(initialDependencies);
|
||||
const [validation, setValidation] = useState<{
|
||||
valid: boolean;
|
||||
errors: string[];
|
||||
} | null>(null);
|
||||
const [isValidating, setIsValidating] = useState(false);
|
||||
const [showHelp, setShowHelp] = useState(false);
|
||||
|
||||
// Add a new output
|
||||
const addOutput = useCallback(() => {
|
||||
setOutputs((prev) => [...prev, { name: '', units: '' }]);
|
||||
}, []);
|
||||
|
||||
// Remove an output
|
||||
const removeOutput = useCallback((index: number) => {
|
||||
setOutputs((prev) => prev.filter((_, i) => i !== index));
|
||||
}, []);
|
||||
|
||||
// Update an output
|
||||
const updateOutput = useCallback(
|
||||
(index: number, field: keyof CustomExtractorOutput, value: string) => {
|
||||
setOutputs((prev) =>
|
||||
prev.map((out, i) => (i === index ? { ...out, [field]: value } : out))
|
||||
);
|
||||
},
|
||||
[]
|
||||
);
|
||||
|
||||
// Validate the code
|
||||
const validateCode = useCallback(async () => {
|
||||
setIsValidating(true);
|
||||
setValidation(null);
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/spec/validate-extractor', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
function_name: functionName,
|
||||
source: source,
|
||||
}),
|
||||
});
|
||||
|
||||
const result = await response.json();
|
||||
setValidation({
|
||||
valid: result.valid,
|
||||
errors: result.errors || [],
|
||||
});
|
||||
} catch (error) {
|
||||
setValidation({
|
||||
valid: false,
|
||||
errors: ['Failed to validate: ' + (error instanceof Error ? error.message : 'Unknown error')],
|
||||
});
|
||||
} finally {
|
||||
setIsValidating(false);
|
||||
}
|
||||
}, [functionName, source]);
|
||||
|
||||
// Handle save
|
||||
const handleSave = useCallback(() => {
|
||||
// Filter out empty outputs
|
||||
const validOutputs = outputs.filter((o) => o.name.trim());
|
||||
|
||||
if (!name.trim()) {
|
||||
setValidation({ valid: false, errors: ['Name is required'] });
|
||||
return;
|
||||
}
|
||||
|
||||
if (validOutputs.length === 0) {
|
||||
setValidation({ valid: false, errors: ['At least one output is required'] });
|
||||
return;
|
||||
}
|
||||
|
||||
onSave({
|
||||
name: name.trim(),
|
||||
functionName: functionName.trim() || 'extract',
|
||||
source,
|
||||
outputs: validOutputs,
|
||||
dependencies: dependencies.filter((d) => d.trim()),
|
||||
});
|
||||
onClose();
|
||||
}, [name, functionName, source, outputs, dependencies, onSave, onClose]);
|
||||
|
||||
if (!isOpen) return null;
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 bg-black/50 flex items-center justify-center z-50">
|
||||
<div className="bg-dark-850 rounded-xl shadow-2xl w-[900px] max-h-[90vh] flex flex-col border border-dark-700">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-6 py-4 border-b border-dark-700">
|
||||
<h2 className="text-lg font-semibold text-white">Custom Extractor</h2>
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={() => setShowHelp(!showHelp)}
|
||||
className="p-2 text-dark-400 hover:text-white hover:bg-dark-700 rounded-lg transition-colors"
|
||||
title="Show help"
|
||||
>
|
||||
<HelpCircle size={20} />
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-2 text-dark-400 hover:text-white hover:bg-dark-700 rounded-lg transition-colors"
|
||||
>
|
||||
<X size={20} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="flex-1 overflow-auto p-6">
|
||||
{/* Help Section */}
|
||||
{showHelp && (
|
||||
<div className="mb-4 p-4 bg-primary-900/20 border border-primary-700 rounded-lg">
|
||||
<h3 className="text-sm font-semibold text-primary-400 mb-2">How Custom Extractors Work</h3>
|
||||
<ul className="text-sm text-dark-300 space-y-1">
|
||||
<li>• Your function receives the path to OP2 results and optional BDF/params</li>
|
||||
<li>• Use pyNastran, numpy, scipy for data extraction and analysis</li>
|
||||
<li>• Return a dictionary mapping output names to numeric values</li>
|
||||
<li>• Outputs can be used as objectives or constraints in optimization</li>
|
||||
<li>• Code runs in a sandboxed environment (no file I/O beyond OP2/BDF)</li>
|
||||
</ul>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="grid grid-cols-2 gap-6">
|
||||
{/* Left Column - Basic Info & Outputs */}
|
||||
<div className="space-y-4">
|
||||
{/* Name */}
|
||||
<div>
|
||||
<label className={labelClass}>Extractor Name</label>
|
||||
<input
|
||||
type="text"
|
||||
value={name}
|
||||
onChange={(e) => setName(e.target.value)}
|
||||
placeholder="My Custom Extractor"
|
||||
className={inputClass}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Function Name */}
|
||||
<div>
|
||||
<label className={labelClass}>Function Name</label>
|
||||
<input
|
||||
type="text"
|
||||
value={functionName}
|
||||
onChange={(e) => setFunctionName(e.target.value)}
|
||||
placeholder="extract"
|
||||
className={`${inputClass} font-mono`}
|
||||
/>
|
||||
<p className="text-xs text-dark-500 mt-1">
|
||||
Name of the Python function in your code
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Outputs */}
|
||||
<div>
|
||||
<label className={labelClass}>Outputs</label>
|
||||
<div className="space-y-2">
|
||||
{outputs.map((output, index) => (
|
||||
<div key={index} className="flex gap-2">
|
||||
<input
|
||||
type="text"
|
||||
value={output.name}
|
||||
onChange={(e) => updateOutput(index, 'name', e.target.value)}
|
||||
placeholder="output_name"
|
||||
className={`${inputClass} font-mono flex-1`}
|
||||
/>
|
||||
<input
|
||||
type="text"
|
||||
value={output.units || ''}
|
||||
onChange={(e) => updateOutput(index, 'units', e.target.value)}
|
||||
placeholder="units"
|
||||
className={`${inputClass} w-24`}
|
||||
/>
|
||||
<button
|
||||
onClick={() => removeOutput(index)}
|
||||
className="p-2 text-red-400 hover:text-red-300 hover:bg-red-900/20 rounded-lg transition-colors"
|
||||
disabled={outputs.length === 1}
|
||||
>
|
||||
<Trash2 size={16} />
|
||||
</button>
|
||||
</div>
|
||||
))}
|
||||
<button
|
||||
onClick={addOutput}
|
||||
className="flex items-center gap-1 text-sm text-primary-400 hover:text-primary-300 transition-colors"
|
||||
>
|
||||
<Plus size={14} />
|
||||
Add Output
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Validation Status */}
|
||||
{validation && (
|
||||
<div
|
||||
className={`p-3 rounded-lg border ${
|
||||
validation.valid
|
||||
? 'bg-green-900/20 border-green-700'
|
||||
: 'bg-red-900/20 border-red-700'
|
||||
}`}
|
||||
>
|
||||
<div className="flex items-center gap-2">
|
||||
{validation.valid ? (
|
||||
<CheckCircle size={16} className="text-green-400" />
|
||||
) : (
|
||||
<AlertCircle size={16} className="text-red-400" />
|
||||
)}
|
||||
<span
|
||||
className={`text-sm font-medium ${
|
||||
validation.valid ? 'text-green-400' : 'text-red-400'
|
||||
}`}
|
||||
>
|
||||
{validation.valid ? 'Code is valid' : 'Validation failed'}
|
||||
</span>
|
||||
</div>
|
||||
{validation.errors.length > 0 && (
|
||||
<ul className="mt-2 text-sm text-red-300 space-y-1">
|
||||
{validation.errors.map((err, i) => (
|
||||
<li key={i}>• {err}</li>
|
||||
))}
|
||||
</ul>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Right Column - Code Editor */}
|
||||
<div className="space-y-2">
|
||||
<div className="flex items-center justify-between">
|
||||
<label className={labelClass}>Python Code</label>
|
||||
<button
|
||||
onClick={validateCode}
|
||||
disabled={isValidating}
|
||||
className="flex items-center gap-1 px-3 py-1 bg-primary-600 hover:bg-primary-500
|
||||
text-white text-sm rounded-lg transition-colors disabled:opacity-50"
|
||||
>
|
||||
<Play size={14} />
|
||||
{isValidating ? 'Validating...' : 'Validate'}
|
||||
</button>
|
||||
</div>
|
||||
<textarea
|
||||
value={source}
|
||||
onChange={(e) => {
|
||||
setSource(e.target.value);
|
||||
setValidation(null);
|
||||
}}
|
||||
className={`${inputClass} h-[400px] font-mono text-sm resize-none`}
|
||||
spellCheck={false}
|
||||
/>
|
||||
<p className="text-xs text-dark-500">
|
||||
Available modules: numpy, scipy, pyNastran, math, statistics
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="flex items-center justify-end gap-3 px-6 py-4 border-t border-dark-700">
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="px-4 py-2 text-dark-300 hover:text-white hover:bg-dark-700 rounded-lg transition-colors"
|
||||
>
|
||||
Cancel
|
||||
</button>
|
||||
<button
|
||||
onClick={handleSave}
|
||||
className="px-4 py-2 bg-primary-600 hover:bg-primary-500 text-white rounded-lg transition-colors"
|
||||
>
|
||||
Save Extractor
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,255 @@
|
||||
/**
|
||||
* ErrorPanel - Displays optimization errors with recovery options
|
||||
*
|
||||
* Shows errors that occurred during optimization with:
|
||||
* - Error classification (NX crash, solver failure, etc.)
|
||||
* - Recovery suggestions
|
||||
* - Ability to dismiss individual errors
|
||||
* - Support for multiple simultaneous errors
|
||||
*/
|
||||
|
||||
import { useMemo } from 'react';
|
||||
import {
|
||||
X,
|
||||
AlertTriangle,
|
||||
AlertOctagon,
|
||||
RefreshCw,
|
||||
Minimize2,
|
||||
Maximize2,
|
||||
Trash2,
|
||||
Bug,
|
||||
Cpu,
|
||||
FileWarning,
|
||||
Settings,
|
||||
Server,
|
||||
} from 'lucide-react';
|
||||
import { useErrorPanel, usePanelStore, OptimizationError } from '../../../hooks/usePanelStore';
|
||||
|
||||
interface ErrorPanelProps {
|
||||
onClose: () => void;
|
||||
onRetry?: (trial?: number) => void;
|
||||
onSkipTrial?: (trial: number) => void;
|
||||
}
|
||||
|
||||
export function ErrorPanel({ onClose, onRetry, onSkipTrial }: ErrorPanelProps) {
|
||||
const panel = useErrorPanel();
|
||||
const { minimizePanel, dismissError, clearErrors } = usePanelStore();
|
||||
|
||||
const sortedErrors = useMemo(() => {
|
||||
return [...panel.errors].sort((a, b) => b.timestamp - a.timestamp);
|
||||
}, [panel.errors]);
|
||||
|
||||
if (!panel.open || panel.errors.length === 0) return null;
|
||||
|
||||
// Minimized view
|
||||
if (panel.minimized) {
|
||||
return (
|
||||
<div
|
||||
className="bg-dark-850 border border-red-500/50 rounded-lg shadow-xl flex items-center gap-2 px-3 py-2 cursor-pointer hover:bg-dark-800 transition-colors"
|
||||
onClick={() => minimizePanel('error')}
|
||||
>
|
||||
<AlertOctagon size={16} className="text-red-400" />
|
||||
<span className="text-sm text-white font-medium">
|
||||
{panel.errors.length} Error{panel.errors.length !== 1 ? 's' : ''}
|
||||
</span>
|
||||
<Maximize2 size={14} className="text-dark-400" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="bg-dark-850 border border-red-500/30 rounded-xl w-[420px] max-h-[500px] flex flex-col shadow-xl">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-dark-700 bg-red-500/5">
|
||||
<div className="flex items-center gap-2">
|
||||
<AlertOctagon size={18} className="text-red-400" />
|
||||
<span className="font-medium text-white">
|
||||
Optimization Errors ({panel.errors.length})
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
{panel.errors.length > 1 && (
|
||||
<button
|
||||
onClick={clearErrors}
|
||||
className="p-1.5 text-dark-400 hover:text-red-400 hover:bg-red-500/10 rounded transition-colors"
|
||||
title="Clear all errors"
|
||||
>
|
||||
<Trash2 size={14} />
|
||||
</button>
|
||||
)}
|
||||
<button
|
||||
onClick={() => minimizePanel('error')}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
title="Minimize"
|
||||
>
|
||||
<Minimize2 size={14} />
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
>
|
||||
<X size={14} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="flex-1 overflow-y-auto p-3 space-y-3">
|
||||
{sortedErrors.map((error) => (
|
||||
<ErrorItem
|
||||
key={error.timestamp}
|
||||
error={error}
|
||||
onDismiss={() => dismissError(error.timestamp)}
|
||||
onRetry={onRetry}
|
||||
onSkipTrial={onSkipTrial}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Error Item Component
|
||||
// ============================================================================
|
||||
|
||||
interface ErrorItemProps {
|
||||
error: OptimizationError;
|
||||
onDismiss: () => void;
|
||||
onRetry?: (trial?: number) => void;
|
||||
onSkipTrial?: (trial: number) => void;
|
||||
}
|
||||
|
||||
function ErrorItem({ error, onDismiss, onRetry, onSkipTrial }: ErrorItemProps) {
|
||||
const icon = getErrorIcon(error.type);
|
||||
const typeLabel = getErrorTypeLabel(error.type);
|
||||
const timeAgo = getTimeAgo(error.timestamp);
|
||||
|
||||
return (
|
||||
<div className="bg-dark-800 rounded-lg border border-dark-700 overflow-hidden">
|
||||
{/* Error header */}
|
||||
<div className="flex items-start gap-3 p-3">
|
||||
<div className="p-2 bg-red-500/10 rounded-lg flex-shrink-0">
|
||||
{icon}
|
||||
</div>
|
||||
<div className="flex-1 min-w-0">
|
||||
<div className="flex items-center gap-2 mb-1">
|
||||
<span className="text-xs font-medium text-red-400 uppercase tracking-wide">
|
||||
{typeLabel}
|
||||
</span>
|
||||
{error.trial !== undefined && (
|
||||
<span className="text-xs text-dark-500">
|
||||
Trial #{error.trial}
|
||||
</span>
|
||||
)}
|
||||
<span className="text-xs text-dark-600 ml-auto">
|
||||
{timeAgo}
|
||||
</span>
|
||||
</div>
|
||||
<p className="text-sm text-white">{error.message}</p>
|
||||
{error.details && (
|
||||
<p className="text-xs text-dark-400 mt-1 font-mono bg-dark-900 p-2 rounded mt-2 max-h-20 overflow-y-auto">
|
||||
{error.details}
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
<button
|
||||
onClick={onDismiss}
|
||||
className="p-1 text-dark-500 hover:text-white hover:bg-dark-700 rounded transition-colors flex-shrink-0"
|
||||
title="Dismiss"
|
||||
>
|
||||
<X size={14} />
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Suggestions */}
|
||||
{error.suggestions.length > 0 && (
|
||||
<div className="px-3 pb-3">
|
||||
<p className="text-xs text-dark-500 mb-1.5">Suggestions:</p>
|
||||
<ul className="text-xs text-dark-300 space-y-1">
|
||||
{error.suggestions.map((suggestion, idx) => (
|
||||
<li key={idx} className="flex items-start gap-1.5">
|
||||
<span className="text-dark-500">-</span>
|
||||
<span>{suggestion}</span>
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Actions */}
|
||||
{error.recoverable && (
|
||||
<div className="flex items-center gap-2 px-3 pb-3">
|
||||
{onRetry && (
|
||||
<button
|
||||
onClick={() => onRetry(error.trial)}
|
||||
className="flex items-center gap-1.5 px-3 py-1.5 bg-primary-600 hover:bg-primary-500
|
||||
text-white text-xs font-medium rounded transition-colors"
|
||||
>
|
||||
<RefreshCw size={12} />
|
||||
Retry{error.trial !== undefined ? ' Trial' : ''}
|
||||
</button>
|
||||
)}
|
||||
{onSkipTrial && error.trial !== undefined && (
|
||||
<button
|
||||
onClick={() => onSkipTrial(error.trial!)}
|
||||
className="flex items-center gap-1.5 px-3 py-1.5 bg-dark-700 hover:bg-dark-600
|
||||
text-dark-200 text-xs font-medium rounded transition-colors"
|
||||
>
|
||||
Skip Trial
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helper Functions
|
||||
// ============================================================================
|
||||
|
||||
function getErrorIcon(type: OptimizationError['type']) {
|
||||
switch (type) {
|
||||
case 'nx_crash':
|
||||
return <Cpu size={16} className="text-red-400" />;
|
||||
case 'solver_fail':
|
||||
return <AlertTriangle size={16} className="text-amber-400" />;
|
||||
case 'extractor_error':
|
||||
return <FileWarning size={16} className="text-orange-400" />;
|
||||
case 'config_error':
|
||||
return <Settings size={16} className="text-blue-400" />;
|
||||
case 'system_error':
|
||||
return <Server size={16} className="text-purple-400" />;
|
||||
default:
|
||||
return <Bug size={16} className="text-red-400" />;
|
||||
}
|
||||
}
|
||||
|
||||
function getErrorTypeLabel(type: OptimizationError['type']) {
|
||||
switch (type) {
|
||||
case 'nx_crash':
|
||||
return 'NX Crash';
|
||||
case 'solver_fail':
|
||||
return 'Solver Failure';
|
||||
case 'extractor_error':
|
||||
return 'Extractor Error';
|
||||
case 'config_error':
|
||||
return 'Configuration Error';
|
||||
case 'system_error':
|
||||
return 'System Error';
|
||||
default:
|
||||
return 'Unknown Error';
|
||||
}
|
||||
}
|
||||
|
||||
function getTimeAgo(timestamp: number): string {
|
||||
const seconds = Math.floor((Date.now() - timestamp) / 1000);
|
||||
|
||||
if (seconds < 60) return 'just now';
|
||||
if (seconds < 3600) return `${Math.floor(seconds / 60)}m ago`;
|
||||
if (seconds < 86400) return `${Math.floor(seconds / 3600)}h ago`;
|
||||
return `${Math.floor(seconds / 86400)}d ago`;
|
||||
}
|
||||
|
||||
export default ErrorPanel;
|
||||
@@ -0,0 +1,485 @@
|
||||
/**
|
||||
* FloatingIntrospectionPanel - Persistent introspection panel using store
|
||||
*
|
||||
* This is a wrapper around the existing IntrospectionPanel that:
|
||||
* 1. Gets its state from usePanelStore instead of local state
|
||||
* 2. Persists data when the panel is closed and reopened
|
||||
* 3. Can be opened from anywhere without losing state
|
||||
*/
|
||||
|
||||
import { useState, useEffect, useCallback, useMemo } from 'react';
|
||||
import {
|
||||
X,
|
||||
Search,
|
||||
RefreshCw,
|
||||
Plus,
|
||||
ChevronDown,
|
||||
ChevronRight,
|
||||
Cpu,
|
||||
SlidersHorizontal,
|
||||
Scale,
|
||||
Minimize2,
|
||||
Maximize2,
|
||||
} from 'lucide-react';
|
||||
import {
|
||||
useIntrospectionPanel,
|
||||
usePanelStore,
|
||||
} from '../../../hooks/usePanelStore';
|
||||
import { useSpecStore } from '../../../hooks/useSpecStore';
|
||||
|
||||
interface FloatingIntrospectionPanelProps {
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
// Reuse types from original IntrospectionPanel
|
||||
interface Expression {
|
||||
name: string;
|
||||
value: number;
|
||||
rhs?: string;
|
||||
min?: number;
|
||||
max?: number;
|
||||
unit?: string;
|
||||
units?: string;
|
||||
type: string;
|
||||
source?: string;
|
||||
}
|
||||
|
||||
interface ExpressionsResult {
|
||||
user: Expression[];
|
||||
internal: Expression[];
|
||||
total_count: number;
|
||||
user_count: number;
|
||||
}
|
||||
|
||||
interface IntrospectionResult {
|
||||
solver_type?: string;
|
||||
expressions?: ExpressionsResult;
|
||||
// Allow other properties from the API response
|
||||
file_deps?: unknown[];
|
||||
fea_results?: unknown[];
|
||||
fem_mesh?: unknown;
|
||||
sim_solutions?: unknown[];
|
||||
sim_bcs?: unknown[];
|
||||
mass_properties?: {
|
||||
total_mass?: number;
|
||||
center_of_gravity?: { x: number; y: number; z: number };
|
||||
[key: string]: unknown;
|
||||
};
|
||||
}
|
||||
|
||||
interface ModelFileInfo {
|
||||
name: string;
|
||||
stem: string;
|
||||
type: string;
|
||||
description?: string;
|
||||
size_kb: number;
|
||||
has_cache: boolean;
|
||||
}
|
||||
|
||||
interface ModelFilesResponse {
|
||||
files: {
|
||||
sim: ModelFileInfo[];
|
||||
afm: ModelFileInfo[];
|
||||
fem: ModelFileInfo[];
|
||||
idealized: ModelFileInfo[];
|
||||
prt: ModelFileInfo[];
|
||||
};
|
||||
all_files: ModelFileInfo[];
|
||||
}
|
||||
|
||||
export function FloatingIntrospectionPanel({ onClose }: FloatingIntrospectionPanelProps) {
|
||||
const panel = useIntrospectionPanel();
|
||||
const {
|
||||
minimizePanel,
|
||||
updateIntrospectionResult,
|
||||
setIntrospectionLoading,
|
||||
setIntrospectionError,
|
||||
setIntrospectionFile,
|
||||
} = usePanelStore();
|
||||
const { addNode } = useSpecStore();
|
||||
|
||||
// Local UI state
|
||||
const [expandedSections, setExpandedSections] = useState<Set<string>>(
|
||||
new Set(['expressions', 'extractors', 'file_deps', 'fea_results', 'fem_mesh', 'sim_solutions', 'sim_bcs'])
|
||||
);
|
||||
const [searchTerm, setSearchTerm] = useState('');
|
||||
const [modelFiles, setModelFiles] = useState<ModelFilesResponse | null>(null);
|
||||
const [isLoadingFiles, setIsLoadingFiles] = useState(false);
|
||||
|
||||
const data = panel.data;
|
||||
const result = data?.result as IntrospectionResult | undefined;
|
||||
const isLoading = data?.isLoading || false;
|
||||
const error = data?.error as string | null;
|
||||
|
||||
// Fetch available files when studyId changes
|
||||
const fetchAvailableFiles = useCallback(async () => {
|
||||
if (!data?.studyId) return;
|
||||
|
||||
setIsLoadingFiles(true);
|
||||
try {
|
||||
const res = await fetch(`/api/optimization/studies/${data.studyId}/nx/parts`);
|
||||
if (res.ok) {
|
||||
const filesData = await res.json();
|
||||
setModelFiles(filesData);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Failed to fetch model files:', e);
|
||||
} finally {
|
||||
setIsLoadingFiles(false);
|
||||
}
|
||||
}, [data?.studyId]);
|
||||
|
||||
// Run introspection
|
||||
const runIntrospection = useCallback(async (fileName?: string) => {
|
||||
if (!data?.filePath && !data?.studyId) return;
|
||||
|
||||
setIntrospectionLoading(true);
|
||||
setIntrospectionError(null);
|
||||
|
||||
try {
|
||||
let res;
|
||||
|
||||
if (data?.studyId) {
|
||||
const endpoint = fileName
|
||||
? `/api/optimization/studies/${data.studyId}/nx/introspect/${encodeURIComponent(fileName)}`
|
||||
: `/api/optimization/studies/${data.studyId}/nx/introspect`;
|
||||
res = await fetch(endpoint);
|
||||
} else {
|
||||
res = await fetch('/api/nx/introspect', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ file_path: data?.filePath }),
|
||||
});
|
||||
}
|
||||
|
||||
if (!res.ok) {
|
||||
const errData = await res.json().catch(() => ({}));
|
||||
throw new Error(errData.detail || 'Introspection failed');
|
||||
}
|
||||
|
||||
const responseData = await res.json();
|
||||
updateIntrospectionResult(responseData.introspection || responseData);
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : 'Failed to introspect model';
|
||||
setIntrospectionError(msg);
|
||||
console.error('Introspection error:', e);
|
||||
}
|
||||
}, [data?.filePath, data?.studyId, setIntrospectionLoading, setIntrospectionError, updateIntrospectionResult]);
|
||||
|
||||
// Fetch files list on mount
|
||||
useEffect(() => {
|
||||
fetchAvailableFiles();
|
||||
}, [fetchAvailableFiles]);
|
||||
|
||||
// Run introspection when panel opens or selected file changes
|
||||
useEffect(() => {
|
||||
if (panel.open && data && !result && !isLoading) {
|
||||
runIntrospection(data.selectedFile);
|
||||
}
|
||||
}, [panel.open, data?.selectedFile]); // eslint-disable-line react-hooks/exhaustive-deps
|
||||
|
||||
const handleFileChange = (e: React.ChangeEvent<HTMLSelectElement>) => {
|
||||
const newFile = e.target.value;
|
||||
setIntrospectionFile(newFile);
|
||||
runIntrospection(newFile);
|
||||
};
|
||||
|
||||
const toggleSection = (section: string) => {
|
||||
setExpandedSections((prev) => {
|
||||
const next = new Set(prev);
|
||||
if (next.has(section)) next.delete(section);
|
||||
else next.add(section);
|
||||
return next;
|
||||
});
|
||||
};
|
||||
|
||||
// Handle both array format (old) and object format (new API)
|
||||
const allExpressions: Expression[] = useMemo(() => {
|
||||
if (!result?.expressions) return [];
|
||||
|
||||
if (Array.isArray(result.expressions)) {
|
||||
return result.expressions as Expression[];
|
||||
}
|
||||
|
||||
const exprObj = result.expressions as ExpressionsResult;
|
||||
return [...(exprObj.user || []), ...(exprObj.internal || [])];
|
||||
}, [result?.expressions]);
|
||||
|
||||
const filteredExpressions = allExpressions.filter((e) =>
|
||||
e.name.toLowerCase().includes(searchTerm.toLowerCase())
|
||||
);
|
||||
|
||||
const addExpressionAsDesignVar = (expr: Expression) => {
|
||||
const minValue = expr.min ?? expr.value * 0.5;
|
||||
const maxValue = expr.max ?? expr.value * 1.5;
|
||||
|
||||
addNode('designVar', {
|
||||
name: expr.name,
|
||||
expression_name: expr.name,
|
||||
type: 'continuous',
|
||||
bounds: { min: minValue, max: maxValue },
|
||||
baseline: expr.value,
|
||||
units: expr.unit || expr.units,
|
||||
enabled: true,
|
||||
});
|
||||
};
|
||||
|
||||
if (!panel.open) return null;
|
||||
|
||||
// Minimized view
|
||||
if (panel.minimized) {
|
||||
return (
|
||||
<div
|
||||
className="bg-dark-850 border border-dark-700 rounded-lg shadow-xl flex items-center gap-2 px-3 py-2 cursor-pointer hover:bg-dark-800 transition-colors"
|
||||
onClick={() => minimizePanel('introspection')}
|
||||
>
|
||||
<Search size={16} className="text-primary-400" />
|
||||
<span className="text-sm text-white font-medium">
|
||||
Model Introspection
|
||||
{data?.selectedFile && <span className="text-dark-400 ml-1">({data.selectedFile})</span>}
|
||||
</span>
|
||||
<Maximize2 size={14} className="text-dark-400" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="bg-dark-850 border border-dark-700 rounded-xl w-80 max-h-[70vh] flex flex-col shadow-xl">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-dark-700">
|
||||
<div className="flex items-center gap-2">
|
||||
<Search size={16} className="text-primary-400" />
|
||||
<span className="font-medium text-white text-sm">
|
||||
Model Introspection
|
||||
{data?.selectedFile && <span className="text-primary-400 ml-1">({data.selectedFile})</span>}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
<button
|
||||
onClick={() => runIntrospection(data?.selectedFile)}
|
||||
disabled={isLoading}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
title="Refresh"
|
||||
>
|
||||
<RefreshCw size={14} className={isLoading ? 'animate-spin' : ''} />
|
||||
</button>
|
||||
<button
|
||||
onClick={() => minimizePanel('introspection')}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
title="Minimize"
|
||||
>
|
||||
<Minimize2 size={14} />
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
>
|
||||
<X size={14} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* File Selector + Search */}
|
||||
<div className="px-4 py-2 border-b border-dark-700 space-y-2">
|
||||
{data?.studyId && modelFiles && modelFiles.all_files.length > 0 && (
|
||||
<div className="flex items-center gap-2">
|
||||
<label className="text-xs text-dark-400 whitespace-nowrap">File:</label>
|
||||
<select
|
||||
value={data?.selectedFile || ''}
|
||||
onChange={handleFileChange}
|
||||
disabled={isLoading || isLoadingFiles}
|
||||
className="flex-1 px-2 py-1.5 bg-dark-800 border border-dark-600 rounded-lg
|
||||
text-sm text-white focus:outline-none focus:border-primary-500
|
||||
disabled:opacity-50"
|
||||
>
|
||||
<option value="">Default (Assembly)</option>
|
||||
|
||||
{modelFiles.files.sim.length > 0 && (
|
||||
<optgroup label="Simulation (.sim)">
|
||||
{modelFiles.files.sim.map(f => (
|
||||
<option key={f.name} value={f.name}>
|
||||
{f.stem} ({f.size_kb > 1000 ? `${(f.size_kb/1024).toFixed(1)}MB` : `${f.size_kb}KB`})
|
||||
</option>
|
||||
))}
|
||||
</optgroup>
|
||||
)}
|
||||
|
||||
{modelFiles.files.afm.length > 0 && (
|
||||
<optgroup label="Assembly FEM (.afm)">
|
||||
{modelFiles.files.afm.map(f => (
|
||||
<option key={f.name} value={f.name}>
|
||||
{f.stem} ({f.size_kb > 1000 ? `${(f.size_kb/1024).toFixed(1)}MB` : `${f.size_kb}KB`})
|
||||
</option>
|
||||
))}
|
||||
</optgroup>
|
||||
)}
|
||||
|
||||
{modelFiles.files.fem.length > 0 && (
|
||||
<optgroup label="FEM (.fem)">
|
||||
{modelFiles.files.fem.map(f => (
|
||||
<option key={f.name} value={f.name}>
|
||||
{f.stem} ({f.size_kb > 1000 ? `${(f.size_kb/1024).toFixed(1)}MB` : `${f.size_kb}KB`})
|
||||
</option>
|
||||
))}
|
||||
</optgroup>
|
||||
)}
|
||||
|
||||
{modelFiles.files.prt.length > 0 && (
|
||||
<optgroup label="Geometry (.prt)">
|
||||
{modelFiles.files.prt.map(f => (
|
||||
<option key={f.name} value={f.name}>
|
||||
{f.stem} ({f.size_kb > 1000 ? `${(f.size_kb/1024).toFixed(1)}MB` : `${f.size_kb}KB`})
|
||||
</option>
|
||||
))}
|
||||
</optgroup>
|
||||
)}
|
||||
|
||||
{modelFiles.files.idealized.length > 0 && (
|
||||
<optgroup label="Idealized (_i.prt)">
|
||||
{modelFiles.files.idealized.map(f => (
|
||||
<option key={f.name} value={f.name}>
|
||||
{f.stem} ({f.size_kb > 1000 ? `${(f.size_kb/1024).toFixed(1)}MB` : `${f.size_kb}KB`})
|
||||
</option>
|
||||
))}
|
||||
</optgroup>
|
||||
)}
|
||||
</select>
|
||||
{isLoadingFiles && (
|
||||
<RefreshCw size={12} className="animate-spin text-dark-400" />
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<input
|
||||
type="text"
|
||||
placeholder="Filter expressions..."
|
||||
value={searchTerm}
|
||||
onChange={(e) => setSearchTerm(e.target.value)}
|
||||
className="w-full px-3 py-1.5 bg-dark-800 border border-dark-600 rounded-lg
|
||||
text-sm text-white placeholder-dark-500 focus:outline-none focus:border-primary-500"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="flex-1 overflow-auto">
|
||||
{isLoading ? (
|
||||
<div className="flex items-center justify-center h-32 text-dark-500">
|
||||
<RefreshCw size={20} className="animate-spin mr-2" />
|
||||
Analyzing model...
|
||||
</div>
|
||||
) : error ? (
|
||||
<div className="p-4 text-red-400 text-sm">{error}</div>
|
||||
) : result ? (
|
||||
<div className="p-2 space-y-2">
|
||||
{/* Solver Type */}
|
||||
{result.solver_type && (
|
||||
<div className="p-2 bg-dark-800 rounded-lg">
|
||||
<div className="flex items-center gap-2 text-sm">
|
||||
<Cpu size={14} className="text-violet-400" />
|
||||
<span className="text-dark-300">Solver:</span>
|
||||
<span className="text-white font-medium">{result.solver_type as string}</span>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Expressions Section */}
|
||||
<div className="border border-dark-700 rounded-lg overflow-hidden">
|
||||
<button
|
||||
onClick={() => toggleSection('expressions')}
|
||||
className="w-full flex items-center justify-between px-3 py-2 bg-dark-800 hover:bg-dark-750 transition-colors"
|
||||
>
|
||||
<div className="flex items-center gap-2">
|
||||
<SlidersHorizontal size={14} className="text-emerald-400" />
|
||||
<span className="text-sm font-medium text-white">
|
||||
Expressions ({filteredExpressions.length})
|
||||
</span>
|
||||
</div>
|
||||
{expandedSections.has('expressions') ? (
|
||||
<ChevronDown size={14} className="text-dark-400" />
|
||||
) : (
|
||||
<ChevronRight size={14} className="text-dark-400" />
|
||||
)}
|
||||
</button>
|
||||
|
||||
{expandedSections.has('expressions') && (
|
||||
<div className="p-2 space-y-1 max-h-48 overflow-y-auto">
|
||||
{filteredExpressions.length === 0 ? (
|
||||
<p className="text-xs text-dark-500 text-center py-2">
|
||||
No expressions found
|
||||
</p>
|
||||
) : (
|
||||
filteredExpressions.map((expr) => (
|
||||
<div
|
||||
key={expr.name}
|
||||
className="flex items-center justify-between p-2 bg-dark-850 rounded hover:bg-dark-750 group transition-colors"
|
||||
>
|
||||
<div className="flex-1 min-w-0">
|
||||
<p className="text-sm text-white truncate">{expr.name}</p>
|
||||
<p className="text-xs text-dark-500">
|
||||
{expr.value} {expr.units || expr.unit || ''}
|
||||
{expr.source === 'inferred' && (
|
||||
<span className="ml-1 text-amber-500">(inferred)</span>
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
<button
|
||||
onClick={() => addExpressionAsDesignVar(expr)}
|
||||
className="p-1.5 text-dark-500 hover:text-primary-400 hover:bg-dark-700 rounded
|
||||
opacity-0 group-hover:opacity-100 transition-all"
|
||||
title="Add as Design Variable"
|
||||
>
|
||||
<Plus size={14} />
|
||||
</button>
|
||||
</div>
|
||||
))
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Mass Properties Section */}
|
||||
{result.mass_properties && (
|
||||
<div className="border border-dark-700 rounded-lg overflow-hidden">
|
||||
<button
|
||||
onClick={() => toggleSection('mass')}
|
||||
className="w-full flex items-center justify-between px-3 py-2 bg-dark-800 hover:bg-dark-750 transition-colors"
|
||||
>
|
||||
<div className="flex items-center gap-2">
|
||||
<Scale size={14} className="text-blue-400" />
|
||||
<span className="text-sm font-medium text-white">Mass Properties</span>
|
||||
</div>
|
||||
{expandedSections.has('mass') ? (
|
||||
<ChevronDown size={14} className="text-dark-400" />
|
||||
) : (
|
||||
<ChevronRight size={14} className="text-dark-400" />
|
||||
)}
|
||||
</button>
|
||||
|
||||
{expandedSections.has('mass') && (
|
||||
<div className="p-2 space-y-1">
|
||||
{(result.mass_properties as Record<string, unknown>).mass_kg !== undefined && (
|
||||
<div className="flex justify-between p-2 bg-dark-850 rounded text-xs">
|
||||
<span className="text-dark-400">Mass</span>
|
||||
<span className="text-white font-mono">
|
||||
{((result.mass_properties as Record<string, unknown>).mass_kg as number).toFixed(4)} kg
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* More sections can be added here following the same pattern as the original IntrospectionPanel */}
|
||||
</div>
|
||||
) : (
|
||||
<div className="p-4 text-center text-dark-500 text-sm">
|
||||
Click refresh to analyze the model
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default FloatingIntrospectionPanel;
|
||||
File diff suppressed because it is too large
Load Diff
@@ -17,8 +17,8 @@ import {
|
||||
useSelectedNodeId,
|
||||
useSelectedNode,
|
||||
} from '../../../hooks/useSpecStore';
|
||||
import { usePanelStore } from '../../../hooks/usePanelStore';
|
||||
import { FileBrowser } from './FileBrowser';
|
||||
import { IntrospectionPanel } from './IntrospectionPanel';
|
||||
import {
|
||||
DesignVariable,
|
||||
Extractor,
|
||||
@@ -43,7 +43,6 @@ export function NodeConfigPanelV2({ onClose }: NodeConfigPanelV2Props) {
|
||||
const { updateNode, removeNode, clearSelection } = useSpecStore();
|
||||
|
||||
const [showFileBrowser, setShowFileBrowser] = useState(false);
|
||||
const [showIntrospection, setShowIntrospection] = useState(false);
|
||||
const [isUpdating, setIsUpdating] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
@@ -249,15 +248,7 @@ export function NodeConfigPanelV2({ onClose }: NodeConfigPanelV2Props) {
|
||||
fileTypes={['.sim', '.prt', '.fem', '.afem']}
|
||||
/>
|
||||
|
||||
{/* Introspection Panel */}
|
||||
{showIntrospection && spec.model.sim?.path && (
|
||||
<div className="fixed top-20 right-96 z-40">
|
||||
<IntrospectionPanel
|
||||
filePath={spec.model.sim.path}
|
||||
onClose={() => setShowIntrospection(false)}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{/* Introspection is now handled by FloatingIntrospectionPanel via usePanelStore */}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -271,7 +262,16 @@ interface SpecConfigProps {
|
||||
}
|
||||
|
||||
function ModelNodeConfig({ spec }: SpecConfigProps) {
|
||||
const [showIntrospection, setShowIntrospection] = useState(false);
|
||||
const { setIntrospectionData, openPanel } = usePanelStore();
|
||||
|
||||
const handleOpenIntrospection = () => {
|
||||
// Set up introspection data and open the panel
|
||||
setIntrospectionData({
|
||||
filePath: spec.model.sim?.path || '',
|
||||
studyId: useSpecStore.getState().studyId || undefined,
|
||||
});
|
||||
openPanel('introspection');
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
@@ -299,7 +299,7 @@ function ModelNodeConfig({ spec }: SpecConfigProps) {
|
||||
|
||||
{spec.model.sim?.path && (
|
||||
<button
|
||||
onClick={() => setShowIntrospection(true)}
|
||||
onClick={handleOpenIntrospection}
|
||||
className="w-full flex items-center justify-center gap-2 px-3 py-2.5 bg-primary-500/20
|
||||
hover:bg-primary-500/30 border border-primary-500/30 rounded-lg
|
||||
text-primary-400 text-sm font-medium transition-colors"
|
||||
@@ -308,32 +308,113 @@ function ModelNodeConfig({ spec }: SpecConfigProps) {
|
||||
Introspect Model
|
||||
</button>
|
||||
)}
|
||||
|
||||
{showIntrospection && spec.model.sim?.path && (
|
||||
<div className="fixed top-20 right-96 z-40">
|
||||
<IntrospectionPanel
|
||||
filePath={spec.model.sim.path}
|
||||
onClose={() => setShowIntrospection(false)}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Note: IntrospectionPanel is now rendered by PanelContainer, not here */}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
function SolverNodeConfig({ spec }: SpecConfigProps) {
|
||||
const { patchSpec } = useSpecStore();
|
||||
const [isUpdating, setIsUpdating] = useState(false);
|
||||
|
||||
const engine = spec.model.sim?.engine || 'nxnastran';
|
||||
const solutionType = spec.model.sim?.solution_type || 'SOL101';
|
||||
const scriptPath = spec.model.sim?.script_path || '';
|
||||
const isPython = engine === 'python';
|
||||
|
||||
const handleEngineChange = async (newEngine: string) => {
|
||||
setIsUpdating(true);
|
||||
try {
|
||||
await patchSpec('model.sim.engine', newEngine);
|
||||
} catch (err) {
|
||||
console.error('Failed to update engine:', err);
|
||||
} finally {
|
||||
setIsUpdating(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleSolutionTypeChange = async (newType: string) => {
|
||||
setIsUpdating(true);
|
||||
try {
|
||||
await patchSpec('model.sim.solution_type', newType);
|
||||
} catch (err) {
|
||||
console.error('Failed to update solution type:', err);
|
||||
} finally {
|
||||
setIsUpdating(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleScriptPathChange = async (newPath: string) => {
|
||||
setIsUpdating(true);
|
||||
try {
|
||||
await patchSpec('model.sim.script_path', newPath);
|
||||
} catch (err) {
|
||||
console.error('Failed to update script path:', err);
|
||||
} finally {
|
||||
setIsUpdating(false);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<label className={labelClass}>Solution Type</label>
|
||||
<input
|
||||
type="text"
|
||||
value={spec.model.sim?.solution_type || 'Not configured'}
|
||||
readOnly
|
||||
className={`${inputClass} bg-dark-900 cursor-not-allowed`}
|
||||
title="Solver type is determined by the model file."
|
||||
/>
|
||||
<p className="text-xs text-dark-500 mt-1">Detected from model file.</p>
|
||||
</div>
|
||||
<>
|
||||
{isUpdating && (
|
||||
<div className="text-xs text-primary-400 animate-pulse">Updating...</div>
|
||||
)}
|
||||
|
||||
<div>
|
||||
<label className={labelClass}>Solver Engine</label>
|
||||
<select
|
||||
value={engine}
|
||||
onChange={(e) => handleEngineChange(e.target.value)}
|
||||
className={selectClass}
|
||||
>
|
||||
<option value="nxnastran">NX Nastran (built-in)</option>
|
||||
<option value="mscnastran">MSC Nastran (external)</option>
|
||||
<option value="python">Python Script</option>
|
||||
<option value="abaqus" disabled>Abaqus (coming soon)</option>
|
||||
<option value="ansys" disabled>ANSYS (coming soon)</option>
|
||||
</select>
|
||||
<p className="text-xs text-dark-500 mt-1">
|
||||
{isPython ? 'Run custom Python analysis script' : 'Select FEA solver software'}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{!isPython && (
|
||||
<div>
|
||||
<label className={labelClass}>Solution Type</label>
|
||||
<select
|
||||
value={solutionType}
|
||||
onChange={(e) => handleSolutionTypeChange(e.target.value)}
|
||||
className={selectClass}
|
||||
>
|
||||
<option value="SOL101">SOL101 - Linear Statics</option>
|
||||
<option value="SOL103">SOL103 - Normal Modes</option>
|
||||
<option value="SOL105">SOL105 - Buckling</option>
|
||||
<option value="SOL106">SOL106 - Nonlinear Statics</option>
|
||||
<option value="SOL111">SOL111 - Modal Frequency Response</option>
|
||||
<option value="SOL112">SOL112 - Modal Transient Response</option>
|
||||
<option value="SOL200">SOL200 - Design Optimization</option>
|
||||
</select>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{isPython && (
|
||||
<div>
|
||||
<label className={labelClass}>Script Path</label>
|
||||
<input
|
||||
type="text"
|
||||
value={scriptPath}
|
||||
onChange={(e) => handleScriptPathChange(e.target.value)}
|
||||
placeholder="path/to/solver_script.py"
|
||||
className={`${inputClass} font-mono text-sm`}
|
||||
/>
|
||||
<p className="text-xs text-dark-500 mt-1">
|
||||
Python script must define solve(params) function
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -694,38 +775,21 @@ function ExtractorNodeConfig({ node, onChange }: ExtractorNodeConfigProps) {
|
||||
{showCodeEditor && (
|
||||
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/60 backdrop-blur-sm">
|
||||
<div className="w-[900px] h-[700px] bg-dark-850 rounded-xl overflow-hidden shadow-2xl border border-dark-600 flex flex-col">
|
||||
{/* Modal Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-dark-700 bg-dark-900">
|
||||
<div className="flex items-center gap-3">
|
||||
<FileCode size={18} className="text-violet-400" />
|
||||
<span className="font-medium text-white">Custom Extractor: {node.name}</span>
|
||||
<span className="text-xs text-dark-500 bg-dark-800 px-2 py-0.5 rounded">.py</span>
|
||||
</div>
|
||||
<button
|
||||
onClick={() => setShowCodeEditor(false)}
|
||||
className="p-1.5 rounded hover:bg-dark-700 text-dark-400 hover:text-white transition-colors"
|
||||
>
|
||||
<X size={18} />
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Code Editor */}
|
||||
<div className="flex-1">
|
||||
<CodeEditorPanel
|
||||
initialCode={currentCode}
|
||||
extractorName={node.name}
|
||||
outputs={node.outputs?.map(o => o.name) || []}
|
||||
onChange={handleCodeChange}
|
||||
onRequestGeneration={handleRequestGeneration}
|
||||
onRequestStreamingGeneration={handleStreamingGeneration}
|
||||
onRun={handleValidateCode}
|
||||
onTest={handleTestCode}
|
||||
onClose={() => setShowCodeEditor(false)}
|
||||
showHeader={false}
|
||||
height="100%"
|
||||
studyId={studyId || undefined}
|
||||
/>
|
||||
</div>
|
||||
{/* Code Editor with built-in header containing toolbar buttons */}
|
||||
<CodeEditorPanel
|
||||
initialCode={currentCode}
|
||||
extractorName={`Custom Extractor: ${node.name}`}
|
||||
outputs={node.outputs?.map(o => o.name) || []}
|
||||
onChange={handleCodeChange}
|
||||
onRequestGeneration={handleRequestGeneration}
|
||||
onRequestStreamingGeneration={handleStreamingGeneration}
|
||||
onRun={handleValidateCode}
|
||||
onTest={handleTestCode}
|
||||
onClose={() => setShowCodeEditor(false)}
|
||||
showHeader={true}
|
||||
height="100%"
|
||||
studyId={studyId || undefined}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -0,0 +1,207 @@
|
||||
/**
|
||||
* PanelContainer - Orchestrates all floating panels in the canvas view
|
||||
*
|
||||
* This component renders floating panels (Introspection, Validation, Error, Results)
|
||||
* in a portal, positioned absolutely within the canvas area.
|
||||
*
|
||||
* Features:
|
||||
* - Draggable panels
|
||||
* - Z-index management (click to bring to front)
|
||||
* - Keyboard shortcuts (Escape to close all)
|
||||
* - Position persistence via usePanelStore
|
||||
*/
|
||||
|
||||
import { useState, useCallback, useEffect, useRef } from 'react';
|
||||
import { createPortal } from 'react-dom';
|
||||
import {
|
||||
usePanelStore,
|
||||
useIntrospectionPanel,
|
||||
useValidationPanel,
|
||||
useErrorPanel,
|
||||
useResultsPanel,
|
||||
PanelPosition,
|
||||
} from '../../../hooks/usePanelStore';
|
||||
import { FloatingIntrospectionPanel } from './FloatingIntrospectionPanel';
|
||||
import { FloatingValidationPanel } from './ValidationPanel';
|
||||
import { ErrorPanel } from './ErrorPanel';
|
||||
import { ResultsPanel } from './ResultsPanel';
|
||||
|
||||
/** Props for the floating-panel orchestrator. */
interface PanelContainerProps {
  /** Container element to render panels into (defaults to document.body) */
  container?: HTMLElement;
  /** Callback when retry is requested from error panel */
  onRetry?: (trial?: number) => void;
  /** Callback when skip trial is requested */
  onSkipTrial?: (trial: number) => void;
}

/** Identifiers of the four managed floating panels. */
type PanelName = 'introspection' | 'validation' | 'error' | 'results';
|
||||
|
||||
/**
 * Orchestrates the four floating panels (introspection, validation, error,
 * results): renders each open panel in a draggable wrapper, manages z-order
 * (last clicked/dragged panel on top), clamps drag positions to the viewport,
 * and closes everything on Escape. Panel open state and persisted positions
 * come from usePanelStore.
 */
export function PanelContainer({ container, onRetry, onSkipTrial }: PanelContainerProps) {
  const { closePanel, setPanelPosition, closeAllPanels } = usePanelStore();

  const introspectionPanel = useIntrospectionPanel();
  const validationPanel = useValidationPanel();
  const errorPanel = useErrorPanel();
  const resultsPanel = useResultsPanel();

  // Track which panel is on top (for z-index)
  const [topPanel, setTopPanel] = useState<PanelName | null>(null);

  // Dragging state. `dragging` (state) re-runs the listener effect below;
  // `dragRef` mirrors it so the mousemove handler reads the latest value
  // without being recreated on every render.
  const [dragging, setDragging] = useState<{ panel: PanelName; offset: { x: number; y: number } } | null>(null);
  const dragRef = useRef<{ panel: PanelName; offset: { x: number; y: number } } | null>(null);

  // Escape key to close all panels
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      if (e.key === 'Escape') {
        closeAllPanels();
      }
    };
    window.addEventListener('keydown', handleKeyDown);
    return () => window.removeEventListener('keydown', handleKeyDown);
  }, [closeAllPanels]);

  // Mouse move handler for dragging. Listeners are attached only while a
  // drag is active; the cleanup removes them when the drag ends (state
  // change re-runs the effect) or on unmount.
  useEffect(() => {
    const handleMouseMove = (e: MouseEvent) => {
      if (!dragRef.current) return;

      const { panel, offset } = dragRef.current;
      const newPosition: PanelPosition = {
        x: e.clientX - offset.x,
        y: e.clientY - offset.y,
      };

      // Clamp to viewport (keep at least 100x50 px of the panel reachable)
      newPosition.x = Math.max(0, Math.min(window.innerWidth - 100, newPosition.x));
      newPosition.y = Math.max(0, Math.min(window.innerHeight - 50, newPosition.y));

      setPanelPosition(panel, newPosition);
    };

    const handleMouseUp = () => {
      dragRef.current = null;
      setDragging(null);
    };

    if (dragging) {
      window.addEventListener('mousemove', handleMouseMove);
      window.addEventListener('mouseup', handleMouseUp);
    }

    return () => {
      window.removeEventListener('mousemove', handleMouseMove);
      window.removeEventListener('mouseup', handleMouseUp);
    };
  }, [dragging, setPanelPosition]);

  // Start dragging a panel: record the cursor offset inside the panel so the
  // panel doesn't jump to the cursor, and bring it to the front.
  const handleDragStart = useCallback((panel: PanelName, e: React.MouseEvent, position: PanelPosition) => {
    const offset = {
      x: e.clientX - position.x,
      y: e.clientY - position.y,
    };
    dragRef.current = { panel, offset };
    setDragging({ panel, offset });
    setTopPanel(panel);
  }, []);

  // Click to bring panel to front
  const handlePanelClick = useCallback((panel: PanelName) => {
    setTopPanel(panel);
  }, []);

  // Get z-index for a panel (top panel floats +10 above the base layer)
  const getZIndex = (panel: PanelName) => {
    const baseZ = 100;
    if (panel === topPanel) return baseZ + 10;
    return baseZ;
  };

  // Render a draggable wrapper around a panel's content, or null when closed.
  const renderDraggable = (
    panel: PanelName,
    position: PanelPosition,
    isOpen: boolean,
    children: React.ReactNode
  ) => {
    if (!isOpen) return null;

    return (
      <div
        key={panel}
        className="fixed select-none"
        style={{
          left: position.x,
          top: position.y,
          zIndex: getZIndex(panel),
          cursor: dragging?.panel === panel ? 'grabbing' : 'default',
        }}
        onClick={() => handlePanelClick(panel)}
      >
        {/* Drag handle - an invisible overlay covering the header area (h-12) */}
        <div
          className="absolute top-0 left-0 right-0 h-12 cursor-grab active:cursor-grabbing"
          onMouseDown={(e) => handleDragStart(panel, e, position)}
          style={{ zIndex: 1 }}
        />
        {/* Panel content */}
        <div className="relative" style={{ zIndex: 0 }}>
          {children}
        </div>
      </div>
    );
  };

  // Determine what to render; each panel falls back to a staggered default
  // position until the store has persisted one.
  const panels = (
    <>
      {/* Introspection Panel */}
      {renderDraggable(
        'introspection',
        introspectionPanel.position || { x: 100, y: 100 },
        introspectionPanel.open,
        <FloatingIntrospectionPanel onClose={() => closePanel('introspection')} />
      )}

      {/* Validation Panel */}
      {renderDraggable(
        'validation',
        validationPanel.position || { x: 150, y: 150 },
        validationPanel.open,
        <FloatingValidationPanel onClose={() => closePanel('validation')} />
      )}

      {/* Error Panel */}
      {renderDraggable(
        'error',
        errorPanel.position || { x: 200, y: 100 },
        errorPanel.open,
        <ErrorPanel
          onClose={() => closePanel('error')}
          onRetry={onRetry}
          onSkipTrial={onSkipTrial}
        />
      )}

      {/* Results Panel */}
      {renderDraggable(
        'results',
        resultsPanel.position || { x: 250, y: 150 },
        resultsPanel.open,
        <ResultsPanel onClose={() => closePanel('results')} />
      )}
    </>
  );

  // Use portal if container specified, otherwise render in place
  if (container) {
    return createPortal(panels, container);
  }

  return panels;
}
|
||||
|
||||
export default PanelContainer;
|
||||
@@ -0,0 +1,179 @@
|
||||
/**
|
||||
* ResultsPanel - Shows detailed trial results
|
||||
*
|
||||
* Displays the parameters, objectives, and constraints for a specific trial.
|
||||
* Can be opened by clicking on result badges on nodes.
|
||||
*/
|
||||
|
||||
import {
|
||||
X,
|
||||
Minimize2,
|
||||
Maximize2,
|
||||
CheckCircle,
|
||||
XCircle,
|
||||
Trophy,
|
||||
SlidersHorizontal,
|
||||
Target,
|
||||
AlertTriangle,
|
||||
Clock,
|
||||
} from 'lucide-react';
|
||||
import { useResultsPanel, usePanelStore } from '../../../hooks/usePanelStore';
|
||||
|
||||
/** Props for the trial-results panel. */
interface ResultsPanelProps {
  /** Invoked when the user clicks the close (X) button in the header. */
  onClose: () => void;
}
|
||||
|
||||
export function ResultsPanel({ onClose }: ResultsPanelProps) {
|
||||
const panel = useResultsPanel();
|
||||
const { minimizePanel } = usePanelStore();
|
||||
const data = panel.data;
|
||||
|
||||
if (!panel.open || !data) return null;
|
||||
|
||||
const timestamp = new Date(data.timestamp).toLocaleTimeString();
|
||||
|
||||
// Minimized view
|
||||
if (panel.minimized) {
|
||||
return (
|
||||
<div
|
||||
className="bg-dark-850 border border-dark-700 rounded-lg shadow-xl flex items-center gap-2 px-3 py-2 cursor-pointer hover:bg-dark-800 transition-colors"
|
||||
onClick={() => minimizePanel('results')}
|
||||
>
|
||||
<Trophy size={16} className={data.isBest ? 'text-amber-400' : 'text-dark-400'} />
|
||||
<span className="text-sm text-white font-medium">
|
||||
Trial #{data.trialNumber}
|
||||
</span>
|
||||
<Maximize2 size={14} className="text-dark-400" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="bg-dark-850 border border-dark-700 rounded-xl w-80 max-h-[500px] flex flex-col shadow-xl">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-dark-700">
|
||||
<div className="flex items-center gap-2">
|
||||
<Trophy size={18} className={data.isBest ? 'text-amber-400' : 'text-dark-400'} />
|
||||
<span className="font-medium text-white">
|
||||
Trial #{data.trialNumber}
|
||||
</span>
|
||||
{data.isBest && (
|
||||
<span className="px-1.5 py-0.5 text-xs bg-amber-500/20 text-amber-400 rounded">
|
||||
Best
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
<button
|
||||
onClick={() => minimizePanel('results')}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
title="Minimize"
|
||||
>
|
||||
<Minimize2 size={14} />
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
>
|
||||
<X size={14} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="flex-1 overflow-y-auto p-3 space-y-4">
|
||||
{/* Status */}
|
||||
<div className="flex items-center gap-3">
|
||||
{data.isFeasible ? (
|
||||
<div className="flex items-center gap-1.5 text-green-400">
|
||||
<CheckCircle size={16} />
|
||||
<span className="text-sm font-medium">Feasible</span>
|
||||
</div>
|
||||
) : (
|
||||
<div className="flex items-center gap-1.5 text-red-400">
|
||||
<XCircle size={16} />
|
||||
<span className="text-sm font-medium">Infeasible</span>
|
||||
</div>
|
||||
)}
|
||||
<div className="flex items-center gap-1.5 text-dark-400 ml-auto">
|
||||
<Clock size={14} />
|
||||
<span className="text-xs">{timestamp}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Parameters */}
|
||||
<div>
|
||||
<h4 className="text-xs font-medium text-dark-400 uppercase tracking-wide mb-2 flex items-center gap-1.5">
|
||||
<SlidersHorizontal size={12} />
|
||||
Parameters
|
||||
</h4>
|
||||
<div className="space-y-1">
|
||||
{Object.entries(data.params).map(([name, value]) => (
|
||||
<div key={name} className="flex justify-between p-2 bg-dark-800 rounded text-sm">
|
||||
<span className="text-dark-300">{name}</span>
|
||||
<span className="text-white font-mono">{formatValue(value)}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Objectives */}
|
||||
<div>
|
||||
<h4 className="text-xs font-medium text-dark-400 uppercase tracking-wide mb-2 flex items-center gap-1.5">
|
||||
<Target size={12} />
|
||||
Objectives
|
||||
</h4>
|
||||
<div className="space-y-1">
|
||||
{Object.entries(data.objectives).map(([name, value]) => (
|
||||
<div key={name} className="flex justify-between p-2 bg-dark-800 rounded text-sm">
|
||||
<span className="text-dark-300">{name}</span>
|
||||
<span className="text-primary-400 font-mono">{formatValue(value)}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Constraints (if any) */}
|
||||
{data.constraints && Object.keys(data.constraints).length > 0 && (
|
||||
<div>
|
||||
<h4 className="text-xs font-medium text-dark-400 uppercase tracking-wide mb-2 flex items-center gap-1.5">
|
||||
<AlertTriangle size={12} />
|
||||
Constraints
|
||||
</h4>
|
||||
<div className="space-y-1">
|
||||
{Object.entries(data.constraints).map(([name, constraint]) => (
|
||||
<div
|
||||
key={name}
|
||||
className={`flex justify-between p-2 rounded text-sm ${
|
||||
constraint.feasible ? 'bg-dark-800' : 'bg-red-500/10 border border-red-500/20'
|
||||
}`}
|
||||
>
|
||||
<span className="text-dark-300 flex items-center gap-1.5">
|
||||
{constraint.feasible ? (
|
||||
<CheckCircle size={12} className="text-green-400" />
|
||||
) : (
|
||||
<XCircle size={12} className="text-red-400" />
|
||||
)}
|
||||
{name}
|
||||
</span>
|
||||
<span className={`font-mono ${constraint.feasible ? 'text-white' : 'text-red-400'}`}>
|
||||
{formatValue(constraint.value)}
|
||||
</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function formatValue(value: number): string {
|
||||
if (Math.abs(value) < 0.001 || Math.abs(value) >= 10000) {
|
||||
return value.toExponential(3);
|
||||
}
|
||||
return value.toFixed(4).replace(/\.?0+$/, '');
|
||||
}
|
||||
|
||||
export default ResultsPanel;
|
||||
@@ -1,10 +1,41 @@
|
||||
/**
|
||||
* ValidationPanel - Displays spec validation errors and warnings
|
||||
*
|
||||
* Shows a list of validation issues that need to be fixed before
|
||||
* running an optimization. Supports auto-navigation to problematic nodes.
|
||||
*
|
||||
* Can be used in two modes:
|
||||
* 1. Legacy mode: Pass validation prop directly (for backward compatibility)
|
||||
* 2. Store mode: Uses usePanelStore for persistent state
|
||||
*/
|
||||
|
||||
import { useMemo } from 'react';
|
||||
import {
|
||||
X,
|
||||
AlertCircle,
|
||||
AlertTriangle,
|
||||
CheckCircle,
|
||||
ChevronRight,
|
||||
Minimize2,
|
||||
Maximize2,
|
||||
} from 'lucide-react';
|
||||
import { useValidationPanel, usePanelStore, ValidationError as StoreValidationError } from '../../../hooks/usePanelStore';
|
||||
import { useSpecStore } from '../../../hooks/useSpecStore';
|
||||
import { ValidationResult } from '../../../lib/canvas/validation';
|
||||
|
||||
interface ValidationPanelProps {
|
||||
// ============================================================================
|
||||
// Legacy Props Interface (for backward compatibility)
|
||||
// ============================================================================
|
||||
|
||||
interface LegacyValidationPanelProps {
|
||||
validation: ValidationResult;
|
||||
}
|
||||
|
||||
export function ValidationPanel({ validation }: ValidationPanelProps) {
|
||||
/**
|
||||
* Legacy ValidationPanel - Inline display for canvas overlay
|
||||
* Kept for backward compatibility with AtomizerCanvas
|
||||
*/
|
||||
export function ValidationPanel({ validation }: LegacyValidationPanelProps) {
|
||||
return (
|
||||
<div className="absolute top-4 left-1/2 transform -translate-x-1/2 max-w-md w-full z-10">
|
||||
{validation.errors.length > 0 && (
|
||||
@@ -30,3 +61,199 @@ export function ValidationPanel({ validation }: ValidationPanelProps) {
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// New Floating Panel (uses store)
|
||||
// ============================================================================
|
||||
|
||||
interface FloatingValidationPanelProps {
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
export function FloatingValidationPanel({ onClose }: FloatingValidationPanelProps) {
|
||||
const panel = useValidationPanel();
|
||||
const { minimizePanel } = usePanelStore();
|
||||
const { selectNode } = useSpecStore();
|
||||
|
||||
const { errors, warnings, valid } = useMemo(() => {
|
||||
if (!panel.data) {
|
||||
return { errors: [], warnings: [], valid: true };
|
||||
}
|
||||
return {
|
||||
errors: panel.data.errors || [],
|
||||
warnings: panel.data.warnings || [],
|
||||
valid: panel.data.valid,
|
||||
};
|
||||
}, [panel.data]);
|
||||
|
||||
const handleNavigateToNode = (nodeId?: string) => {
|
||||
if (nodeId) {
|
||||
selectNode(nodeId);
|
||||
}
|
||||
};
|
||||
|
||||
if (!panel.open) return null;
|
||||
|
||||
// Minimized view
|
||||
if (panel.minimized) {
|
||||
return (
|
||||
<div
|
||||
className="bg-dark-850 border border-dark-700 rounded-lg shadow-xl flex items-center gap-2 px-3 py-2 cursor-pointer hover:bg-dark-800 transition-colors"
|
||||
onClick={() => minimizePanel('validation')}
|
||||
>
|
||||
{valid ? (
|
||||
<CheckCircle size={16} className="text-green-400" />
|
||||
) : (
|
||||
<AlertCircle size={16} className="text-red-400" />
|
||||
)}
|
||||
<span className="text-sm text-white font-medium">
|
||||
Validation {valid ? 'Passed' : `(${errors.length} errors)`}
|
||||
</span>
|
||||
<Maximize2 size={14} className="text-dark-400" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="bg-dark-850 border border-dark-700 rounded-xl w-96 max-h-[500px] flex flex-col shadow-xl">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-dark-700">
|
||||
<div className="flex items-center gap-2">
|
||||
{valid ? (
|
||||
<CheckCircle size={18} className="text-green-400" />
|
||||
) : (
|
||||
<AlertCircle size={18} className="text-red-400" />
|
||||
)}
|
||||
<span className="font-medium text-white">
|
||||
{valid ? 'Validation Passed' : 'Validation Issues'}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
<button
|
||||
onClick={() => minimizePanel('validation')}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
title="Minimize"
|
||||
>
|
||||
<Minimize2 size={14} />
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-1.5 text-dark-400 hover:text-white hover:bg-dark-700 rounded transition-colors"
|
||||
>
|
||||
<X size={14} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="flex-1 overflow-y-auto p-3 space-y-2">
|
||||
{valid && errors.length === 0 && warnings.length === 0 ? (
|
||||
<div className="flex flex-col items-center justify-center py-8 text-center">
|
||||
<CheckCircle size={40} className="text-green-400 mb-3" />
|
||||
<p className="text-white font-medium">All checks passed!</p>
|
||||
<p className="text-sm text-dark-400 mt-1">
|
||||
Your spec is ready to run.
|
||||
</p>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
{/* Errors */}
|
||||
{errors.length > 0 && (
|
||||
<div className="space-y-2">
|
||||
<h4 className="text-xs font-medium text-red-400 uppercase tracking-wide flex items-center gap-1">
|
||||
<AlertCircle size={12} />
|
||||
Errors ({errors.length})
|
||||
</h4>
|
||||
{errors.map((error, idx) => (
|
||||
<ValidationItem
|
||||
key={`error-${idx}`}
|
||||
item={error}
|
||||
severity="error"
|
||||
onNavigate={() => handleNavigateToNode(error.nodeId)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Warnings */}
|
||||
{warnings.length > 0 && (
|
||||
<div className="space-y-2 mt-4">
|
||||
<h4 className="text-xs font-medium text-amber-400 uppercase tracking-wide flex items-center gap-1">
|
||||
<AlertTriangle size={12} />
|
||||
Warnings ({warnings.length})
|
||||
</h4>
|
||||
{warnings.map((warning, idx) => (
|
||||
<ValidationItem
|
||||
key={`warning-${idx}`}
|
||||
item={warning}
|
||||
severity="warning"
|
||||
onNavigate={() => handleNavigateToNode(warning.nodeId)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
{!valid && (
|
||||
<div className="px-4 py-3 border-t border-dark-700 bg-dark-800/50">
|
||||
<p className="text-xs text-dark-400">
|
||||
Fix all errors before running the optimization.
|
||||
Warnings can be ignored but may cause issues.
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Validation Item Component
|
||||
// ============================================================================
|
||||
|
||||
interface ValidationItemProps {
|
||||
item: StoreValidationError;
|
||||
severity: 'error' | 'warning';
|
||||
onNavigate: () => void;
|
||||
}
|
||||
|
||||
function ValidationItem({ item, severity, onNavigate }: ValidationItemProps) {
|
||||
const isError = severity === 'error';
|
||||
const bgColor = isError ? 'bg-red-500/10' : 'bg-amber-500/10';
|
||||
const borderColor = isError ? 'border-red-500/30' : 'border-amber-500/30';
|
||||
const iconColor = isError ? 'text-red-400' : 'text-amber-400';
|
||||
|
||||
return (
|
||||
<div
|
||||
className={`p-3 rounded-lg border ${bgColor} ${borderColor} group cursor-pointer hover:bg-opacity-20 transition-colors`}
|
||||
onClick={onNavigate}
|
||||
>
|
||||
<div className="flex items-start gap-2">
|
||||
{isError ? (
|
||||
<AlertCircle size={16} className={`${iconColor} flex-shrink-0 mt-0.5`} />
|
||||
) : (
|
||||
<AlertTriangle size={16} className={`${iconColor} flex-shrink-0 mt-0.5`} />
|
||||
)}
|
||||
<div className="flex-1 min-w-0">
|
||||
<p className="text-sm text-white">{item.message}</p>
|
||||
{item.path && (
|
||||
<p className="text-xs text-dark-400 mt-1 font-mono">{item.path}</p>
|
||||
)}
|
||||
{item.suggestion && (
|
||||
<p className="text-xs text-dark-300 mt-2 italic">{item.suggestion}</p>
|
||||
)}
|
||||
</div>
|
||||
{item.nodeId && (
|
||||
<ChevronRight
|
||||
size={16}
|
||||
className="text-dark-500 group-hover:text-white transition-colors flex-shrink-0"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default ValidationPanel;
|
||||
|
||||
@@ -0,0 +1,240 @@
|
||||
/**
|
||||
* ConvergenceSparkline - Tiny SVG chart showing optimization convergence
|
||||
*
|
||||
* Displays the last N trial values as a mini line chart.
|
||||
* Used on ObjectiveNode to show convergence trend.
|
||||
*/
|
||||
|
||||
import { useMemo } from 'react';
|
||||
|
||||
interface ConvergenceSparklineProps {
|
||||
/** Array of values (most recent last) */
|
||||
values: number[];
|
||||
/** Width in pixels */
|
||||
width?: number;
|
||||
/** Height in pixels */
|
||||
height?: number;
|
||||
/** Line color */
|
||||
color?: string;
|
||||
/** Best value line color */
|
||||
bestColor?: string;
|
||||
/** Whether to show the best value line */
|
||||
showBest?: boolean;
|
||||
/** Direction: minimize shows lower as better, maximize shows higher as better */
|
||||
direction?: 'minimize' | 'maximize';
|
||||
/** Show dots at each point */
|
||||
showDots?: boolean;
|
||||
/** Number of points to display */
|
||||
maxPoints?: number;
|
||||
}
|
||||
|
||||
export function ConvergenceSparkline({
|
||||
values,
|
||||
width = 80,
|
||||
height = 24,
|
||||
color = '#60a5fa',
|
||||
bestColor = '#34d399',
|
||||
showBest = true,
|
||||
direction = 'minimize',
|
||||
showDots = false,
|
||||
maxPoints = 20,
|
||||
}: ConvergenceSparklineProps) {
|
||||
const { path, bestY, points } = useMemo(() => {
|
||||
if (!values || values.length === 0) {
|
||||
return { path: '', bestY: null, points: [], minVal: 0, maxVal: 1 };
|
||||
}
|
||||
|
||||
// Take last N points
|
||||
const data = values.slice(-maxPoints);
|
||||
if (data.length === 0) {
|
||||
return { path: '', bestY: null, points: [], minVal: 0, maxVal: 1 };
|
||||
}
|
||||
|
||||
// Calculate bounds with padding
|
||||
const minVal = Math.min(...data);
|
||||
const maxVal = Math.max(...data);
|
||||
const range = maxVal - minVal || 1;
|
||||
const padding = range * 0.1;
|
||||
const yMin = minVal - padding;
|
||||
const yMax = maxVal + padding;
|
||||
const yRange = yMax - yMin;
|
||||
|
||||
// Calculate best value
|
||||
const bestVal = direction === 'minimize' ? Math.min(...data) : Math.max(...data);
|
||||
|
||||
// Map values to SVG coordinates
|
||||
const xStep = width / Math.max(data.length - 1, 1);
|
||||
const mapY = (v: number) => height - ((v - yMin) / yRange) * height;
|
||||
|
||||
// Build path
|
||||
const points = data.map((v, i) => ({
|
||||
x: i * xStep,
|
||||
y: mapY(v),
|
||||
value: v,
|
||||
}));
|
||||
|
||||
const pathParts = points.map((p, i) =>
|
||||
i === 0 ? `M ${p.x} ${p.y}` : `L ${p.x} ${p.y}`
|
||||
);
|
||||
|
||||
return {
|
||||
path: pathParts.join(' '),
|
||||
bestY: mapY(bestVal),
|
||||
points,
|
||||
minVal,
|
||||
maxVal,
|
||||
};
|
||||
}, [values, width, height, maxPoints, direction]);
|
||||
|
||||
if (!values || values.length === 0) {
|
||||
return (
|
||||
<div
|
||||
className="flex items-center justify-center text-dark-500 text-xs"
|
||||
style={{ width, height }}
|
||||
>
|
||||
No data
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<svg
|
||||
width={width}
|
||||
height={height}
|
||||
className="overflow-visible"
|
||||
viewBox={`0 0 ${width} ${height}`}
|
||||
>
|
||||
{/* Best value line */}
|
||||
{showBest && bestY !== null && (
|
||||
<line
|
||||
x1={0}
|
||||
y1={bestY}
|
||||
x2={width}
|
||||
y2={bestY}
|
||||
stroke={bestColor}
|
||||
strokeWidth={1}
|
||||
strokeDasharray="2,2"
|
||||
opacity={0.5}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Main line */}
|
||||
<path
|
||||
d={path}
|
||||
fill="none"
|
||||
stroke={color}
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
|
||||
{/* Gradient fill under the line */}
|
||||
<defs>
|
||||
<linearGradient id="sparkline-gradient" x1="0%" y1="0%" x2="0%" y2="100%">
|
||||
<stop offset="0%" stopColor={color} stopOpacity={0.3} />
|
||||
<stop offset="100%" stopColor={color} stopOpacity={0} />
|
||||
</linearGradient>
|
||||
</defs>
|
||||
|
||||
{points.length > 1 && (
|
||||
<path
|
||||
d={`${path} L ${points[points.length - 1].x} ${height} L ${points[0].x} ${height} Z`}
|
||||
fill="url(#sparkline-gradient)"
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Dots at each point */}
|
||||
{showDots && points.map((p, i) => (
|
||||
<circle
|
||||
key={i}
|
||||
cx={p.x}
|
||||
cy={p.y}
|
||||
r={2}
|
||||
fill={color}
|
||||
/>
|
||||
))}
|
||||
|
||||
{/* Last point highlight */}
|
||||
{points.length > 0 && (
|
||||
<circle
|
||||
cx={points[points.length - 1].x}
|
||||
cy={points[points.length - 1].y}
|
||||
r={3}
|
||||
fill={color}
|
||||
stroke="white"
|
||||
strokeWidth={1}
|
||||
/>
|
||||
)}
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* ProgressRing - Circular progress indicator
|
||||
*/
|
||||
interface ProgressRingProps {
|
||||
/** Progress percentage (0-100) */
|
||||
progress: number;
|
||||
/** Size in pixels */
|
||||
size?: number;
|
||||
/** Stroke width */
|
||||
strokeWidth?: number;
|
||||
/** Progress color */
|
||||
color?: string;
|
||||
/** Background color */
|
||||
bgColor?: string;
|
||||
/** Show percentage text */
|
||||
showText?: boolean;
|
||||
}
|
||||
|
||||
export function ProgressRing({
|
||||
progress,
|
||||
size = 32,
|
||||
strokeWidth = 3,
|
||||
color = '#60a5fa',
|
||||
bgColor = '#374151',
|
||||
showText = true,
|
||||
}: ProgressRingProps) {
|
||||
const radius = (size - strokeWidth) / 2;
|
||||
const circumference = radius * 2 * Math.PI;
|
||||
const offset = circumference - (Math.min(100, Math.max(0, progress)) / 100) * circumference;
|
||||
|
||||
return (
|
||||
<div className="relative inline-flex items-center justify-center" style={{ width: size, height: size }}>
|
||||
<svg width={size} height={size} className="transform -rotate-90">
|
||||
{/* Background circle */}
|
||||
<circle
|
||||
cx={size / 2}
|
||||
cy={size / 2}
|
||||
r={radius}
|
||||
fill="none"
|
||||
stroke={bgColor}
|
||||
strokeWidth={strokeWidth}
|
||||
/>
|
||||
{/* Progress circle */}
|
||||
<circle
|
||||
cx={size / 2}
|
||||
cy={size / 2}
|
||||
r={radius}
|
||||
fill="none"
|
||||
stroke={color}
|
||||
strokeWidth={strokeWidth}
|
||||
strokeDasharray={circumference}
|
||||
strokeDashoffset={offset}
|
||||
strokeLinecap="round"
|
||||
className="transition-all duration-300"
|
||||
/>
|
||||
</svg>
|
||||
{showText && (
|
||||
<span
|
||||
className="absolute text-xs font-medium"
|
||||
style={{ color, fontSize: size * 0.25 }}
|
||||
>
|
||||
{Math.round(progress)}%
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default ConvergenceSparkline;
|
||||
@@ -5,7 +5,7 @@ import { ToolCallCard, ToolCall } from './ToolCallCard';
|
||||
|
||||
export interface Message {
|
||||
id: string;
|
||||
role: 'user' | 'assistant';
|
||||
role: 'user' | 'assistant' | 'system';
|
||||
content: string;
|
||||
timestamp: Date;
|
||||
isStreaming?: boolean;
|
||||
@@ -18,6 +18,18 @@ interface ChatMessageProps {
|
||||
|
||||
export const ChatMessage: React.FC<ChatMessageProps> = ({ message }) => {
|
||||
const isAssistant = message.role === 'assistant';
|
||||
const isSystem = message.role === 'system';
|
||||
|
||||
// System messages are displayed centered with special styling
|
||||
if (isSystem) {
|
||||
return (
|
||||
<div className="flex justify-center my-2">
|
||||
<div className="px-3 py-1 bg-dark-700/50 rounded-full text-xs text-dark-400 border border-dark-600">
|
||||
{message.content}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import React, { useRef, useEffect, useState } from 'react';
|
||||
import React, { useRef, useEffect, useState, useMemo } from 'react';
|
||||
import {
|
||||
MessageSquare,
|
||||
ChevronRight,
|
||||
@@ -13,8 +13,10 @@ import { ChatMessage } from './ChatMessage';
|
||||
import { ChatInput } from './ChatInput';
|
||||
import { ThinkingIndicator } from './ThinkingIndicator';
|
||||
import { ModeToggle } from './ModeToggle';
|
||||
import { useChat } from '../../hooks/useChat';
|
||||
import { useChat, CanvasState, CanvasModification } from '../../hooks/useChat';
|
||||
import { useStudy } from '../../context/StudyContext';
|
||||
import { useCanvasStore } from '../../hooks/useCanvasStore';
|
||||
import { NodeType } from '../../lib/canvas/schema';
|
||||
|
||||
interface ChatPaneProps {
|
||||
isOpen: boolean;
|
||||
@@ -31,6 +33,76 @@ export const ChatPane: React.FC<ChatPaneProps> = ({
|
||||
const messagesEndRef = useRef<HTMLDivElement>(null);
|
||||
const [isExpanded, setIsExpanded] = useState(false);
|
||||
|
||||
// Get canvas state and modification functions from the store
|
||||
const { nodes, edges, addNode, updateNodeData, selectNode, deleteSelected } = useCanvasStore();
|
||||
|
||||
// Build canvas state for chat context
|
||||
const canvasState: CanvasState | null = useMemo(() => {
|
||||
if (nodes.length === 0) return null;
|
||||
return {
|
||||
nodes: nodes.map(n => ({
|
||||
id: n.id,
|
||||
type: n.type,
|
||||
data: n.data,
|
||||
position: n.position,
|
||||
})),
|
||||
edges: edges.map(e => ({
|
||||
id: e.id,
|
||||
source: e.source,
|
||||
target: e.target,
|
||||
})),
|
||||
studyName: selectedStudy?.name || selectedStudy?.id,
|
||||
};
|
||||
}, [nodes, edges, selectedStudy]);
|
||||
|
||||
// Track position offset for multiple node additions
|
||||
const nodeAddCountRef = useRef(0);
|
||||
|
||||
// Handle canvas modifications from the assistant
|
||||
const handleCanvasModification = React.useCallback((modification: CanvasModification) => {
|
||||
console.log('Canvas modification from assistant:', modification);
|
||||
|
||||
switch (modification.action) {
|
||||
case 'add_node':
|
||||
if (modification.nodeType) {
|
||||
const nodeType = modification.nodeType as NodeType;
|
||||
// Calculate position: offset each new node so they don't stack
|
||||
const basePosition = modification.position || { x: 100, y: 100 };
|
||||
const offset = nodeAddCountRef.current * 120;
|
||||
const position = {
|
||||
x: basePosition.x,
|
||||
y: basePosition.y + offset,
|
||||
};
|
||||
nodeAddCountRef.current += 1;
|
||||
// Reset counter after a delay (for batch operations)
|
||||
setTimeout(() => { nodeAddCountRef.current = 0; }, 2000);
|
||||
|
||||
addNode(nodeType, position, modification.data);
|
||||
console.log(`Added ${nodeType} node at position:`, position);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'update_node':
|
||||
if (modification.nodeId && modification.data) {
|
||||
updateNodeData(modification.nodeId, modification.data);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'remove_node':
|
||||
if (modification.nodeId) {
|
||||
selectNode(modification.nodeId);
|
||||
deleteSelected();
|
||||
}
|
||||
break;
|
||||
|
||||
// Edge operations would need additional store methods
|
||||
case 'add_edge':
|
||||
case 'remove_edge':
|
||||
console.warn('Edge modification not yet implemented:', modification);
|
||||
break;
|
||||
}
|
||||
}, [addNode, updateNodeData, selectNode, deleteSelected]);
|
||||
|
||||
const {
|
||||
messages,
|
||||
isThinking,
|
||||
@@ -41,22 +113,38 @@ export const ChatPane: React.FC<ChatPaneProps> = ({
|
||||
sendMessage,
|
||||
clearMessages,
|
||||
switchMode,
|
||||
updateCanvasState,
|
||||
} = useChat({
|
||||
studyId: selectedStudy?.id,
|
||||
mode: 'user',
|
||||
useWebSocket: true,
|
||||
canvasState,
|
||||
onError: (err) => console.error('Chat error:', err),
|
||||
onCanvasModification: handleCanvasModification,
|
||||
});
|
||||
|
||||
// Keep canvas state synced with chat
|
||||
useEffect(() => {
|
||||
updateCanvasState(canvasState);
|
||||
}, [canvasState, updateCanvasState]);
|
||||
|
||||
// Auto-scroll to bottom when new messages arrive
|
||||
useEffect(() => {
|
||||
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
|
||||
}, [messages, isThinking]);
|
||||
|
||||
// Welcome message based on study context
|
||||
const welcomeMessage = selectedStudy
|
||||
? `Ready to help with **${selectedStudy.name || selectedStudy.id}**. Ask me about optimization progress, results analysis, or how to improve your design.`
|
||||
: 'Select a study to get started, or ask me to help you create a new one.';
|
||||
// Welcome message based on study and canvas context
|
||||
const welcomeMessage = useMemo(() => {
|
||||
if (selectedStudy) {
|
||||
return `Ready to help with **${selectedStudy.name || selectedStudy.id}**. Ask me about optimization progress, results analysis, or how to improve your design.`;
|
||||
}
|
||||
if (nodes.length > 0) {
|
||||
const dvCount = nodes.filter(n => n.type === 'designVar').length;
|
||||
const objCount = nodes.filter(n => n.type === 'objective').length;
|
||||
return `I can see your canvas with ${dvCount} design variables and ${objCount} objectives. Ask me to analyze, validate, or create a study from this setup.`;
|
||||
}
|
||||
return 'Select a study to get started, or build an optimization in the Canvas Builder.';
|
||||
}, [selectedStudy, nodes]);
|
||||
|
||||
// Collapsed state - just show toggle button
|
||||
if (!isOpen) {
|
||||
|
||||
@@ -30,22 +30,25 @@ interface ToolCallCardProps {
|
||||
}
|
||||
|
||||
// Map tool names to friendly labels and icons
|
||||
const TOOL_INFO: Record<string, { label: string; icon: React.ComponentType<{ className?: string }> }> = {
|
||||
const TOOL_INFO: Record<string, { label: string; icon: React.ComponentType<{ className?: string }>; color?: string }> = {
|
||||
// Study tools
|
||||
list_studies: { label: 'Listing Studies', icon: Database },
|
||||
get_study_status: { label: 'Getting Status', icon: FileSearch },
|
||||
create_study: { label: 'Creating Study', icon: Settings },
|
||||
create_study: { label: 'Creating Study', icon: Settings, color: 'text-green-400' },
|
||||
|
||||
// Optimization tools
|
||||
run_optimization: { label: 'Starting Optimization', icon: Play },
|
||||
run_optimization: { label: 'Starting Optimization', icon: Play, color: 'text-blue-400' },
|
||||
stop_optimization: { label: 'Stopping Optimization', icon: XCircle },
|
||||
get_optimization_status: { label: 'Checking Progress', icon: BarChart2 },
|
||||
|
||||
// Analysis tools
|
||||
get_trial_data: { label: 'Querying Trials', icon: Database },
|
||||
query_trials: { label: 'Querying Trials', icon: Database },
|
||||
get_trial_details: { label: 'Getting Trial Details', icon: FileSearch },
|
||||
analyze_convergence: { label: 'Analyzing Convergence', icon: BarChart2 },
|
||||
compare_trials: { label: 'Comparing Trials', icon: BarChart2 },
|
||||
get_best_design: { label: 'Getting Best Design', icon: CheckCircle },
|
||||
get_optimization_summary: { label: 'Getting Summary', icon: BarChart2 },
|
||||
|
||||
// Reporting tools
|
||||
generate_report: { label: 'Generating Report', icon: FileText },
|
||||
@@ -56,6 +59,25 @@ const TOOL_INFO: Record<string, { label: string; icon: React.ComponentType<{ cla
|
||||
recommend_method: { label: 'Recommending Method', icon: Settings },
|
||||
query_extractors: { label: 'Listing Extractors', icon: Database },
|
||||
|
||||
// Config tools (read)
|
||||
read_study_config: { label: 'Reading Config', icon: FileSearch },
|
||||
read_study_readme: { label: 'Reading README', icon: FileText },
|
||||
|
||||
// === WRITE TOOLS (Power Mode) ===
|
||||
add_design_variable: { label: 'Adding Design Variable', icon: Settings, color: 'text-amber-400' },
|
||||
add_extractor: { label: 'Adding Extractor', icon: Settings, color: 'text-amber-400' },
|
||||
add_objective: { label: 'Adding Objective', icon: Settings, color: 'text-amber-400' },
|
||||
add_constraint: { label: 'Adding Constraint', icon: Settings, color: 'text-amber-400' },
|
||||
update_spec_field: { label: 'Updating Field', icon: Settings, color: 'text-amber-400' },
|
||||
remove_node: { label: 'Removing Node', icon: XCircle, color: 'text-red-400' },
|
||||
|
||||
// === INTERVIEW TOOLS ===
|
||||
start_interview: { label: 'Starting Interview', icon: HelpCircle, color: 'text-purple-400' },
|
||||
interview_record: { label: 'Recording Answer', icon: CheckCircle, color: 'text-purple-400' },
|
||||
interview_advance: { label: 'Advancing Interview', icon: Play, color: 'text-purple-400' },
|
||||
interview_status: { label: 'Checking Progress', icon: BarChart2, color: 'text-purple-400' },
|
||||
interview_finalize: { label: 'Creating Study', icon: CheckCircle, color: 'text-green-400' },
|
||||
|
||||
// Admin tools (power mode)
|
||||
edit_file: { label: 'Editing File', icon: FileText },
|
||||
create_file: { label: 'Creating File', icon: FileText },
|
||||
@@ -104,7 +126,7 @@ export const ToolCallCard: React.FC<ToolCallCardProps> = ({ toolCall }) => {
|
||||
)}
|
||||
|
||||
{/* Tool icon */}
|
||||
<Icon className="w-4 h-4 text-dark-400 flex-shrink-0" />
|
||||
<Icon className={`w-4 h-4 flex-shrink-0 ${info.color || 'text-dark-400'}`} />
|
||||
|
||||
{/* Label */}
|
||||
<span className="flex-1 text-sm text-dark-200 truncate">{info.label}</span>
|
||||
|
||||
@@ -1,260 +0,0 @@
|
||||
/**
|
||||
* PlotlyConvergencePlot - Interactive convergence plot using Plotly
|
||||
*
|
||||
* Features:
|
||||
* - Line plot showing objective vs trial number
|
||||
* - Best-so-far trace overlay
|
||||
* - FEA vs NN trial differentiation
|
||||
* - Hover tooltips with trial details
|
||||
* - Range slider for zooming
|
||||
* - Log scale toggle
|
||||
* - Export to PNG/SVG
|
||||
*/
|
||||
|
||||
import { useMemo, useState } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface Trial {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
user_attrs?: Record<string, any>;
|
||||
source?: 'FEA' | 'NN' | 'V10_FEA';
|
||||
constraint_satisfied?: boolean;
|
||||
}
|
||||
|
||||
// Penalty threshold - objectives above this are considered failed/penalty trials
|
||||
const PENALTY_THRESHOLD = 100000;
|
||||
|
||||
interface PlotlyConvergencePlotProps {
|
||||
trials: Trial[];
|
||||
objectiveIndex?: number;
|
||||
objectiveName?: string;
|
||||
direction?: 'minimize' | 'maximize';
|
||||
height?: number;
|
||||
showRangeSlider?: boolean;
|
||||
showLogScaleToggle?: boolean;
|
||||
}
|
||||
|
||||
export function PlotlyConvergencePlot({
|
||||
trials,
|
||||
objectiveIndex = 0,
|
||||
objectiveName = 'Objective',
|
||||
direction = 'minimize',
|
||||
height = 400,
|
||||
showRangeSlider = true,
|
||||
showLogScaleToggle = true
|
||||
}: PlotlyConvergencePlotProps) {
|
||||
const [useLogScale, setUseLogScale] = useState(false);
|
||||
|
||||
// Process trials and calculate best-so-far
|
||||
const { feaData, nnData, bestSoFar, allX, allY } = useMemo(() => {
|
||||
if (!trials.length) return { feaData: { x: [], y: [], text: [] }, nnData: { x: [], y: [], text: [] }, bestSoFar: { x: [], y: [] }, allX: [], allY: [] };
|
||||
|
||||
// Sort by trial number
|
||||
const sorted = [...trials].sort((a, b) => a.trial_number - b.trial_number);
|
||||
|
||||
const fea: { x: number[]; y: number[]; text: string[] } = { x: [], y: [], text: [] };
|
||||
const nn: { x: number[]; y: number[]; text: string[] } = { x: [], y: [], text: [] };
|
||||
const best: { x: number[]; y: number[] } = { x: [], y: [] };
|
||||
const xs: number[] = [];
|
||||
const ys: number[] = [];
|
||||
|
||||
let bestValue = direction === 'minimize' ? Infinity : -Infinity;
|
||||
|
||||
sorted.forEach(t => {
|
||||
const val = t.values?.[objectiveIndex] ?? t.user_attrs?.[objectiveName] ?? null;
|
||||
if (val === null || !isFinite(val)) return;
|
||||
|
||||
// Filter out failed/penalty trials:
|
||||
// 1. Objective above penalty threshold (e.g., 1000000 = solver failure)
|
||||
// 2. constraint_satisfied explicitly false
|
||||
// 3. user_attrs indicates pruned/failed
|
||||
const isPenalty = val >= PENALTY_THRESHOLD;
|
||||
const isFailed = t.constraint_satisfied === false;
|
||||
const isPruned = t.user_attrs?.pruned === true || t.user_attrs?.fail_reason;
|
||||
if (isPenalty || isFailed || isPruned) return;
|
||||
|
||||
const source = t.source || t.user_attrs?.source || 'FEA';
|
||||
const hoverText = `Trial #${t.trial_number}<br>${objectiveName}: ${val.toFixed(4)}<br>Source: ${source}`;
|
||||
|
||||
xs.push(t.trial_number);
|
||||
ys.push(val);
|
||||
|
||||
if (source === 'NN') {
|
||||
nn.x.push(t.trial_number);
|
||||
nn.y.push(val);
|
||||
nn.text.push(hoverText);
|
||||
} else {
|
||||
fea.x.push(t.trial_number);
|
||||
fea.y.push(val);
|
||||
fea.text.push(hoverText);
|
||||
}
|
||||
|
||||
// Update best-so-far
|
||||
if (direction === 'minimize') {
|
||||
if (val < bestValue) bestValue = val;
|
||||
} else {
|
||||
if (val > bestValue) bestValue = val;
|
||||
}
|
||||
best.x.push(t.trial_number);
|
||||
best.y.push(bestValue);
|
||||
});
|
||||
|
||||
return { feaData: fea, nnData: nn, bestSoFar: best, allX: xs, allY: ys };
|
||||
}, [trials, objectiveIndex, objectiveName, direction]);
|
||||
|
||||
if (!trials.length || allX.length === 0) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-64 text-gray-500">
|
||||
No trial data available
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
const traces: any[] = [];
|
||||
|
||||
// FEA trials scatter
|
||||
if (feaData.x.length > 0) {
|
||||
traces.push({
|
||||
type: 'scatter',
|
||||
mode: 'markers',
|
||||
name: `FEA (${feaData.x.length})`,
|
||||
x: feaData.x,
|
||||
y: feaData.y,
|
||||
text: feaData.text,
|
||||
hoverinfo: 'text',
|
||||
marker: {
|
||||
color: '#3B82F6',
|
||||
size: 8,
|
||||
opacity: 0.7,
|
||||
line: { color: '#1E40AF', width: 1 }
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// NN trials scatter
|
||||
if (nnData.x.length > 0) {
|
||||
traces.push({
|
||||
type: 'scatter',
|
||||
mode: 'markers',
|
||||
name: `NN (${nnData.x.length})`,
|
||||
x: nnData.x,
|
||||
y: nnData.y,
|
||||
text: nnData.text,
|
||||
hoverinfo: 'text',
|
||||
marker: {
|
||||
color: '#F97316',
|
||||
size: 6,
|
||||
symbol: 'cross',
|
||||
opacity: 0.6
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Best-so-far line
|
||||
if (bestSoFar.x.length > 0) {
|
||||
traces.push({
|
||||
type: 'scatter',
|
||||
mode: 'lines',
|
||||
name: 'Best So Far',
|
||||
x: bestSoFar.x,
|
||||
y: bestSoFar.y,
|
||||
line: {
|
||||
color: '#10B981',
|
||||
width: 3,
|
||||
shape: 'hv' // Step line
|
||||
},
|
||||
hoverinfo: 'y'
|
||||
});
|
||||
}
|
||||
|
||||
const layout: any = {
|
||||
height,
|
||||
margin: { l: 60, r: 30, t: 30, b: showRangeSlider ? 80 : 50 },
|
||||
paper_bgcolor: 'rgba(0,0,0,0)',
|
||||
plot_bgcolor: 'rgba(0,0,0,0)',
|
||||
xaxis: {
|
||||
title: 'Trial Number',
|
||||
gridcolor: '#E5E7EB',
|
||||
zerolinecolor: '#D1D5DB',
|
||||
rangeslider: showRangeSlider ? { visible: true } : undefined
|
||||
},
|
||||
yaxis: {
|
||||
title: useLogScale ? `log₁₀(${objectiveName})` : objectiveName,
|
||||
gridcolor: '#E5E7EB',
|
||||
zerolinecolor: '#D1D5DB',
|
||||
type: useLogScale ? 'log' : 'linear'
|
||||
},
|
||||
legend: {
|
||||
x: 1,
|
||||
y: 1,
|
||||
xanchor: 'right',
|
||||
bgcolor: 'rgba(255,255,255,0.8)',
|
||||
bordercolor: '#E5E7EB',
|
||||
borderwidth: 1
|
||||
},
|
||||
font: { family: 'Inter, system-ui, sans-serif' },
|
||||
hovermode: 'closest'
|
||||
};
|
||||
|
||||
// Best value annotation
|
||||
const bestVal = direction === 'minimize'
|
||||
? Math.min(...allY)
|
||||
: Math.max(...allY);
|
||||
const bestIdx = allY.indexOf(bestVal);
|
||||
const bestTrial = allX[bestIdx];
|
||||
|
||||
return (
|
||||
<div className="w-full">
|
||||
{/* Summary stats and controls */}
|
||||
<div className="flex items-center justify-between mb-3">
|
||||
<div className="flex gap-6 text-sm">
|
||||
<div className="text-gray-600">
|
||||
Best: <span className="font-semibold text-green-600">{bestVal.toFixed(4)}</span>
|
||||
<span className="text-gray-400 ml-1">(Trial #{bestTrial})</span>
|
||||
</div>
|
||||
<div className="text-gray-600">
|
||||
Current: <span className="font-semibold">{allY[allY.length - 1].toFixed(4)}</span>
|
||||
</div>
|
||||
<div className="text-gray-600">
|
||||
Trials: <span className="font-semibold">{allX.length}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Log scale toggle */}
|
||||
{showLogScaleToggle && (
|
||||
<button
|
||||
onClick={() => setUseLogScale(!useLogScale)}
|
||||
className={`px-3 py-1 text-xs rounded transition-colors ${
|
||||
useLogScale
|
||||
? 'bg-blue-600 text-white'
|
||||
: 'bg-gray-200 text-gray-700 hover:bg-gray-300'
|
||||
}`}
|
||||
title="Toggle logarithmic scale - better for viewing early improvements"
|
||||
>
|
||||
{useLogScale ? 'Log Scale' : 'Linear Scale'}
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<Plot
|
||||
data={traces}
|
||||
layout={layout}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
displaylogo: false,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
toImageButtonOptions: {
|
||||
format: 'png',
|
||||
filename: 'convergence_plot',
|
||||
height: 600,
|
||||
width: 1200,
|
||||
scale: 2
|
||||
}
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,161 +0,0 @@
|
||||
import { useMemo } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface TrialData {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
}
|
||||
|
||||
interface PlotlyCorrelationHeatmapProps {
|
||||
trials: TrialData[];
|
||||
objectiveName?: string;
|
||||
height?: number;
|
||||
}
|
||||
|
||||
// Calculate Pearson correlation coefficient
|
||||
function pearsonCorrelation(x: number[], y: number[]): number {
|
||||
const n = x.length;
|
||||
if (n === 0 || n !== y.length) return 0;
|
||||
|
||||
const meanX = x.reduce((a, b) => a + b, 0) / n;
|
||||
const meanY = y.reduce((a, b) => a + b, 0) / n;
|
||||
|
||||
let numerator = 0;
|
||||
let denomX = 0;
|
||||
let denomY = 0;
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
const dx = x[i] - meanX;
|
||||
const dy = y[i] - meanY;
|
||||
numerator += dx * dy;
|
||||
denomX += dx * dx;
|
||||
denomY += dy * dy;
|
||||
}
|
||||
|
||||
const denominator = Math.sqrt(denomX) * Math.sqrt(denomY);
|
||||
return denominator === 0 ? 0 : numerator / denominator;
|
||||
}
|
||||
|
||||
export function PlotlyCorrelationHeatmap({
|
||||
trials,
|
||||
objectiveName = 'Objective',
|
||||
height = 500
|
||||
}: PlotlyCorrelationHeatmapProps) {
|
||||
const { matrix, labels, annotations } = useMemo(() => {
|
||||
if (trials.length < 3) {
|
||||
return { matrix: [], labels: [], annotations: [] };
|
||||
}
|
||||
|
||||
// Get parameter names
|
||||
const paramNames = Object.keys(trials[0].params);
|
||||
const allLabels = [...paramNames, objectiveName];
|
||||
|
||||
// Extract data columns
|
||||
const columns: Record<string, number[]> = {};
|
||||
paramNames.forEach(name => {
|
||||
columns[name] = trials.map(t => t.params[name]).filter(v => v !== undefined && !isNaN(v));
|
||||
});
|
||||
columns[objectiveName] = trials.map(t => t.values[0]).filter(v => v !== undefined && !isNaN(v));
|
||||
|
||||
// Calculate correlation matrix
|
||||
const n = allLabels.length;
|
||||
const correlationMatrix: number[][] = [];
|
||||
const annotationData: any[] = [];
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
const row: number[] = [];
|
||||
for (let j = 0; j < n; j++) {
|
||||
const col1 = columns[allLabels[i]];
|
||||
const col2 = columns[allLabels[j]];
|
||||
|
||||
// Ensure same length
|
||||
const minLen = Math.min(col1.length, col2.length);
|
||||
const corr = pearsonCorrelation(col1.slice(0, minLen), col2.slice(0, minLen));
|
||||
row.push(corr);
|
||||
|
||||
// Add annotation
|
||||
annotationData.push({
|
||||
x: allLabels[j],
|
||||
y: allLabels[i],
|
||||
text: corr.toFixed(2),
|
||||
showarrow: false,
|
||||
font: {
|
||||
color: Math.abs(corr) > 0.5 ? '#fff' : '#888',
|
||||
size: 11
|
||||
}
|
||||
});
|
||||
}
|
||||
correlationMatrix.push(row);
|
||||
}
|
||||
|
||||
return {
|
||||
matrix: correlationMatrix,
|
||||
labels: allLabels,
|
||||
annotations: annotationData
|
||||
};
|
||||
}, [trials, objectiveName]);
|
||||
|
||||
if (trials.length < 3) {
|
||||
return (
|
||||
<div className="h-64 flex items-center justify-center text-dark-400">
|
||||
<p>Need at least 3 trials to compute correlations</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Plot
|
||||
data={[
|
||||
{
|
||||
z: matrix,
|
||||
x: labels,
|
||||
y: labels,
|
||||
type: 'heatmap',
|
||||
colorscale: [
|
||||
[0, '#ef4444'], // -1: strong negative (red)
|
||||
[0.25, '#f87171'], // -0.5: moderate negative
|
||||
[0.5, '#1a1b26'], // 0: no correlation (dark)
|
||||
[0.75, '#60a5fa'], // 0.5: moderate positive
|
||||
[1, '#3b82f6'] // 1: strong positive (blue)
|
||||
],
|
||||
zmin: -1,
|
||||
zmax: 1,
|
||||
showscale: true,
|
||||
colorbar: {
|
||||
title: { text: 'Correlation', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
len: 0.8
|
||||
},
|
||||
hovertemplate: '%{y} vs %{x}<br>Correlation: %{z:.3f}<extra></extra>'
|
||||
}
|
||||
]}
|
||||
layout={{
|
||||
title: {
|
||||
text: 'Parameter-Objective Correlation Matrix',
|
||||
font: { color: '#fff', size: 14 }
|
||||
},
|
||||
height,
|
||||
margin: { l: 120, r: 60, t: 60, b: 120 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
xaxis: {
|
||||
tickangle: 45,
|
||||
tickfont: { color: '#888', size: 10 },
|
||||
gridcolor: 'rgba(255,255,255,0.05)'
|
||||
},
|
||||
yaxis: {
|
||||
tickfont: { color: '#888', size: 10 },
|
||||
gridcolor: 'rgba(255,255,255,0.05)'
|
||||
},
|
||||
annotations: annotations
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
displaylogo: false
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
import { useMemo } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface TrialData {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
constraint_satisfied?: boolean;
|
||||
}
|
||||
|
||||
interface PlotlyFeasibilityChartProps {
|
||||
trials: TrialData[];
|
||||
height?: number;
|
||||
}
|
||||
|
||||
export function PlotlyFeasibilityChart({
|
||||
trials,
|
||||
height = 350
|
||||
}: PlotlyFeasibilityChartProps) {
|
||||
const { trialNumbers, cumulativeFeasibility, windowedFeasibility } = useMemo(() => {
|
||||
if (trials.length === 0) {
|
||||
return { trialNumbers: [], cumulativeFeasibility: [], windowedFeasibility: [] };
|
||||
}
|
||||
|
||||
// Sort trials by number
|
||||
const sorted = [...trials].sort((a, b) => a.trial_number - b.trial_number);
|
||||
|
||||
const numbers: number[] = [];
|
||||
const cumulative: number[] = [];
|
||||
const windowed: number[] = [];
|
||||
|
||||
let feasibleCount = 0;
|
||||
const windowSize = Math.min(20, Math.floor(sorted.length / 5) || 1);
|
||||
|
||||
sorted.forEach((trial, idx) => {
|
||||
numbers.push(trial.trial_number);
|
||||
|
||||
// Cumulative feasibility
|
||||
if (trial.constraint_satisfied !== false) {
|
||||
feasibleCount++;
|
||||
}
|
||||
cumulative.push((feasibleCount / (idx + 1)) * 100);
|
||||
|
||||
// Windowed (rolling) feasibility
|
||||
const windowStart = Math.max(0, idx - windowSize + 1);
|
||||
const windowTrials = sorted.slice(windowStart, idx + 1);
|
||||
const windowFeasible = windowTrials.filter(t => t.constraint_satisfied !== false).length;
|
||||
windowed.push((windowFeasible / windowTrials.length) * 100);
|
||||
});
|
||||
|
||||
return { trialNumbers: numbers, cumulativeFeasibility: cumulative, windowedFeasibility: windowed };
|
||||
}, [trials]);
|
||||
|
||||
if (trials.length === 0) {
|
||||
return (
|
||||
<div className="h-64 flex items-center justify-center text-dark-400">
|
||||
<p>No trials to display</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Plot
|
||||
data={[
|
||||
{
|
||||
x: trialNumbers,
|
||||
y: cumulativeFeasibility,
|
||||
type: 'scatter',
|
||||
mode: 'lines',
|
||||
name: 'Cumulative Feasibility',
|
||||
line: { color: '#22c55e', width: 2 },
|
||||
hovertemplate: 'Trial %{x}<br>Cumulative: %{y:.1f}%<extra></extra>'
|
||||
},
|
||||
{
|
||||
x: trialNumbers,
|
||||
y: windowedFeasibility,
|
||||
type: 'scatter',
|
||||
mode: 'lines',
|
||||
name: 'Rolling (20-trial)',
|
||||
line: { color: '#60a5fa', width: 2, dash: 'dot' },
|
||||
hovertemplate: 'Trial %{x}<br>Rolling: %{y:.1f}%<extra></extra>'
|
||||
}
|
||||
]}
|
||||
layout={{
|
||||
height,
|
||||
margin: { l: 60, r: 30, t: 30, b: 50 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
xaxis: {
|
||||
title: { text: 'Trial Number', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
gridcolor: 'rgba(255,255,255,0.05)',
|
||||
zeroline: false
|
||||
},
|
||||
yaxis: {
|
||||
title: { text: 'Feasibility Rate (%)', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
gridcolor: 'rgba(255,255,255,0.1)',
|
||||
zeroline: false,
|
||||
range: [0, 105]
|
||||
},
|
||||
legend: {
|
||||
font: { color: '#888' },
|
||||
bgcolor: 'rgba(0,0,0,0.5)',
|
||||
x: 0.02,
|
||||
y: 0.98,
|
||||
xanchor: 'left',
|
||||
yanchor: 'top'
|
||||
},
|
||||
showlegend: true,
|
||||
hovermode: 'x unified'
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
displaylogo: false
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
/**
|
||||
* PlotlyParallelCoordinates - Interactive parallel coordinates plot using Plotly
|
||||
*
|
||||
* Features:
|
||||
* - Native zoom, pan, and selection
|
||||
* - Hover tooltips with trial details
|
||||
* - Brush filtering on each axis
|
||||
* - FEA vs NN color differentiation
|
||||
* - Export to PNG/SVG
|
||||
*/
|
||||
|
||||
import { useMemo } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface Trial {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
user_attrs?: Record<string, any>;
|
||||
constraint_satisfied?: boolean;
|
||||
source?: 'FEA' | 'NN' | 'V10_FEA';
|
||||
}
|
||||
|
||||
interface Objective {
|
||||
name: string;
|
||||
direction?: 'minimize' | 'maximize';
|
||||
unit?: string;
|
||||
}
|
||||
|
||||
interface DesignVariable {
|
||||
name: string;
|
||||
unit?: string;
|
||||
min?: number;
|
||||
max?: number;
|
||||
}
|
||||
|
||||
interface PlotlyParallelCoordinatesProps {
|
||||
trials: Trial[];
|
||||
objectives: Objective[];
|
||||
designVariables: DesignVariable[];
|
||||
paretoFront?: Trial[];
|
||||
height?: number;
|
||||
}
|
||||
|
||||
export function PlotlyParallelCoordinates({
|
||||
trials,
|
||||
objectives,
|
||||
designVariables,
|
||||
paretoFront = [],
|
||||
height = 500
|
||||
}: PlotlyParallelCoordinatesProps) {
|
||||
// Create set of Pareto front trial numbers
|
||||
const paretoSet = useMemo(() => new Set(paretoFront.map(t => t.trial_number)), [paretoFront]);
|
||||
|
||||
// Build dimensions array for parallel coordinates
|
||||
const { dimensions, colorValues, colorScale } = useMemo(() => {
|
||||
if (!trials.length) return { dimensions: [], colorValues: [], colorScale: [] };
|
||||
|
||||
const dims: any[] = [];
|
||||
const colors: number[] = [];
|
||||
|
||||
// Get all design variable names
|
||||
const dvNames = designVariables.map(dv => dv.name);
|
||||
const objNames = objectives.map(obj => obj.name);
|
||||
|
||||
// Add design variable dimensions
|
||||
dvNames.forEach((name, idx) => {
|
||||
const dv = designVariables[idx];
|
||||
const values = trials.map(t => t.params[name] ?? 0);
|
||||
const validValues = values.filter(v => v !== null && v !== undefined && isFinite(v));
|
||||
|
||||
if (validValues.length === 0) return;
|
||||
|
||||
dims.push({
|
||||
label: name,
|
||||
values: values,
|
||||
range: [
|
||||
dv?.min ?? Math.min(...validValues),
|
||||
dv?.max ?? Math.max(...validValues)
|
||||
],
|
||||
constraintrange: undefined
|
||||
});
|
||||
});
|
||||
|
||||
// Add objective dimensions
|
||||
objNames.forEach((name, idx) => {
|
||||
const obj = objectives[idx];
|
||||
const values = trials.map(t => {
|
||||
// Try to get from values array first, then user_attrs
|
||||
if (t.values && t.values[idx] !== undefined) {
|
||||
return t.values[idx];
|
||||
}
|
||||
return t.user_attrs?.[name] ?? 0;
|
||||
});
|
||||
const validValues = values.filter(v => v !== null && v !== undefined && isFinite(v));
|
||||
|
||||
if (validValues.length === 0) return;
|
||||
|
||||
dims.push({
|
||||
label: `${name}${obj.unit ? ` (${obj.unit})` : ''}`,
|
||||
values: values,
|
||||
range: [Math.min(...validValues) * 0.95, Math.max(...validValues) * 1.05]
|
||||
});
|
||||
});
|
||||
|
||||
// Build color array: 0 = V10_FEA, 1 = FEA, 2 = NN, 3 = Pareto
|
||||
trials.forEach(t => {
|
||||
const source = t.source || t.user_attrs?.source || 'FEA';
|
||||
const isPareto = paretoSet.has(t.trial_number);
|
||||
|
||||
if (isPareto) {
|
||||
colors.push(3); // Pareto - special color
|
||||
} else if (source === 'NN') {
|
||||
colors.push(2); // NN trials
|
||||
} else if (source === 'V10_FEA') {
|
||||
colors.push(0); // V10 FEA
|
||||
} else {
|
||||
colors.push(1); // V11 FEA
|
||||
}
|
||||
});
|
||||
|
||||
// Color scale: V10_FEA (light blue), FEA (blue), NN (orange), Pareto (green)
|
||||
const scale: [number, string][] = [
|
||||
[0, '#93C5FD'], // V10_FEA - light blue
|
||||
[0.33, '#2563EB'], // FEA - blue
|
||||
[0.66, '#F97316'], // NN - orange
|
||||
[1, '#10B981'] // Pareto - green
|
||||
];
|
||||
|
||||
return { dimensions: dims, colorValues: colors, colorScale: scale };
|
||||
}, [trials, objectives, designVariables, paretoSet]);
|
||||
|
||||
if (!trials.length || dimensions.length === 0) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-64 text-gray-500">
|
||||
No trial data available for parallel coordinates
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Count trial types for legend
|
||||
const feaCount = trials.filter(t => {
|
||||
const source = t.source || t.user_attrs?.source || 'FEA';
|
||||
return source === 'FEA' || source === 'V10_FEA';
|
||||
}).length;
|
||||
const nnCount = trials.filter(t => {
|
||||
const source = t.source || t.user_attrs?.source || 'FEA';
|
||||
return source === 'NN';
|
||||
}).length;
|
||||
|
||||
return (
|
||||
<div className="w-full">
|
||||
{/* Legend */}
|
||||
<div className="flex gap-4 justify-center mb-2 text-sm">
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-4 h-1 rounded" style={{ backgroundColor: '#2563EB' }} />
|
||||
<span className="text-gray-600">FEA ({feaCount})</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-4 h-1 rounded" style={{ backgroundColor: '#F97316' }} />
|
||||
<span className="text-gray-600">NN ({nnCount})</span>
|
||||
</div>
|
||||
{paretoFront.length > 0 && (
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-4 h-1 rounded" style={{ backgroundColor: '#10B981' }} />
|
||||
<span className="text-gray-600">Pareto ({paretoFront.length})</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<Plot
|
||||
data={[
|
||||
{
|
||||
type: 'parcoords',
|
||||
line: {
|
||||
color: colorValues,
|
||||
colorscale: colorScale as any,
|
||||
showscale: false
|
||||
},
|
||||
dimensions: dimensions,
|
||||
labelangle: -30,
|
||||
labelfont: {
|
||||
size: 11,
|
||||
color: '#374151'
|
||||
},
|
||||
tickfont: {
|
||||
size: 10,
|
||||
color: '#6B7280'
|
||||
}
|
||||
} as any
|
||||
]}
|
||||
layout={{
|
||||
height: height,
|
||||
margin: { l: 80, r: 80, t: 30, b: 30 },
|
||||
paper_bgcolor: 'rgba(0,0,0,0)',
|
||||
plot_bgcolor: 'rgba(0,0,0,0)',
|
||||
font: {
|
||||
family: 'Inter, system-ui, sans-serif'
|
||||
}
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
displaylogo: false,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
toImageButtonOptions: {
|
||||
format: 'png',
|
||||
filename: 'parallel_coordinates',
|
||||
height: 800,
|
||||
width: 1400,
|
||||
scale: 2
|
||||
}
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
|
||||
<p className="text-xs text-gray-500 text-center mt-2">
|
||||
Drag along axes to filter. Double-click to reset.
|
||||
</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,209 +0,0 @@
|
||||
/**
|
||||
* PlotlyParameterImportance - Interactive parameter importance chart using Plotly
|
||||
*
|
||||
* Features:
|
||||
* - Horizontal bar chart showing correlation/importance
|
||||
* - Color coding by positive/negative correlation
|
||||
* - Hover tooltips with details
|
||||
* - Sortable by importance
|
||||
*/
|
||||
|
||||
import { useMemo, useState } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface Trial {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
user_attrs?: Record<string, any>;
|
||||
}
|
||||
|
||||
interface DesignVariable {
|
||||
name: string;
|
||||
unit?: string;
|
||||
}
|
||||
|
||||
interface PlotlyParameterImportanceProps {
|
||||
trials: Trial[];
|
||||
designVariables: DesignVariable[];
|
||||
objectiveIndex?: number;
|
||||
objectiveName?: string;
|
||||
height?: number;
|
||||
}
|
||||
|
||||
// Calculate Pearson correlation coefficient
|
||||
function pearsonCorrelation(x: number[], y: number[]): number {
|
||||
const n = x.length;
|
||||
if (n === 0) return 0;
|
||||
|
||||
const sumX = x.reduce((a, b) => a + b, 0);
|
||||
const sumY = y.reduce((a, b) => a + b, 0);
|
||||
const sumXY = x.reduce((acc, xi, i) => acc + xi * y[i], 0);
|
||||
const sumX2 = x.reduce((acc, xi) => acc + xi * xi, 0);
|
||||
const sumY2 = y.reduce((acc, yi) => acc + yi * yi, 0);
|
||||
|
||||
const numerator = n * sumXY - sumX * sumY;
|
||||
const denominator = Math.sqrt((n * sumX2 - sumX * sumX) * (n * sumY2 - sumY * sumY));
|
||||
|
||||
if (denominator === 0) return 0;
|
||||
return numerator / denominator;
|
||||
}
|
||||
|
||||
export function PlotlyParameterImportance({
|
||||
trials,
|
||||
designVariables,
|
||||
objectiveIndex = 0,
|
||||
objectiveName = 'Objective',
|
||||
height = 400
|
||||
}: PlotlyParameterImportanceProps) {
|
||||
const [sortBy, setSortBy] = useState<'importance' | 'name'>('importance');
|
||||
|
||||
// Calculate correlations for each parameter
|
||||
const correlations = useMemo(() => {
|
||||
if (!trials.length || !designVariables.length) return [];
|
||||
|
||||
// Get objective values
|
||||
const objValues = trials.map(t => {
|
||||
if (t.values && t.values[objectiveIndex] !== undefined) {
|
||||
return t.values[objectiveIndex];
|
||||
}
|
||||
return t.user_attrs?.[objectiveName] ?? null;
|
||||
}).filter((v): v is number => v !== null && isFinite(v));
|
||||
|
||||
if (objValues.length < 3) return []; // Need at least 3 points for correlation
|
||||
|
||||
const results: { name: string; correlation: number; absCorrelation: number }[] = [];
|
||||
|
||||
designVariables.forEach(dv => {
|
||||
const paramValues = trials
|
||||
.map((t) => {
|
||||
const objVal = t.values?.[objectiveIndex] ?? t.user_attrs?.[objectiveName];
|
||||
if (objVal === null || objVal === undefined || !isFinite(objVal)) return null;
|
||||
return { param: t.params[dv.name], obj: objVal };
|
||||
})
|
||||
.filter((v): v is { param: number; obj: number } => v !== null && v.param !== undefined);
|
||||
|
||||
if (paramValues.length < 3) return;
|
||||
|
||||
const x = paramValues.map(v => v.param);
|
||||
const y = paramValues.map(v => v.obj);
|
||||
const corr = pearsonCorrelation(x, y);
|
||||
|
||||
results.push({
|
||||
name: dv.name,
|
||||
correlation: corr,
|
||||
absCorrelation: Math.abs(corr)
|
||||
});
|
||||
});
|
||||
|
||||
// Sort by absolute correlation or name
|
||||
if (sortBy === 'importance') {
|
||||
results.sort((a, b) => b.absCorrelation - a.absCorrelation);
|
||||
} else {
|
||||
results.sort((a, b) => a.name.localeCompare(b.name));
|
||||
}
|
||||
|
||||
return results;
|
||||
}, [trials, designVariables, objectiveIndex, objectiveName, sortBy]);
|
||||
|
||||
if (!correlations.length) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-64 text-gray-500">
|
||||
Not enough data to calculate parameter importance
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Build bar chart data
|
||||
const names = correlations.map(c => c.name);
|
||||
const values = correlations.map(c => c.correlation);
|
||||
const colors = values.map(v => v > 0 ? '#EF4444' : '#22C55E'); // Red for positive (worse), Green for negative (better) when minimizing
|
||||
const hoverTexts = correlations.map(c =>
|
||||
`${c.name}<br>Correlation: ${c.correlation.toFixed(4)}<br>|r|: ${c.absCorrelation.toFixed(4)}<br>${c.correlation > 0 ? 'Higher → Higher objective' : 'Higher → Lower objective'}`
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="w-full">
|
||||
{/* Controls */}
|
||||
<div className="flex justify-between items-center mb-3">
|
||||
<div className="text-sm text-gray-600">
|
||||
Correlation with <span className="font-semibold">{objectiveName}</span>
|
||||
</div>
|
||||
<div className="flex gap-2">
|
||||
<button
|
||||
onClick={() => setSortBy('importance')}
|
||||
className={`px-3 py-1 text-xs rounded ${sortBy === 'importance' ? 'bg-blue-500 text-white' : 'bg-gray-100 text-gray-700'}`}
|
||||
>
|
||||
By Importance
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setSortBy('name')}
|
||||
className={`px-3 py-1 text-xs rounded ${sortBy === 'name' ? 'bg-blue-500 text-white' : 'bg-gray-100 text-gray-700'}`}
|
||||
>
|
||||
By Name
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<Plot
|
||||
data={[
|
||||
{
|
||||
type: 'bar',
|
||||
orientation: 'h',
|
||||
y: names,
|
||||
x: values,
|
||||
text: hoverTexts,
|
||||
hoverinfo: 'text',
|
||||
marker: {
|
||||
color: colors,
|
||||
line: { color: '#fff', width: 1 }
|
||||
}
|
||||
}
|
||||
]}
|
||||
layout={{
|
||||
height: Math.max(height, correlations.length * 30 + 80),
|
||||
margin: { l: 150, r: 30, t: 10, b: 50 },
|
||||
paper_bgcolor: 'rgba(0,0,0,0)',
|
||||
plot_bgcolor: 'rgba(0,0,0,0)',
|
||||
xaxis: {
|
||||
title: { text: 'Correlation Coefficient' },
|
||||
range: [-1, 1],
|
||||
gridcolor: '#E5E7EB',
|
||||
zerolinecolor: '#9CA3AF',
|
||||
zerolinewidth: 2
|
||||
},
|
||||
yaxis: {
|
||||
automargin: true
|
||||
},
|
||||
font: { family: 'Inter, system-ui, sans-serif', size: 11 },
|
||||
bargap: 0.3
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
displaylogo: false,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
toImageButtonOptions: {
|
||||
format: 'png',
|
||||
filename: 'parameter_importance',
|
||||
height: 600,
|
||||
width: 800,
|
||||
scale: 2
|
||||
}
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
|
||||
{/* Legend */}
|
||||
<div className="flex gap-6 justify-center mt-3 text-xs">
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-4 h-3 rounded" style={{ backgroundColor: '#EF4444' }} />
|
||||
<span className="text-gray-600">Positive correlation (higher param → higher objective)</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-4 h-3 rounded" style={{ backgroundColor: '#22C55E' }} />
|
||||
<span className="text-gray-600">Negative correlation (higher param → lower objective)</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,448 +0,0 @@
|
||||
/**
|
||||
* PlotlyParetoPlot - Interactive Pareto front visualization using Plotly
|
||||
*
|
||||
* Features:
|
||||
* - 2D scatter with Pareto front highlighted
|
||||
* - 3D scatter for 3-objective problems
|
||||
* - Hover tooltips with trial details
|
||||
* - Pareto front connection line
|
||||
* - FEA vs NN differentiation
|
||||
* - Constraint satisfaction highlighting
|
||||
* - Dark mode styling
|
||||
* - Zoom, pan, and export
|
||||
*/
|
||||
|
||||
import { useMemo, useState } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface Trial {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
user_attrs?: Record<string, any>;
|
||||
source?: 'FEA' | 'NN' | 'V10_FEA';
|
||||
constraint_satisfied?: boolean;
|
||||
}
|
||||
|
||||
interface Objective {
|
||||
name: string;
|
||||
direction?: 'minimize' | 'maximize';
|
||||
unit?: string;
|
||||
}
|
||||
|
||||
interface PlotlyParetoPlotProps {
|
||||
trials: Trial[];
|
||||
paretoFront: Trial[];
|
||||
objectives: Objective[];
|
||||
height?: number;
|
||||
showParetoLine?: boolean;
|
||||
showInfeasible?: boolean;
|
||||
}
|
||||
|
||||
export function PlotlyParetoPlot({
|
||||
trials,
|
||||
paretoFront,
|
||||
objectives,
|
||||
height = 500,
|
||||
showParetoLine = true,
|
||||
showInfeasible = true
|
||||
}: PlotlyParetoPlotProps) {
|
||||
const [viewMode, setViewMode] = useState<'2d' | '3d'>(objectives.length >= 3 ? '3d' : '2d');
|
||||
const [selectedObjectives, setSelectedObjectives] = useState<[number, number, number]>([0, 1, 2]);
|
||||
|
||||
const paretoSet = useMemo(() => new Set(paretoFront.map(t => t.trial_number)), [paretoFront]);
|
||||
|
||||
// Separate trials by source, Pareto status, and constraint satisfaction
|
||||
const { feaTrials, nnTrials, paretoTrials, infeasibleTrials, stats } = useMemo(() => {
|
||||
const fea: Trial[] = [];
|
||||
const nn: Trial[] = [];
|
||||
const pareto: Trial[] = [];
|
||||
const infeasible: Trial[] = [];
|
||||
|
||||
trials.forEach(t => {
|
||||
const source = t.source || t.user_attrs?.source || 'FEA';
|
||||
const isFeasible = t.constraint_satisfied !== false && t.user_attrs?.constraint_satisfied !== false;
|
||||
|
||||
if (!isFeasible && showInfeasible) {
|
||||
infeasible.push(t);
|
||||
} else if (paretoSet.has(t.trial_number)) {
|
||||
pareto.push(t);
|
||||
} else if (source === 'NN') {
|
||||
nn.push(t);
|
||||
} else {
|
||||
fea.push(t);
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate statistics
|
||||
const stats = {
|
||||
totalTrials: trials.length,
|
||||
paretoCount: pareto.length,
|
||||
feaCount: fea.length + pareto.filter(t => (t.source || 'FEA') !== 'NN').length,
|
||||
nnCount: nn.length + pareto.filter(t => t.source === 'NN').length,
|
||||
infeasibleCount: infeasible.length,
|
||||
hypervolume: 0 // Could calculate if needed
|
||||
};
|
||||
|
||||
return { feaTrials: fea, nnTrials: nn, paretoTrials: pareto, infeasibleTrials: infeasible, stats };
|
||||
}, [trials, paretoSet, showInfeasible]);
|
||||
|
||||
// Helper to get objective value
|
||||
const getObjValue = (trial: Trial, idx: number): number => {
|
||||
if (trial.values && trial.values[idx] !== undefined) {
|
||||
return trial.values[idx];
|
||||
}
|
||||
const objName = objectives[idx]?.name;
|
||||
return trial.user_attrs?.[objName] ?? 0;
|
||||
};
|
||||
|
||||
// Build hover text
|
||||
const buildHoverText = (trial: Trial): string => {
|
||||
const lines = [`Trial #${trial.trial_number}`];
|
||||
objectives.forEach((obj, i) => {
|
||||
const val = getObjValue(trial, i);
|
||||
lines.push(`${obj.name}: ${val.toFixed(4)}${obj.unit ? ` ${obj.unit}` : ''}`);
|
||||
});
|
||||
const source = trial.source || trial.user_attrs?.source || 'FEA';
|
||||
lines.push(`Source: ${source}`);
|
||||
return lines.join('<br>');
|
||||
};
|
||||
|
||||
// Create trace data
|
||||
const createTrace = (
|
||||
trialList: Trial[],
|
||||
name: string,
|
||||
color: string,
|
||||
symbol: string,
|
||||
size: number,
|
||||
opacity: number
|
||||
) => {
|
||||
const [i, j, k] = selectedObjectives;
|
||||
|
||||
if (viewMode === '3d' && objectives.length >= 3) {
|
||||
return {
|
||||
type: 'scatter3d' as const,
|
||||
mode: 'markers' as const,
|
||||
name,
|
||||
x: trialList.map(t => getObjValue(t, i)),
|
||||
y: trialList.map(t => getObjValue(t, j)),
|
||||
z: trialList.map(t => getObjValue(t, k)),
|
||||
text: trialList.map(buildHoverText),
|
||||
hoverinfo: 'text' as const,
|
||||
marker: {
|
||||
color,
|
||||
size,
|
||||
symbol,
|
||||
opacity,
|
||||
line: { color: '#fff', width: 1 }
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
type: 'scatter' as const,
|
||||
mode: 'markers' as const,
|
||||
name,
|
||||
x: trialList.map(t => getObjValue(t, i)),
|
||||
y: trialList.map(t => getObjValue(t, j)),
|
||||
text: trialList.map(buildHoverText),
|
||||
hoverinfo: 'text' as const,
|
||||
marker: {
|
||||
color,
|
||||
size,
|
||||
symbol,
|
||||
opacity,
|
||||
line: { color: '#fff', width: 1 }
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Sort Pareto trials by first objective for line connection
|
||||
const sortedParetoTrials = useMemo(() => {
|
||||
const [i] = selectedObjectives;
|
||||
return [...paretoTrials].sort((a, b) => getObjValue(a, i) - getObjValue(b, i));
|
||||
}, [paretoTrials, selectedObjectives]);
|
||||
|
||||
// Create Pareto front line trace (2D only)
|
||||
const createParetoLine = () => {
|
||||
if (!showParetoLine || viewMode === '3d' || sortedParetoTrials.length < 2) return null;
|
||||
const [i, j] = selectedObjectives;
|
||||
return {
|
||||
type: 'scatter' as const,
|
||||
mode: 'lines' as const,
|
||||
name: 'Pareto Front',
|
||||
x: sortedParetoTrials.map(t => getObjValue(t, i)),
|
||||
y: sortedParetoTrials.map(t => getObjValue(t, j)),
|
||||
line: {
|
||||
color: '#10B981',
|
||||
width: 2,
|
||||
dash: 'dot'
|
||||
},
|
||||
hoverinfo: 'skip' as const,
|
||||
showlegend: false
|
||||
};
|
||||
};
|
||||
|
||||
const traces = [
|
||||
// Infeasible trials (background, red X)
|
||||
...(showInfeasible && infeasibleTrials.length > 0 ? [
|
||||
createTrace(infeasibleTrials, `Infeasible (${infeasibleTrials.length})`, '#EF4444', 'x', 7, 0.4)
|
||||
] : []),
|
||||
// FEA trials (blue circles)
|
||||
createTrace(feaTrials, `FEA (${feaTrials.length})`, '#3B82F6', 'circle', 8, 0.6),
|
||||
// NN trials (purple diamonds)
|
||||
createTrace(nnTrials, `NN (${nnTrials.length})`, '#A855F7', 'diamond', 8, 0.5),
|
||||
// Pareto front line (2D only)
|
||||
createParetoLine(),
|
||||
// Pareto front points (highlighted)
|
||||
createTrace(sortedParetoTrials, `Pareto (${sortedParetoTrials.length})`, '#10B981', 'star', 14, 1.0)
|
||||
].filter(trace => trace && (trace.x as number[]).length > 0);
|
||||
|
||||
const [i, j, k] = selectedObjectives;
|
||||
|
||||
// Dark mode color scheme
|
||||
const colors = {
|
||||
text: '#E5E7EB',
|
||||
textMuted: '#9CA3AF',
|
||||
grid: 'rgba(255,255,255,0.1)',
|
||||
zeroline: 'rgba(255,255,255,0.2)',
|
||||
legendBg: 'rgba(30,30,30,0.9)',
|
||||
legendBorder: 'rgba(255,255,255,0.1)'
|
||||
};
|
||||
|
||||
const layout: any = viewMode === '3d' && objectives.length >= 3
|
||||
? {
|
||||
height,
|
||||
margin: { l: 50, r: 50, t: 30, b: 50 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
scene: {
|
||||
xaxis: {
|
||||
title: { text: objectives[i]?.name || 'Objective 1', font: { color: colors.text } },
|
||||
gridcolor: colors.grid,
|
||||
zerolinecolor: colors.zeroline,
|
||||
tickfont: { color: colors.textMuted }
|
||||
},
|
||||
yaxis: {
|
||||
title: { text: objectives[j]?.name || 'Objective 2', font: { color: colors.text } },
|
||||
gridcolor: colors.grid,
|
||||
zerolinecolor: colors.zeroline,
|
||||
tickfont: { color: colors.textMuted }
|
||||
},
|
||||
zaxis: {
|
||||
title: { text: objectives[k]?.name || 'Objective 3', font: { color: colors.text } },
|
||||
gridcolor: colors.grid,
|
||||
zerolinecolor: colors.zeroline,
|
||||
tickfont: { color: colors.textMuted }
|
||||
},
|
||||
bgcolor: 'transparent'
|
||||
},
|
||||
legend: {
|
||||
x: 1,
|
||||
y: 1,
|
||||
font: { color: colors.text },
|
||||
bgcolor: colors.legendBg,
|
||||
bordercolor: colors.legendBorder,
|
||||
borderwidth: 1
|
||||
},
|
||||
font: { family: 'Inter, system-ui, sans-serif', color: colors.text }
|
||||
}
|
||||
: {
|
||||
height,
|
||||
margin: { l: 60, r: 30, t: 30, b: 60 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
xaxis: {
|
||||
title: { text: objectives[i]?.name || 'Objective 1', font: { color: colors.text } },
|
||||
gridcolor: colors.grid,
|
||||
zerolinecolor: colors.zeroline,
|
||||
tickfont: { color: colors.textMuted }
|
||||
},
|
||||
yaxis: {
|
||||
title: { text: objectives[j]?.name || 'Objective 2', font: { color: colors.text } },
|
||||
gridcolor: colors.grid,
|
||||
zerolinecolor: colors.zeroline,
|
||||
tickfont: { color: colors.textMuted }
|
||||
},
|
||||
legend: {
|
||||
x: 1,
|
||||
y: 1,
|
||||
xanchor: 'right',
|
||||
font: { color: colors.text },
|
||||
bgcolor: colors.legendBg,
|
||||
bordercolor: colors.legendBorder,
|
||||
borderwidth: 1
|
||||
},
|
||||
font: { family: 'Inter, system-ui, sans-serif', color: colors.text },
|
||||
hovermode: 'closest' as const
|
||||
};
|
||||
|
||||
if (!trials.length) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-64 text-dark-400">
|
||||
No trial data available
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="w-full">
|
||||
{/* Stats Bar */}
|
||||
<div className="flex gap-4 mb-4 text-sm">
|
||||
<div className="flex items-center gap-2 px-3 py-1.5 bg-dark-700 rounded-lg">
|
||||
<div className="w-3 h-3 bg-green-500 rounded-full" />
|
||||
<span className="text-dark-300">Pareto:</span>
|
||||
<span className="text-green-400 font-medium">{stats.paretoCount}</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-2 px-3 py-1.5 bg-dark-700 rounded-lg">
|
||||
<div className="w-3 h-3 bg-blue-500 rounded-full" />
|
||||
<span className="text-dark-300">FEA:</span>
|
||||
<span className="text-blue-400 font-medium">{stats.feaCount}</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-2 px-3 py-1.5 bg-dark-700 rounded-lg">
|
||||
<div className="w-3 h-3 bg-purple-500 rounded-full" />
|
||||
<span className="text-dark-300">NN:</span>
|
||||
<span className="text-purple-400 font-medium">{stats.nnCount}</span>
|
||||
</div>
|
||||
{stats.infeasibleCount > 0 && (
|
||||
<div className="flex items-center gap-2 px-3 py-1.5 bg-dark-700 rounded-lg">
|
||||
<div className="w-3 h-3 bg-red-500 rounded-full" />
|
||||
<span className="text-dark-300">Infeasible:</span>
|
||||
<span className="text-red-400 font-medium">{stats.infeasibleCount}</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Controls */}
|
||||
<div className="flex gap-4 items-center justify-between mb-3">
|
||||
<div className="flex gap-2 items-center">
|
||||
{objectives.length >= 3 && (
|
||||
<div className="flex rounded-lg overflow-hidden border border-dark-600">
|
||||
<button
|
||||
onClick={() => setViewMode('2d')}
|
||||
className={`px-3 py-1.5 text-sm font-medium transition-colors ${
|
||||
viewMode === '2d'
|
||||
? 'bg-primary-600 text-white'
|
||||
: 'bg-dark-700 text-dark-300 hover:bg-dark-600 hover:text-white'
|
||||
}`}
|
||||
>
|
||||
2D
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setViewMode('3d')}
|
||||
className={`px-3 py-1.5 text-sm font-medium transition-colors ${
|
||||
viewMode === '3d'
|
||||
? 'bg-primary-600 text-white'
|
||||
: 'bg-dark-700 text-dark-300 hover:bg-dark-600 hover:text-white'
|
||||
}`}
|
||||
>
|
||||
3D
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Objective selectors */}
|
||||
<div className="flex gap-2 items-center text-sm">
|
||||
<label className="text-dark-400">X:</label>
|
||||
<select
|
||||
value={selectedObjectives[0]}
|
||||
onChange={(e) => setSelectedObjectives([parseInt(e.target.value), selectedObjectives[1], selectedObjectives[2]])}
|
||||
className="px-2 py-1.5 bg-dark-700 border border-dark-600 rounded text-white text-sm"
|
||||
>
|
||||
{objectives.map((obj, idx) => (
|
||||
<option key={idx} value={idx}>{obj.name}</option>
|
||||
))}
|
||||
</select>
|
||||
|
||||
<label className="text-dark-400 ml-2">Y:</label>
|
||||
<select
|
||||
value={selectedObjectives[1]}
|
||||
onChange={(e) => setSelectedObjectives([selectedObjectives[0], parseInt(e.target.value), selectedObjectives[2]])}
|
||||
className="px-2 py-1.5 bg-dark-700 border border-dark-600 rounded text-white text-sm"
|
||||
>
|
||||
{objectives.map((obj, idx) => (
|
||||
<option key={idx} value={idx}>{obj.name}</option>
|
||||
))}
|
||||
</select>
|
||||
|
||||
{viewMode === '3d' && objectives.length >= 3 && (
|
||||
<>
|
||||
<label className="text-dark-400 ml-2">Z:</label>
|
||||
<select
|
||||
value={selectedObjectives[2]}
|
||||
onChange={(e) => setSelectedObjectives([selectedObjectives[0], selectedObjectives[1], parseInt(e.target.value)])}
|
||||
className="px-2 py-1.5 bg-dark-700 border border-dark-600 rounded text-white text-sm"
|
||||
>
|
||||
{objectives.map((obj, idx) => (
|
||||
<option key={idx} value={idx}>{obj.name}</option>
|
||||
))}
|
||||
</select>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<Plot
|
||||
data={traces as any}
|
||||
layout={layout}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
displaylogo: false,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
toImageButtonOptions: {
|
||||
format: 'png',
|
||||
filename: 'pareto_front',
|
||||
height: 800,
|
||||
width: 1200,
|
||||
scale: 2
|
||||
}
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
|
||||
{/* Pareto Front Table for 2D view */}
|
||||
{viewMode === '2d' && sortedParetoTrials.length > 0 && (
|
||||
<div className="mt-4 max-h-48 overflow-auto">
|
||||
<table className="w-full text-sm">
|
||||
<thead className="sticky top-0 bg-dark-800">
|
||||
<tr className="border-b border-dark-600">
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Trial</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">{objectives[i]?.name || 'Obj 1'}</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">{objectives[j]?.name || 'Obj 2'}</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Source</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{sortedParetoTrials.slice(0, 10).map(trial => (
|
||||
<tr key={trial.trial_number} className="border-b border-dark-700 hover:bg-dark-750">
|
||||
<td className="py-2 px-3 font-mono text-white">#{trial.trial_number}</td>
|
||||
<td className="py-2 px-3 font-mono text-green-400">
|
||||
{getObjValue(trial, i).toExponential(4)}
|
||||
</td>
|
||||
<td className="py-2 px-3 font-mono text-green-400">
|
||||
{getObjValue(trial, j).toExponential(4)}
|
||||
</td>
|
||||
<td className="py-2 px-3">
|
||||
<span className={`px-2 py-0.5 rounded text-xs ${
|
||||
(trial.source || trial.user_attrs?.source) === 'NN'
|
||||
? 'bg-purple-500/20 text-purple-400'
|
||||
: 'bg-blue-500/20 text-blue-400'
|
||||
}`}>
|
||||
{trial.source || trial.user_attrs?.source || 'FEA'}
|
||||
</span>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
{sortedParetoTrials.length > 10 && (
|
||||
<div className="text-center py-2 text-dark-500 text-xs">
|
||||
Showing 10 of {sortedParetoTrials.length} Pareto-optimal solutions
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
import { useMemo } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
import { TrendingUp, TrendingDown, Minus } from 'lucide-react';
|
||||
|
||||
interface Run {
|
||||
run_id: number;
|
||||
name: string;
|
||||
source: 'FEA' | 'NN';
|
||||
trial_count: number;
|
||||
best_value: number | null;
|
||||
avg_value: number | null;
|
||||
first_trial: string | null;
|
||||
last_trial: string | null;
|
||||
}
|
||||
|
||||
interface PlotlyRunComparisonProps {
|
||||
runs: Run[];
|
||||
height?: number;
|
||||
}
|
||||
|
||||
export function PlotlyRunComparison({ runs, height = 400 }: PlotlyRunComparisonProps) {
|
||||
const chartData = useMemo(() => {
|
||||
if (runs.length === 0) return null;
|
||||
|
||||
// Separate FEA and NN runs
|
||||
const feaRuns = runs.filter(r => r.source === 'FEA');
|
||||
const nnRuns = runs.filter(r => r.source === 'NN');
|
||||
|
||||
// Create bar chart for trial counts
|
||||
const trialCountData = {
|
||||
x: runs.map(r => r.name),
|
||||
y: runs.map(r => r.trial_count),
|
||||
type: 'bar' as const,
|
||||
name: 'Trial Count',
|
||||
marker: {
|
||||
color: runs.map(r => r.source === 'NN' ? 'rgba(147, 51, 234, 0.8)' : 'rgba(59, 130, 246, 0.8)'),
|
||||
line: { color: runs.map(r => r.source === 'NN' ? 'rgb(147, 51, 234)' : 'rgb(59, 130, 246)'), width: 1 }
|
||||
},
|
||||
hovertemplate: '<b>%{x}</b><br>Trials: %{y}<extra></extra>'
|
||||
};
|
||||
|
||||
// Create line chart for best values
|
||||
const bestValueData = {
|
||||
x: runs.map(r => r.name),
|
||||
y: runs.map(r => r.best_value),
|
||||
type: 'scatter' as const,
|
||||
mode: 'lines+markers' as const,
|
||||
name: 'Best Value',
|
||||
yaxis: 'y2',
|
||||
line: { color: 'rgba(16, 185, 129, 1)', width: 2 },
|
||||
marker: { size: 8, color: 'rgba(16, 185, 129, 1)' },
|
||||
hovertemplate: '<b>%{x}</b><br>Best: %{y:.4e}<extra></extra>'
|
||||
};
|
||||
|
||||
return { trialCountData, bestValueData, feaRuns, nnRuns };
|
||||
}, [runs]);
|
||||
|
||||
// Calculate statistics
|
||||
const stats = useMemo(() => {
|
||||
if (runs.length === 0) return null;
|
||||
|
||||
const totalTrials = runs.reduce((sum, r) => sum + r.trial_count, 0);
|
||||
const feaTrials = runs.filter(r => r.source === 'FEA').reduce((sum, r) => sum + r.trial_count, 0);
|
||||
const nnTrials = runs.filter(r => r.source === 'NN').reduce((sum, r) => sum + r.trial_count, 0);
|
||||
|
||||
const bestValues = runs.map(r => r.best_value).filter((v): v is number => v !== null);
|
||||
const overallBest = bestValues.length > 0 ? Math.min(...bestValues) : null;
|
||||
|
||||
// Calculate improvement from first FEA run to overall best
|
||||
const feaRuns = runs.filter(r => r.source === 'FEA');
|
||||
const firstFEA = feaRuns.length > 0 ? feaRuns[0].best_value : null;
|
||||
const improvement = firstFEA && overallBest ? ((firstFEA - overallBest) / Math.abs(firstFEA)) * 100 : null;
|
||||
|
||||
return {
|
||||
totalTrials,
|
||||
feaTrials,
|
||||
nnTrials,
|
||||
overallBest,
|
||||
improvement,
|
||||
totalRuns: runs.length,
|
||||
feaRuns: runs.filter(r => r.source === 'FEA').length,
|
||||
nnRuns: runs.filter(r => r.source === 'NN').length
|
||||
};
|
||||
}, [runs]);
|
||||
|
||||
if (!chartData || !stats) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-64 text-dark-400">
|
||||
No run data available
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
{/* Stats Summary */}
|
||||
<div className="grid grid-cols-2 md:grid-cols-4 lg:grid-cols-6 gap-3">
|
||||
<div className="bg-dark-750 rounded-lg p-3">
|
||||
<div className="text-xs text-dark-400 mb-1">Total Runs</div>
|
||||
<div className="text-xl font-bold text-white">{stats.totalRuns}</div>
|
||||
</div>
|
||||
<div className="bg-dark-750 rounded-lg p-3">
|
||||
<div className="text-xs text-dark-400 mb-1">Total Trials</div>
|
||||
<div className="text-xl font-bold text-white">{stats.totalTrials}</div>
|
||||
</div>
|
||||
<div className="bg-dark-750 rounded-lg p-3">
|
||||
<div className="text-xs text-dark-400 mb-1">FEA Trials</div>
|
||||
<div className="text-xl font-bold text-blue-400">{stats.feaTrials}</div>
|
||||
</div>
|
||||
<div className="bg-dark-750 rounded-lg p-3">
|
||||
<div className="text-xs text-dark-400 mb-1">NN Trials</div>
|
||||
<div className="text-xl font-bold text-purple-400">{stats.nnTrials}</div>
|
||||
</div>
|
||||
<div className="bg-dark-750 rounded-lg p-3">
|
||||
<div className="text-xs text-dark-400 mb-1">Best Value</div>
|
||||
<div className="text-xl font-bold text-green-400">
|
||||
{stats.overallBest !== null ? stats.overallBest.toExponential(3) : 'N/A'}
|
||||
</div>
|
||||
</div>
|
||||
<div className="bg-dark-750 rounded-lg p-3">
|
||||
<div className="text-xs text-dark-400 mb-1">Improvement</div>
|
||||
<div className="text-xl font-bold text-primary-400 flex items-center gap-1">
|
||||
{stats.improvement !== null ? (
|
||||
<>
|
||||
{stats.improvement > 0 ? <TrendingDown className="w-4 h-4" /> :
|
||||
stats.improvement < 0 ? <TrendingUp className="w-4 h-4" /> :
|
||||
<Minus className="w-4 h-4" />}
|
||||
{Math.abs(stats.improvement).toFixed(1)}%
|
||||
</>
|
||||
) : 'N/A'}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Chart */}
|
||||
<Plot
|
||||
data={[chartData.trialCountData, chartData.bestValueData]}
|
||||
layout={{
|
||||
height,
|
||||
margin: { l: 60, r: 60, t: 40, b: 100 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
font: { color: '#9ca3af', size: 11 },
|
||||
showlegend: true,
|
||||
legend: {
|
||||
orientation: 'h',
|
||||
y: 1.12,
|
||||
x: 0.5,
|
||||
xanchor: 'center',
|
||||
bgcolor: 'transparent'
|
||||
},
|
||||
xaxis: {
|
||||
tickangle: -45,
|
||||
gridcolor: 'rgba(75, 85, 99, 0.3)',
|
||||
linecolor: 'rgba(75, 85, 99, 0.5)',
|
||||
tickfont: { size: 10 }
|
||||
},
|
||||
yaxis: {
|
||||
title: { text: 'Trial Count' },
|
||||
gridcolor: 'rgba(75, 85, 99, 0.3)',
|
||||
linecolor: 'rgba(75, 85, 99, 0.5)',
|
||||
zeroline: false
|
||||
},
|
||||
yaxis2: {
|
||||
title: { text: 'Best Value' },
|
||||
overlaying: 'y',
|
||||
side: 'right',
|
||||
gridcolor: 'rgba(75, 85, 99, 0.1)',
|
||||
linecolor: 'rgba(75, 85, 99, 0.5)',
|
||||
zeroline: false,
|
||||
tickformat: '.2e'
|
||||
},
|
||||
bargap: 0.3,
|
||||
hovermode: 'x unified'
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
displaylogo: false,
|
||||
modeBarButtonsToRemove: ['select2d', 'lasso2d', 'autoScale2d']
|
||||
}}
|
||||
className="w-full"
|
||||
useResizeHandler
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
|
||||
{/* Runs Table */}
|
||||
<div className="overflow-x-auto">
|
||||
<table className="w-full text-sm">
|
||||
<thead>
|
||||
<tr className="border-b border-dark-600">
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Run Name</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Source</th>
|
||||
<th className="text-right py-2 px-3 text-dark-400 font-medium">Trials</th>
|
||||
<th className="text-right py-2 px-3 text-dark-400 font-medium">Best Value</th>
|
||||
<th className="text-right py-2 px-3 text-dark-400 font-medium">Avg Value</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Duration</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{runs.map((run) => {
|
||||
// Calculate duration if times available
|
||||
let duration = '-';
|
||||
if (run.first_trial && run.last_trial) {
|
||||
const start = new Date(run.first_trial);
|
||||
const end = new Date(run.last_trial);
|
||||
const diffMs = end.getTime() - start.getTime();
|
||||
const diffMins = Math.round(diffMs / 60000);
|
||||
if (diffMins < 60) {
|
||||
duration = `${diffMins}m`;
|
||||
} else {
|
||||
const hours = Math.floor(diffMins / 60);
|
||||
const mins = diffMins % 60;
|
||||
duration = `${hours}h ${mins}m`;
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<tr key={run.run_id} className="border-b border-dark-700 hover:bg-dark-750">
|
||||
<td className="py-2 px-3 font-mono text-white">{run.name}</td>
|
||||
<td className="py-2 px-3">
|
||||
<span className={`px-2 py-0.5 rounded text-xs ${
|
||||
run.source === 'NN'
|
||||
? 'bg-purple-500/20 text-purple-400'
|
||||
: 'bg-blue-500/20 text-blue-400'
|
||||
}`}>
|
||||
{run.source}
|
||||
</span>
|
||||
</td>
|
||||
<td className="py-2 px-3 text-right font-mono text-white">{run.trial_count}</td>
|
||||
<td className="py-2 px-3 text-right font-mono text-green-400">
|
||||
{run.best_value !== null ? run.best_value.toExponential(4) : '-'}
|
||||
</td>
|
||||
<td className="py-2 px-3 text-right font-mono text-dark-300">
|
||||
{run.avg_value !== null ? run.avg_value.toExponential(4) : '-'}
|
||||
</td>
|
||||
<td className="py-2 px-3 text-dark-400">{duration}</td>
|
||||
</tr>
|
||||
);
|
||||
})}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default PlotlyRunComparison;
|
||||
@@ -1,202 +0,0 @@
|
||||
import { useMemo } from 'react';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
interface TrialData {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
source?: 'FEA' | 'NN' | 'V10_FEA';
|
||||
user_attrs?: Record<string, any>;
|
||||
}
|
||||
|
||||
interface PlotlySurrogateQualityProps {
|
||||
trials: TrialData[];
|
||||
height?: number;
|
||||
}
|
||||
|
||||
export function PlotlySurrogateQuality({
|
||||
trials,
|
||||
height = 400
|
||||
}: PlotlySurrogateQualityProps) {
|
||||
const { feaTrials, nnTrials, timeline } = useMemo(() => {
|
||||
const fea = trials.filter(t => t.source === 'FEA' || t.source === 'V10_FEA');
|
||||
const nn = trials.filter(t => t.source === 'NN');
|
||||
|
||||
// Sort by trial number for timeline
|
||||
const sorted = [...trials].sort((a, b) => a.trial_number - b.trial_number);
|
||||
|
||||
// Calculate source distribution over time
|
||||
const timeline: { trial: number; feaCount: number; nnCount: number }[] = [];
|
||||
let feaCount = 0;
|
||||
let nnCount = 0;
|
||||
|
||||
sorted.forEach(t => {
|
||||
if (t.source === 'NN') nnCount++;
|
||||
else feaCount++;
|
||||
|
||||
timeline.push({
|
||||
trial: t.trial_number,
|
||||
feaCount,
|
||||
nnCount
|
||||
});
|
||||
});
|
||||
|
||||
return {
|
||||
feaTrials: fea,
|
||||
nnTrials: nn,
|
||||
timeline
|
||||
};
|
||||
}, [trials]);
|
||||
|
||||
if (nnTrials.length === 0) {
|
||||
return (
|
||||
<div className="h-64 flex items-center justify-center text-dark-400">
|
||||
<p>No neural network evaluations in this study</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Objective distribution by source
|
||||
const feaObjectives = feaTrials.map(t => t.values[0]).filter(v => v !== undefined && !isNaN(v));
|
||||
const nnObjectives = nnTrials.map(t => t.values[0]).filter(v => v !== undefined && !isNaN(v));
|
||||
|
||||
return (
|
||||
<div className="space-y-6">
|
||||
{/* Source Distribution Over Time */}
|
||||
<Plot
|
||||
data={[
|
||||
{
|
||||
x: timeline.map(t => t.trial),
|
||||
y: timeline.map(t => t.feaCount),
|
||||
type: 'scatter',
|
||||
mode: 'lines',
|
||||
name: 'FEA Cumulative',
|
||||
line: { color: '#3b82f6', width: 2 },
|
||||
fill: 'tozeroy',
|
||||
fillcolor: 'rgba(59, 130, 246, 0.2)'
|
||||
},
|
||||
{
|
||||
x: timeline.map(t => t.trial),
|
||||
y: timeline.map(t => t.nnCount),
|
||||
type: 'scatter',
|
||||
mode: 'lines',
|
||||
name: 'NN Cumulative',
|
||||
line: { color: '#a855f7', width: 2 },
|
||||
fill: 'tozeroy',
|
||||
fillcolor: 'rgba(168, 85, 247, 0.2)'
|
||||
}
|
||||
]}
|
||||
layout={{
|
||||
title: {
|
||||
text: 'Evaluation Source Over Time',
|
||||
font: { color: '#fff', size: 14 }
|
||||
},
|
||||
height: height * 0.6,
|
||||
margin: { l: 60, r: 30, t: 50, b: 50 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
xaxis: {
|
||||
title: { text: 'Trial Number', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
gridcolor: 'rgba(255,255,255,0.05)'
|
||||
},
|
||||
yaxis: {
|
||||
title: { text: 'Cumulative Count', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
gridcolor: 'rgba(255,255,255,0.1)'
|
||||
},
|
||||
legend: {
|
||||
font: { color: '#888' },
|
||||
bgcolor: 'rgba(0,0,0,0.5)',
|
||||
orientation: 'h',
|
||||
y: 1.1
|
||||
},
|
||||
showlegend: true
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
displaylogo: false
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
|
||||
{/* Objective Distribution by Source */}
|
||||
<Plot
|
||||
data={[
|
||||
{
|
||||
x: feaObjectives,
|
||||
type: 'histogram',
|
||||
name: 'FEA',
|
||||
marker: { color: 'rgba(59, 130, 246, 0.7)' },
|
||||
opacity: 0.8
|
||||
} as any,
|
||||
{
|
||||
x: nnObjectives,
|
||||
type: 'histogram',
|
||||
name: 'NN',
|
||||
marker: { color: 'rgba(168, 85, 247, 0.7)' },
|
||||
opacity: 0.8
|
||||
} as any
|
||||
]}
|
||||
layout={{
|
||||
title: {
|
||||
text: 'Objective Distribution by Source',
|
||||
font: { color: '#fff', size: 14 }
|
||||
},
|
||||
height: height * 0.5,
|
||||
margin: { l: 60, r: 30, t: 50, b: 50 },
|
||||
paper_bgcolor: 'transparent',
|
||||
plot_bgcolor: 'transparent',
|
||||
xaxis: {
|
||||
title: { text: 'Objective Value', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
gridcolor: 'rgba(255,255,255,0.05)'
|
||||
},
|
||||
yaxis: {
|
||||
title: { text: 'Count', font: { color: '#888' } },
|
||||
tickfont: { color: '#888' },
|
||||
gridcolor: 'rgba(255,255,255,0.1)'
|
||||
},
|
||||
barmode: 'overlay',
|
||||
legend: {
|
||||
font: { color: '#888' },
|
||||
bgcolor: 'rgba(0,0,0,0.5)',
|
||||
orientation: 'h',
|
||||
y: 1.1
|
||||
},
|
||||
showlegend: true
|
||||
}}
|
||||
config={{
|
||||
displayModeBar: true,
|
||||
modeBarButtonsToRemove: ['lasso2d', 'select2d'],
|
||||
displaylogo: false
|
||||
}}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
|
||||
{/* FEA vs NN Best Values Comparison */}
|
||||
{feaObjectives.length > 0 && nnObjectives.length > 0 && (
|
||||
<div className="grid grid-cols-2 gap-4 mt-4">
|
||||
<div className="bg-dark-750 rounded-lg p-4 border border-dark-600">
|
||||
<div className="text-xs text-dark-400 uppercase mb-2">FEA Best</div>
|
||||
<div className="text-xl font-mono text-blue-400">
|
||||
{Math.min(...feaObjectives).toExponential(4)}
|
||||
</div>
|
||||
<div className="text-xs text-dark-500 mt-1">
|
||||
from {feaObjectives.length} evaluations
|
||||
</div>
|
||||
</div>
|
||||
<div className="bg-dark-750 rounded-lg p-4 border border-dark-600">
|
||||
<div className="text-xs text-dark-400 uppercase mb-2">NN Best</div>
|
||||
<div className="text-xl font-mono text-purple-400">
|
||||
{Math.min(...nnObjectives).toExponential(4)}
|
||||
</div>
|
||||
<div className="text-xs text-dark-500 mt-1">
|
||||
from {nnObjectives.length} predictions
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
# Plotly Chart Components
|
||||
|
||||
Interactive visualization components using Plotly.js for the Atomizer Dashboard.
|
||||
|
||||
## Overview
|
||||
|
||||
These components provide enhanced interactivity compared to Recharts:
|
||||
- Native zoom, pan, and selection
|
||||
- Export to PNG/SVG
|
||||
- Hover tooltips with detailed information
|
||||
- Brush filtering (parallel coordinates)
|
||||
- 3D visualization support
|
||||
|
||||
## Components
|
||||
|
||||
### PlotlyParallelCoordinates
|
||||
|
||||
Multi-dimensional data visualization showing relationships between all variables.
|
||||
|
||||
```tsx
|
||||
import { PlotlyParallelCoordinates } from '../components/plotly';
|
||||
|
||||
<PlotlyParallelCoordinates
|
||||
trials={allTrials}
|
||||
objectives={studyMetadata.objectives}
|
||||
designVariables={studyMetadata.design_variables}
|
||||
paretoFront={paretoFront}
|
||||
height={450}
|
||||
/>
|
||||
```
|
||||
|
||||
**Props:**
|
||||
| Prop | Type | Description |
|
||||
|------|------|-------------|
|
||||
| trials | Trial[] | All trial data |
|
||||
| objectives | Objective[] | Objective definitions |
|
||||
| designVariables | DesignVariable[] | Design variable definitions |
|
||||
| paretoFront | Trial[] | Pareto-optimal trials (optional) |
|
||||
| height | number | Chart height in pixels |
|
||||
|
||||
**Features:**
|
||||
- Drag on axes to filter data
|
||||
- Double-click to reset filters
|
||||
- Color coding: FEA (blue), NN (orange), Pareto (green)
|
||||
|
||||
### PlotlyParetoPlot
|
||||
|
||||
2D/3D scatter plot for Pareto front visualization.
|
||||
|
||||
```tsx
|
||||
<PlotlyParetoPlot
|
||||
trials={allTrials}
|
||||
paretoFront={paretoFront}
|
||||
objectives={studyMetadata.objectives}
|
||||
height={350}
|
||||
/>
|
||||
```
|
||||
|
||||
**Props:**
|
||||
| Prop | Type | Description |
|
||||
|------|------|-------------|
|
||||
| trials | Trial[] | All trial data |
|
||||
| paretoFront | Trial[] | Pareto-optimal trials |
|
||||
| objectives | Objective[] | Objective definitions |
|
||||
| height | number | Chart height in pixels |
|
||||
|
||||
**Features:**
|
||||
- Toggle between 2D and 3D views
|
||||
- Axis selector for multi-objective problems
|
||||
- Click to select trials
|
||||
- Hover for trial details
|
||||
|
||||
### PlotlyConvergencePlot
|
||||
|
||||
Optimization progress over trials.
|
||||
|
||||
```tsx
|
||||
<PlotlyConvergencePlot
|
||||
trials={allTrials}
|
||||
objectiveIndex={0}
|
||||
objectiveName="weighted_objective"
|
||||
direction="minimize"
|
||||
height={350}
|
||||
/>
|
||||
```
|
||||
|
||||
**Props:**
|
||||
| Prop | Type | Description |
|
||||
|------|------|-------------|
|
||||
| trials | Trial[] | All trial data |
|
||||
| objectiveIndex | number | Which objective to plot |
|
||||
| objectiveName | string | Objective display name |
|
||||
| direction | 'minimize' \| 'maximize' | Optimization direction |
|
||||
| height | number | Chart height |
|
||||
| showRangeSlider | boolean | Show zoom slider |
|
||||
|
||||
**Features:**
|
||||
- Scatter points for each trial
|
||||
- Best-so-far step line
|
||||
- Range slider for zooming
|
||||
- FEA vs NN differentiation
|
||||
|
||||
### PlotlyParameterImportance
|
||||
|
||||
Correlation-based parameter sensitivity analysis.
|
||||
|
||||
```tsx
|
||||
<PlotlyParameterImportance
|
||||
trials={allTrials}
|
||||
designVariables={studyMetadata.design_variables}
|
||||
objectiveIndex={0}
|
||||
objectiveName="weighted_objective"
|
||||
height={350}
|
||||
/>
|
||||
```
|
||||
|
||||
**Props:**
|
||||
| Prop | Type | Description |
|
||||
|------|------|-------------|
|
||||
| trials | Trial[] | All trial data |
|
||||
| designVariables | DesignVariable[] | Design variables |
|
||||
| objectiveIndex | number | Which objective |
|
||||
| objectiveName | string | Objective display name |
|
||||
| height | number | Chart height |
|
||||
|
||||
**Features:**
|
||||
- Horizontal bar chart of correlations
|
||||
- Sort by importance or name
|
||||
- Color: Red (positive), Green (negative)
|
||||
- Pearson correlation coefficient
|
||||
|
||||
## Bundle Optimization
|
||||
|
||||
To minimize bundle size, we use:
|
||||
|
||||
1. **plotly.js-basic-dist**: Smaller bundle (~1MB vs 3.5MB)
|
||||
- Includes: scatter, bar, parcoords
|
||||
- Excludes: 3D plots, maps, animations
|
||||
|
||||
2. **Lazy Loading**: Components loaded on demand
|
||||
```tsx
|
||||
const PlotlyParetoPlot = lazy(() =>
|
||||
import('./plotly/PlotlyParetoPlot')
|
||||
.then(m => ({ default: m.PlotlyParetoPlot }))
|
||||
);
|
||||
```
|
||||
|
||||
3. **Code Splitting**: Vite config separates Plotly into its own chunk
|
||||
```ts
|
||||
manualChunks: {
|
||||
plotly: ['plotly.js-basic-dist', 'react-plotly.js']
|
||||
}
|
||||
```
|
||||
|
||||
## Usage with Suspense
|
||||
|
||||
Always wrap Plotly components with Suspense:
|
||||
|
||||
```tsx
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParetoPlot {...props} />
|
||||
</Suspense>
|
||||
```
|
||||
|
||||
## Type Definitions
|
||||
|
||||
```typescript
|
||||
interface Trial {
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
user_attrs?: Record<string, any>;
|
||||
source?: 'FEA' | 'NN' | 'V10_FEA';
|
||||
}
|
||||
|
||||
interface Objective {
|
||||
name: string;
|
||||
direction?: 'minimize' | 'maximize';
|
||||
unit?: string;
|
||||
}
|
||||
|
||||
interface DesignVariable {
|
||||
name: string;
|
||||
unit?: string;
|
||||
min?: number;
|
||||
max?: number;
|
||||
}
|
||||
```
|
||||
|
||||
## Styling
|
||||
|
||||
Components use transparent backgrounds for dark theme compatibility:
|
||||
- `paper_bgcolor: 'rgba(0,0,0,0)'`
|
||||
- `plot_bgcolor: 'rgba(0,0,0,0)'`
|
||||
- Font: Inter, system-ui, sans-serif
|
||||
- Grid colors: Tailwind gray palette
|
||||
|
||||
## Export Options
|
||||
|
||||
All Plotly charts include a mode bar with:
|
||||
- Download PNG
|
||||
- Download SVG (via menu)
|
||||
- Zoom, Pan, Reset
|
||||
- Auto-scale
|
||||
|
||||
Configure export in the `config` prop:
|
||||
```tsx
|
||||
config={{
|
||||
toImageButtonOptions: {
|
||||
format: 'png',
|
||||
filename: 'my_chart',
|
||||
height: 600,
|
||||
width: 1200,
|
||||
scale: 2
|
||||
}
|
||||
}}
|
||||
```
|
||||
@@ -1,15 +0,0 @@
|
||||
/**
|
||||
* Plotly-based interactive chart components
|
||||
*
|
||||
* These components provide enhanced interactivity compared to Recharts:
|
||||
* - Native zoom/pan
|
||||
* - Brush selection on axes
|
||||
* - 3D views for multi-objective problems
|
||||
* - Export to PNG/SVG
|
||||
* - Detailed hover tooltips
|
||||
*/
|
||||
|
||||
export { PlotlyParallelCoordinates } from './PlotlyParallelCoordinates';
|
||||
export { PlotlyParetoPlot } from './PlotlyParetoPlot';
|
||||
export { PlotlyConvergencePlot } from './PlotlyConvergencePlot';
|
||||
export { PlotlyParameterImportance } from './PlotlyParameterImportance';
|
||||
@@ -3,3 +3,27 @@ export { useCanvasStore } from './useCanvasStore';
|
||||
export type { OptimizationConfig } from './useCanvasStore';
|
||||
export { useCanvasChat } from './useCanvasChat';
|
||||
export { useIntentParser } from './useIntentParser';
|
||||
|
||||
// Spec Store (AtomizerSpec v2.0)
|
||||
export {
|
||||
useSpecStore,
|
||||
useSpec,
|
||||
useSpecLoading,
|
||||
useSpecError,
|
||||
useSpecValidation,
|
||||
useSelectedNodeId,
|
||||
useSelectedEdgeId,
|
||||
useSpecHash,
|
||||
useSpecIsDirty,
|
||||
useDesignVariables,
|
||||
useExtractors,
|
||||
useObjectives,
|
||||
useConstraints,
|
||||
useCanvasEdges,
|
||||
useSelectedNode,
|
||||
} from './useSpecStore';
|
||||
|
||||
// WebSocket Sync
|
||||
export { useSpecWebSocket } from './useSpecWebSocket';
|
||||
export type { ConnectionStatus } from './useSpecWebSocket';
|
||||
export { ConnectionStatusIndicator } from '../components/canvas/ConnectionStatusIndicator';
|
||||
|
||||
@@ -11,12 +11,25 @@ export interface CanvasState {
|
||||
studyPath?: string;
|
||||
}
|
||||
|
||||
export interface CanvasModification {
|
||||
action: 'add_node' | 'update_node' | 'remove_node' | 'add_edge' | 'remove_edge';
|
||||
nodeType?: string;
|
||||
nodeId?: string;
|
||||
edgeId?: string;
|
||||
data?: Record<string, any>;
|
||||
source?: string;
|
||||
target?: string;
|
||||
position?: { x: number; y: number };
|
||||
}
|
||||
|
||||
interface UseChatOptions {
|
||||
studyId?: string | null;
|
||||
mode?: ChatMode;
|
||||
useWebSocket?: boolean;
|
||||
canvasState?: CanvasState | null;
|
||||
onError?: (error: string) => void;
|
||||
onCanvasModification?: (modification: CanvasModification) => void;
|
||||
onSpecUpdated?: (spec: any) => void; // Called when Claude modifies the spec
|
||||
}
|
||||
|
||||
interface ChatState {
|
||||
@@ -35,6 +48,8 @@ export function useChat({
|
||||
useWebSocket = true,
|
||||
canvasState: initialCanvasState,
|
||||
onError,
|
||||
onCanvasModification,
|
||||
onSpecUpdated,
|
||||
}: UseChatOptions = {}) {
|
||||
const [state, setState] = useState<ChatState>({
|
||||
messages: [],
|
||||
@@ -49,6 +64,23 @@ export function useChat({
|
||||
// Track canvas state for sending with messages
|
||||
const canvasStateRef = useRef<CanvasState | null>(initialCanvasState || null);
|
||||
|
||||
// Sync mode prop changes to internal state (triggers WebSocket reconnect)
|
||||
useEffect(() => {
|
||||
if (mode !== state.mode) {
|
||||
console.log(`[useChat] Mode prop changed from ${state.mode} to ${mode}, triggering reconnect`);
|
||||
// Close existing WebSocket
|
||||
wsRef.current?.close();
|
||||
wsRef.current = null;
|
||||
// Update internal state to trigger reconnect
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
mode,
|
||||
sessionId: null,
|
||||
isConnected: false,
|
||||
}));
|
||||
}
|
||||
}, [mode]);
|
||||
|
||||
const abortControllerRef = useRef<AbortController | null>(null);
|
||||
const conversationHistoryRef = useRef<Array<{ role: string; content: string }>>([]);
|
||||
const wsRef = useRef<WebSocket | null>(null);
|
||||
@@ -82,9 +114,16 @@ export function useChat({
|
||||
const data = await response.json();
|
||||
setState((prev) => ({ ...prev, sessionId: data.session_id }));
|
||||
|
||||
// Connect WebSocket
|
||||
// Connect WebSocket - use backend directly in dev mode
|
||||
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
|
||||
const wsUrl = `${protocol}//${window.location.host}/api/claude/sessions/${data.session_id}/ws`;
|
||||
// Use port 8001 to match start-dashboard.bat
|
||||
const backendHost = import.meta.env.DEV ? 'localhost:8001' : window.location.host;
|
||||
// Both modes use the same WebSocket - mode is handled by session config
|
||||
// Power mode uses --dangerously-skip-permissions in CLI
|
||||
// User mode uses --allowedTools to restrict access
|
||||
const wsPath = `/api/claude/sessions/${data.session_id}/ws`;
|
||||
const wsUrl = `${protocol}//${backendHost}${wsPath}`;
|
||||
console.log(`[useChat] Connecting to WebSocket (${state.mode} mode): ${wsUrl}`);
|
||||
const ws = new WebSocket(wsUrl);
|
||||
|
||||
ws.onopen = () => {
|
||||
@@ -126,6 +165,9 @@ export function useChat({
|
||||
|
||||
// Handle WebSocket messages
|
||||
const handleWebSocketMessage = useCallback((data: any) => {
|
||||
// Debug: log all incoming WebSocket messages
|
||||
console.log('[useChat] WebSocket message received:', data.type, data);
|
||||
|
||||
switch (data.type) {
|
||||
case 'text':
|
||||
currentMessageRef.current += data.content || '';
|
||||
@@ -212,11 +254,51 @@ export function useChat({
|
||||
// Canvas state was updated - could show notification
|
||||
break;
|
||||
|
||||
case 'canvas_modification':
|
||||
// Assistant wants to modify the canvas (from MCP tools in user mode)
|
||||
console.log('[useChat] Received canvas_modification:', data.modification);
|
||||
if (onCanvasModification && data.modification) {
|
||||
console.log('[useChat] Calling onCanvasModification callback');
|
||||
onCanvasModification(data.modification);
|
||||
} else {
|
||||
console.warn('[useChat] canvas_modification received but no handler or modification:', {
|
||||
hasCallback: !!onCanvasModification,
|
||||
modification: data.modification
|
||||
});
|
||||
}
|
||||
break;
|
||||
|
||||
case 'spec_updated':
|
||||
// Assistant modified the spec - we receive the full updated spec
|
||||
console.log('[useChat] Spec updated by assistant:', data.tool, data.reason);
|
||||
if (onSpecUpdated && data.spec) {
|
||||
// Directly update the canvas with the new spec
|
||||
onSpecUpdated(data.spec);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'spec_modified':
|
||||
// Legacy: Assistant modified the spec directly (from power mode write tools)
|
||||
console.log('[useChat] Spec was modified by assistant (legacy):', data.tool, data.changes);
|
||||
// Treat this as a canvas modification to trigger reload
|
||||
if (onCanvasModification) {
|
||||
// Create a synthetic modification event to trigger canvas refresh
|
||||
onCanvasModification({
|
||||
action: 'add_node', // Use add_node as it triggers refresh
|
||||
data: {
|
||||
_refresh: true,
|
||||
tool: data.tool,
|
||||
changes: data.changes,
|
||||
},
|
||||
});
|
||||
}
|
||||
break;
|
||||
|
||||
case 'pong':
|
||||
// Heartbeat response - ignore
|
||||
break;
|
||||
}
|
||||
}, [onError]);
|
||||
}, [onError, onCanvasModification]);
|
||||
|
||||
// Switch mode (requires new session)
|
||||
const switchMode = useCallback(async (newMode: ChatMode) => {
|
||||
@@ -462,6 +544,18 @@ export function useChat({
|
||||
}
|
||||
}, [useWebSocket]);
|
||||
|
||||
// Notify backend when user edits canvas (so Claude sees the changes)
|
||||
const notifyCanvasEdit = useCallback((spec: any) => {
|
||||
if (useWebSocket && wsRef.current?.readyState === WebSocket.OPEN) {
|
||||
wsRef.current.send(
|
||||
JSON.stringify({
|
||||
type: 'canvas_edit',
|
||||
spec: spec,
|
||||
})
|
||||
);
|
||||
}
|
||||
}, [useWebSocket]);
|
||||
|
||||
return {
|
||||
messages: state.messages,
|
||||
isThinking: state.isThinking,
|
||||
@@ -475,5 +569,6 @@ export function useChat({
|
||||
cancelRequest,
|
||||
switchMode,
|
||||
updateCanvasState,
|
||||
notifyCanvasEdit,
|
||||
};
|
||||
}
|
||||
|
||||
349
atomizer-dashboard/frontend/src/hooks/useClaudeCode.ts
Normal file
349
atomizer-dashboard/frontend/src/hooks/useClaudeCode.ts
Normal file
@@ -0,0 +1,349 @@
|
||||
/**
|
||||
* Hook for Claude Code CLI integration
|
||||
*
|
||||
* Connects to backend that spawns actual Claude Code CLI processes.
|
||||
* This gives full power: file editing, command execution, etc.
|
||||
*
|
||||
* Unlike useChat (which uses MCP tools), this hook:
|
||||
* - Spawns actual Claude Code CLI in the backend
|
||||
* - Has full file system access
|
||||
* - Can edit files directly (not just return instructions)
|
||||
* - Uses Opus 4.5 model
|
||||
* - Has all Claude Code capabilities
|
||||
*/
|
||||
|
||||
import { useState, useCallback, useRef, useEffect } from 'react';
|
||||
import { Message } from '../components/chat/ChatMessage';
|
||||
import { useCanvasStore } from './useCanvasStore';
|
||||
|
||||
export interface CanvasState {
|
||||
nodes: any[];
|
||||
edges: any[];
|
||||
studyName?: string;
|
||||
studyPath?: string;
|
||||
}
|
||||
|
||||
interface UseClaudeCodeOptions {
|
||||
studyId?: string | null;
|
||||
canvasState?: CanvasState | null;
|
||||
onError?: (error: string) => void;
|
||||
onCanvasRefresh?: (studyId: string) => void;
|
||||
}
|
||||
|
||||
interface ClaudeCodeState {
|
||||
messages: Message[];
|
||||
isThinking: boolean;
|
||||
error: string | null;
|
||||
sessionId: string | null;
|
||||
isConnected: boolean;
|
||||
workingDir: string | null;
|
||||
}
|
||||
|
||||
export function useClaudeCode({
|
||||
studyId,
|
||||
canvasState: initialCanvasState,
|
||||
onError,
|
||||
onCanvasRefresh,
|
||||
}: UseClaudeCodeOptions = {}) {
|
||||
const [state, setState] = useState<ClaudeCodeState>({
|
||||
messages: [],
|
||||
isThinking: false,
|
||||
error: null,
|
||||
sessionId: null,
|
||||
isConnected: false,
|
||||
workingDir: null,
|
||||
});
|
||||
|
||||
// Track canvas state for sending with messages
|
||||
const canvasStateRef = useRef<CanvasState | null>(initialCanvasState || null);
|
||||
const wsRef = useRef<WebSocket | null>(null);
|
||||
const currentMessageRef = useRef<string>('');
|
||||
const reconnectAttempts = useRef(0);
|
||||
const maxReconnectAttempts = 3;
|
||||
|
||||
// Keep canvas state in sync with prop changes
|
||||
useEffect(() => {
|
||||
if (initialCanvasState) {
|
||||
canvasStateRef.current = initialCanvasState;
|
||||
}
|
||||
}, [initialCanvasState]);
|
||||
|
||||
// Get canvas store for auto-refresh
|
||||
const { loadFromConfig } = useCanvasStore();
|
||||
|
||||
// Connect to Claude Code WebSocket
|
||||
useEffect(() => {
|
||||
const connect = () => {
|
||||
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
|
||||
// In development, connect directly to backend (bypass Vite proxy for WebSockets)
|
||||
// Use port 8001 to match start-dashboard.bat
|
||||
const backendHost = import.meta.env.DEV ? 'localhost:8001' : window.location.host;
|
||||
|
||||
// Use study-specific endpoint if studyId provided
|
||||
const wsUrl = studyId
|
||||
? `${protocol}//${backendHost}/api/claude-code/ws/${encodeURIComponent(studyId)}`
|
||||
: `${protocol}//${backendHost}/api/claude-code/ws`;
|
||||
|
||||
console.log('[ClaudeCode] Connecting to:', wsUrl);
|
||||
const ws = new WebSocket(wsUrl);
|
||||
|
||||
ws.onopen = () => {
|
||||
console.log('[ClaudeCode] Connected');
|
||||
setState((prev) => ({ ...prev, isConnected: true, error: null }));
|
||||
reconnectAttempts.current = 0;
|
||||
|
||||
// If no studyId in URL, send init message
|
||||
if (!studyId) {
|
||||
ws.send(JSON.stringify({ type: 'init', study_id: null }));
|
||||
}
|
||||
};
|
||||
|
||||
ws.onclose = () => {
|
||||
console.log('[ClaudeCode] Disconnected');
|
||||
setState((prev) => ({ ...prev, isConnected: false }));
|
||||
|
||||
// Attempt reconnection
|
||||
if (reconnectAttempts.current < maxReconnectAttempts) {
|
||||
reconnectAttempts.current++;
|
||||
console.log(`[ClaudeCode] Reconnecting... attempt ${reconnectAttempts.current}`);
|
||||
setTimeout(connect, 2000 * reconnectAttempts.current);
|
||||
}
|
||||
};
|
||||
|
||||
ws.onerror = (event) => {
|
||||
console.error('[ClaudeCode] WebSocket error:', event);
|
||||
setState((prev) => ({ ...prev, isConnected: false }));
|
||||
onError?.('Claude Code connection error');
|
||||
};
|
||||
|
||||
ws.onmessage = (event) => {
|
||||
try {
|
||||
const data = JSON.parse(event.data);
|
||||
handleWebSocketMessage(data);
|
||||
} catch (e) {
|
||||
console.error('[ClaudeCode] Failed to parse message:', e);
|
||||
}
|
||||
};
|
||||
|
||||
wsRef.current = ws;
|
||||
};
|
||||
|
||||
connect();
|
||||
|
||||
return () => {
|
||||
reconnectAttempts.current = maxReconnectAttempts; // Prevent reconnection on unmount
|
||||
wsRef.current?.close();
|
||||
wsRef.current = null;
|
||||
};
|
||||
}, [studyId]);
|
||||
|
||||
// Handle WebSocket messages
|
||||
const handleWebSocketMessage = useCallback(
|
||||
(data: any) => {
|
||||
switch (data.type) {
|
||||
case 'initialized':
|
||||
console.log('[ClaudeCode] Session initialized:', data.session_id);
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
sessionId: data.session_id,
|
||||
workingDir: data.working_dir || null,
|
||||
}));
|
||||
break;
|
||||
|
||||
case 'text':
|
||||
currentMessageRef.current += data.content || '';
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
messages: prev.messages.map((msg, idx) =>
|
||||
idx === prev.messages.length - 1 && msg.role === 'assistant'
|
||||
? { ...msg, content: currentMessageRef.current }
|
||||
: msg
|
||||
),
|
||||
}));
|
||||
break;
|
||||
|
||||
case 'done':
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
isThinking: false,
|
||||
messages: prev.messages.map((msg, idx) =>
|
||||
idx === prev.messages.length - 1 && msg.role === 'assistant'
|
||||
? { ...msg, isStreaming: false }
|
||||
: msg
|
||||
),
|
||||
}));
|
||||
currentMessageRef.current = '';
|
||||
break;
|
||||
|
||||
case 'error':
|
||||
console.error('[ClaudeCode] Error:', data.content);
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
isThinking: false,
|
||||
error: data.content || 'Unknown error',
|
||||
}));
|
||||
onError?.(data.content || 'Unknown error');
|
||||
currentMessageRef.current = '';
|
||||
break;
|
||||
|
||||
case 'refresh_canvas':
|
||||
// Claude made file changes - trigger canvas refresh
|
||||
console.log('[ClaudeCode] Canvas refresh requested:', data.reason);
|
||||
if (data.study_id) {
|
||||
onCanvasRefresh?.(data.study_id);
|
||||
reloadCanvasFromStudy(data.study_id);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'canvas_updated':
|
||||
console.log('[ClaudeCode] Canvas state updated');
|
||||
break;
|
||||
|
||||
case 'pong':
|
||||
// Heartbeat response
|
||||
break;
|
||||
|
||||
default:
|
||||
console.log('[ClaudeCode] Unknown message type:', data.type);
|
||||
}
|
||||
},
|
||||
[onError, onCanvasRefresh]
|
||||
);
|
||||
|
||||
// Reload canvas from study config
|
||||
const reloadCanvasFromStudy = useCallback(
|
||||
async (studyIdToReload: string) => {
|
||||
try {
|
||||
console.log('[ClaudeCode] Reloading canvas for study:', studyIdToReload);
|
||||
|
||||
// Fetch fresh config from backend
|
||||
const response = await fetch(`/api/optimization/studies/${encodeURIComponent(studyIdToReload)}/config`);
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to fetch config: ${response.status}`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const config = data.config; // API returns { config: ..., path: ..., study_id: ... }
|
||||
|
||||
// Reload canvas with new config
|
||||
loadFromConfig(config);
|
||||
|
||||
// Add system message about refresh
|
||||
const refreshMessage: Message = {
|
||||
id: `msg_${Date.now()}_refresh`,
|
||||
role: 'system',
|
||||
content: `Canvas refreshed with latest changes from ${studyIdToReload}`,
|
||||
timestamp: new Date(),
|
||||
};
|
||||
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
messages: [...prev.messages, refreshMessage],
|
||||
}));
|
||||
} catch (error) {
|
||||
console.error('[ClaudeCode] Failed to reload canvas:', error);
|
||||
}
|
||||
},
|
||||
[loadFromConfig]
|
||||
);
|
||||
|
||||
const generateMessageId = () => {
|
||||
return `msg_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
};
|
||||
|
||||
const sendMessage = useCallback(
|
||||
async (content: string) => {
|
||||
if (!content.trim() || state.isThinking) return;
|
||||
|
||||
if (!wsRef.current || wsRef.current.readyState !== WebSocket.OPEN) {
|
||||
onError?.('Not connected to Claude Code');
|
||||
return;
|
||||
}
|
||||
|
||||
// Add user message
|
||||
const userMessage: Message = {
|
||||
id: generateMessageId(),
|
||||
role: 'user',
|
||||
content: content.trim(),
|
||||
timestamp: new Date(),
|
||||
};
|
||||
|
||||
// Add assistant message placeholder
|
||||
const assistantMessage: Message = {
|
||||
id: generateMessageId(),
|
||||
role: 'assistant',
|
||||
content: '',
|
||||
timestamp: new Date(),
|
||||
isStreaming: true,
|
||||
};
|
||||
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
messages: [...prev.messages, userMessage, assistantMessage],
|
||||
isThinking: true,
|
||||
error: null,
|
||||
}));
|
||||
|
||||
// Reset current message tracking
|
||||
currentMessageRef.current = '';
|
||||
|
||||
// Send message via WebSocket with canvas state
|
||||
wsRef.current.send(
|
||||
JSON.stringify({
|
||||
type: 'message',
|
||||
content: content.trim(),
|
||||
canvas_state: canvasStateRef.current || undefined,
|
||||
})
|
||||
);
|
||||
},
|
||||
[state.isThinking, onError]
|
||||
);
|
||||
|
||||
const clearMessages = useCallback(() => {
|
||||
setState((prev) => ({
|
||||
...prev,
|
||||
messages: [],
|
||||
error: null,
|
||||
}));
|
||||
currentMessageRef.current = '';
|
||||
}, []);
|
||||
|
||||
// Update canvas state (call this when canvas changes)
|
||||
const updateCanvasState = useCallback((newCanvasState: CanvasState | null) => {
|
||||
canvasStateRef.current = newCanvasState;
|
||||
|
||||
// Also send to backend to update context
|
||||
if (wsRef.current?.readyState === WebSocket.OPEN) {
|
||||
wsRef.current.send(
|
||||
JSON.stringify({
|
||||
type: 'set_canvas',
|
||||
canvas_state: newCanvasState,
|
||||
})
|
||||
);
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Send ping to keep connection alive
|
||||
useEffect(() => {
|
||||
const pingInterval = setInterval(() => {
|
||||
if (wsRef.current?.readyState === WebSocket.OPEN) {
|
||||
wsRef.current.send(JSON.stringify({ type: 'ping' }));
|
||||
}
|
||||
}, 30000); // Every 30 seconds
|
||||
|
||||
return () => clearInterval(pingInterval);
|
||||
}, []);
|
||||
|
||||
return {
|
||||
messages: state.messages,
|
||||
isThinking: state.isThinking,
|
||||
error: state.error,
|
||||
sessionId: state.sessionId,
|
||||
isConnected: state.isConnected,
|
||||
workingDir: state.workingDir,
|
||||
sendMessage,
|
||||
clearMessages,
|
||||
updateCanvasState,
|
||||
reloadCanvasFromStudy,
|
||||
};
|
||||
}
|
||||
335
atomizer-dashboard/frontend/src/hooks/useOptimizationStream.ts
Normal file
335
atomizer-dashboard/frontend/src/hooks/useOptimizationStream.ts
Normal file
@@ -0,0 +1,335 @@
|
||||
/**
|
||||
* useOptimizationStream - Enhanced WebSocket hook for real-time optimization updates
|
||||
*
|
||||
* This hook provides:
|
||||
* - Real-time trial updates (no polling needed)
|
||||
* - Best trial tracking
|
||||
* - Progress tracking
|
||||
* - Error detection and reporting
|
||||
* - Integration with panel store for error display
|
||||
* - Automatic reconnection
|
||||
*
|
||||
* Usage:
|
||||
* ```tsx
|
||||
* const {
|
||||
* isConnected,
|
||||
* progress,
|
||||
* bestTrial,
|
||||
* recentTrials,
|
||||
* status
|
||||
* } = useOptimizationStream(studyId);
|
||||
* ```
|
||||
*/
|
||||
|
||||
import { useState, useEffect, useCallback, useRef } from 'react';
|
||||
import useWebSocket, { ReadyState } from 'react-use-websocket';
|
||||
import { usePanelStore } from './usePanelStore';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
export interface TrialData {
|
||||
trial_number: number;
|
||||
trial_num: number;
|
||||
objective: number | null;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
user_attrs: Record<string, unknown>;
|
||||
source: 'FEA' | 'NN' | string;
|
||||
start_time: string;
|
||||
end_time: string;
|
||||
study_name: string;
|
||||
constraint_satisfied: boolean;
|
||||
}
|
||||
|
||||
export interface ProgressData {
|
||||
current: number;
|
||||
total: number;
|
||||
percentage: number;
|
||||
fea_count: number;
|
||||
nn_count: number;
|
||||
timestamp: string;
|
||||
}
|
||||
|
||||
export interface BestTrialData {
|
||||
trial_number: number;
|
||||
value: number;
|
||||
params: Record<string, number>;
|
||||
improvement: number;
|
||||
}
|
||||
|
||||
export interface ParetoData {
|
||||
pareto_front: Array<{
|
||||
trial_number: number;
|
||||
values: number[];
|
||||
params: Record<string, number>;
|
||||
constraint_satisfied: boolean;
|
||||
source: string;
|
||||
}>;
|
||||
count: number;
|
||||
}
|
||||
|
||||
export type OptimizationStatus = 'disconnected' | 'connecting' | 'connected' | 'running' | 'paused' | 'completed' | 'failed';
|
||||
|
||||
export interface OptimizationStreamState {
|
||||
isConnected: boolean;
|
||||
status: OptimizationStatus;
|
||||
progress: ProgressData | null;
|
||||
bestTrial: BestTrialData | null;
|
||||
recentTrials: TrialData[];
|
||||
paretoFront: ParetoData | null;
|
||||
lastUpdate: number | null;
|
||||
error: string | null;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Hook
|
||||
// ============================================================================
|
||||
|
||||
interface UseOptimizationStreamOptions {
|
||||
/** Maximum number of recent trials to keep */
|
||||
maxRecentTrials?: number;
|
||||
/** Callback when a new trial completes */
|
||||
onTrialComplete?: (trial: TrialData) => void;
|
||||
/** Callback when a new best is found */
|
||||
onNewBest?: (best: BestTrialData) => void;
|
||||
/** Callback on progress update */
|
||||
onProgress?: (progress: ProgressData) => void;
|
||||
/** Whether to auto-report errors to the error panel */
|
||||
autoReportErrors?: boolean;
|
||||
}
|
||||
|
||||
export function useOptimizationStream(
|
||||
studyId: string | null | undefined,
|
||||
options: UseOptimizationStreamOptions = {}
|
||||
) {
|
||||
const {
|
||||
maxRecentTrials = 20,
|
||||
onTrialComplete,
|
||||
onNewBest,
|
||||
onProgress,
|
||||
autoReportErrors = true,
|
||||
} = options;
|
||||
|
||||
// Panel store for error reporting
|
||||
const { addError } = usePanelStore();
|
||||
|
||||
// State
|
||||
const [state, setState] = useState<OptimizationStreamState>({
|
||||
isConnected: false,
|
||||
status: 'disconnected',
|
||||
progress: null,
|
||||
bestTrial: null,
|
||||
recentTrials: [],
|
||||
paretoFront: null,
|
||||
lastUpdate: null,
|
||||
error: null,
|
||||
});
|
||||
|
||||
// Track last error timestamp to avoid duplicates
|
||||
const lastErrorTime = useRef<number>(0);
|
||||
|
||||
// Build WebSocket URL
|
||||
const socketUrl = studyId
|
||||
? `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${
|
||||
import.meta.env.DEV ? 'localhost:8001' : window.location.host
|
||||
}/api/ws/optimization/${encodeURIComponent(studyId)}`
|
||||
: null;
|
||||
|
||||
// WebSocket connection
|
||||
const { sendMessage, lastMessage, readyState } = useWebSocket(socketUrl, {
|
||||
shouldReconnect: () => true,
|
||||
reconnectAttempts: 10,
|
||||
reconnectInterval: 3000,
|
||||
onOpen: () => {
|
||||
console.log('[OptStream] Connected to optimization stream');
|
||||
setState(prev => ({ ...prev, isConnected: true, status: 'connected', error: null }));
|
||||
},
|
||||
onClose: () => {
|
||||
console.log('[OptStream] Disconnected from optimization stream');
|
||||
setState(prev => ({ ...prev, isConnected: false, status: 'disconnected' }));
|
||||
},
|
||||
onError: (event) => {
|
||||
console.error('[OptStream] WebSocket error:', event);
|
||||
setState(prev => ({ ...prev, error: 'WebSocket connection error' }));
|
||||
},
|
||||
});
|
||||
|
||||
// Update connection status
|
||||
useEffect(() => {
|
||||
const statusMap: Record<ReadyState, OptimizationStatus> = {
|
||||
[ReadyState.CONNECTING]: 'connecting',
|
||||
[ReadyState.OPEN]: 'connected',
|
||||
[ReadyState.CLOSING]: 'disconnected',
|
||||
[ReadyState.CLOSED]: 'disconnected',
|
||||
[ReadyState.UNINSTANTIATED]: 'disconnected',
|
||||
};
|
||||
|
||||
setState(prev => ({
|
||||
...prev,
|
||||
isConnected: readyState === ReadyState.OPEN,
|
||||
status: prev.status === 'running' || prev.status === 'completed' || prev.status === 'failed'
|
||||
? prev.status
|
||||
: statusMap[readyState] || 'disconnected',
|
||||
}));
|
||||
}, [readyState]);
|
||||
|
||||
// Process incoming messages
|
||||
useEffect(() => {
|
||||
if (!lastMessage?.data) return;
|
||||
|
||||
try {
|
||||
const message = JSON.parse(lastMessage.data);
|
||||
const { type, data } = message;
|
||||
|
||||
switch (type) {
|
||||
case 'connected':
|
||||
console.log('[OptStream] Connection confirmed:', data.message);
|
||||
break;
|
||||
|
||||
case 'trial_completed':
|
||||
handleTrialComplete(data as TrialData);
|
||||
break;
|
||||
|
||||
case 'new_best':
|
||||
handleNewBest(data as BestTrialData);
|
||||
break;
|
||||
|
||||
case 'progress':
|
||||
handleProgress(data as ProgressData);
|
||||
break;
|
||||
|
||||
case 'pareto_update':
|
||||
handleParetoUpdate(data as ParetoData);
|
||||
break;
|
||||
|
||||
case 'heartbeat':
|
||||
case 'pong':
|
||||
// Keep-alive messages
|
||||
break;
|
||||
|
||||
case 'error':
|
||||
handleError(data);
|
||||
break;
|
||||
|
||||
default:
|
||||
console.log('[OptStream] Unknown message type:', type, data);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('[OptStream] Failed to parse message:', e);
|
||||
}
|
||||
}, [lastMessage]);
|
||||
|
||||
// Handler functions
|
||||
const handleTrialComplete = useCallback((trial: TrialData) => {
|
||||
setState(prev => {
|
||||
const newTrials = [trial, ...prev.recentTrials].slice(0, maxRecentTrials);
|
||||
return {
|
||||
...prev,
|
||||
recentTrials: newTrials,
|
||||
lastUpdate: Date.now(),
|
||||
status: 'running',
|
||||
};
|
||||
});
|
||||
|
||||
onTrialComplete?.(trial);
|
||||
}, [maxRecentTrials, onTrialComplete]);
|
||||
|
||||
const handleNewBest = useCallback((best: BestTrialData) => {
|
||||
setState(prev => ({
|
||||
...prev,
|
||||
bestTrial: best,
|
||||
lastUpdate: Date.now(),
|
||||
}));
|
||||
|
||||
onNewBest?.(best);
|
||||
}, [onNewBest]);
|
||||
|
||||
const handleProgress = useCallback((progress: ProgressData) => {
|
||||
setState(prev => {
|
||||
// Determine status based on progress
|
||||
let status: OptimizationStatus = prev.status;
|
||||
if (progress.current > 0 && progress.current < progress.total) {
|
||||
status = 'running';
|
||||
} else if (progress.current >= progress.total) {
|
||||
status = 'completed';
|
||||
}
|
||||
|
||||
return {
|
||||
...prev,
|
||||
progress,
|
||||
status,
|
||||
lastUpdate: Date.now(),
|
||||
};
|
||||
});
|
||||
|
||||
onProgress?.(progress);
|
||||
}, [onProgress]);
|
||||
|
||||
const handleParetoUpdate = useCallback((pareto: ParetoData) => {
|
||||
setState(prev => ({
|
||||
...prev,
|
||||
paretoFront: pareto,
|
||||
lastUpdate: Date.now(),
|
||||
}));
|
||||
}, []);
|
||||
|
||||
const handleError = useCallback((errorData: { message: string; details?: string; trial?: number }) => {
|
||||
const now = Date.now();
|
||||
|
||||
// Avoid duplicate errors within 5 seconds
|
||||
if (now - lastErrorTime.current < 5000) return;
|
||||
lastErrorTime.current = now;
|
||||
|
||||
setState(prev => ({
|
||||
...prev,
|
||||
error: errorData.message,
|
||||
status: 'failed',
|
||||
}));
|
||||
|
||||
if (autoReportErrors) {
|
||||
addError({
|
||||
type: 'system_error',
|
||||
message: errorData.message,
|
||||
details: errorData.details,
|
||||
trial: errorData.trial,
|
||||
recoverable: true,
|
||||
suggestions: ['Check the optimization logs', 'Try restarting the optimization'],
|
||||
timestamp: now,
|
||||
});
|
||||
}
|
||||
}, [autoReportErrors, addError]);
|
||||
|
||||
// Send ping to keep connection alive
|
||||
useEffect(() => {
|
||||
if (readyState !== ReadyState.OPEN) return;
|
||||
|
||||
const interval = setInterval(() => {
|
||||
sendMessage(JSON.stringify({ type: 'ping' }));
|
||||
}, 25000); // Ping every 25 seconds
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [readyState, sendMessage]);
|
||||
|
||||
// Reset state when study changes
|
||||
useEffect(() => {
|
||||
setState({
|
||||
isConnected: false,
|
||||
status: 'disconnected',
|
||||
progress: null,
|
||||
bestTrial: null,
|
||||
recentTrials: [],
|
||||
paretoFront: null,
|
||||
lastUpdate: null,
|
||||
error: null,
|
||||
});
|
||||
}, [studyId]);
|
||||
|
||||
return {
|
||||
...state,
|
||||
sendPing: () => sendMessage(JSON.stringify({ type: 'ping' })),
|
||||
};
|
||||
}
|
||||
|
||||
export default useOptimizationStream;
|
||||
375
atomizer-dashboard/frontend/src/hooks/usePanelStore.ts
Normal file
375
atomizer-dashboard/frontend/src/hooks/usePanelStore.ts
Normal file
@@ -0,0 +1,375 @@
|
||||
/**
|
||||
* usePanelStore - Centralized state management for canvas panels
|
||||
*
|
||||
* This store manages the visibility and state of all panels in the canvas view.
|
||||
* Panels persist their state even when the user clicks elsewhere on the canvas.
|
||||
*
|
||||
* Panel Types:
|
||||
* - introspection: Model introspection results (floating, draggable)
|
||||
* - validation: Spec validation errors/warnings (floating)
|
||||
* - results: Trial results details (floating)
|
||||
* - error: Error display with recovery options (floating)
|
||||
*/
|
||||
|
||||
import { create } from 'zustand';
|
||||
import { persist } from 'zustand/middleware';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
export interface IntrospectionData {
|
||||
filePath: string;
|
||||
studyId?: string;
|
||||
selectedFile?: string;
|
||||
result?: Record<string, unknown>;
|
||||
isLoading?: boolean;
|
||||
error?: string | null;
|
||||
}
|
||||
|
||||
export interface ValidationError {
|
||||
code: string;
|
||||
severity: 'error' | 'warning';
|
||||
path: string;
|
||||
message: string;
|
||||
suggestion?: string;
|
||||
nodeId?: string;
|
||||
}
|
||||
|
||||
export interface ValidationData {
|
||||
valid: boolean;
|
||||
errors: ValidationError[];
|
||||
warnings: ValidationError[];
|
||||
checkedAt: number;
|
||||
}
|
||||
|
||||
export interface OptimizationError {
|
||||
type: 'nx_crash' | 'solver_fail' | 'extractor_error' | 'config_error' | 'system_error' | 'unknown';
|
||||
trial?: number;
|
||||
message: string;
|
||||
details?: string;
|
||||
recoverable: boolean;
|
||||
suggestions: string[];
|
||||
timestamp: number;
|
||||
}
|
||||
|
||||
export interface TrialResultData {
|
||||
trialNumber: number;
|
||||
params: Record<string, number>;
|
||||
objectives: Record<string, number>;
|
||||
constraints?: Record<string, { value: number; feasible: boolean }>;
|
||||
isFeasible: boolean;
|
||||
isBest: boolean;
|
||||
timestamp: number;
|
||||
}
|
||||
|
||||
export interface PanelPosition {
|
||||
x: number;
|
||||
y: number;
|
||||
}
|
||||
|
||||
export interface PanelState {
|
||||
open: boolean;
|
||||
position?: PanelPosition;
|
||||
minimized?: boolean;
|
||||
}
|
||||
|
||||
export interface IntrospectionPanelState extends PanelState {
|
||||
data?: IntrospectionData;
|
||||
}
|
||||
|
||||
export interface ValidationPanelState extends PanelState {
|
||||
data?: ValidationData;
|
||||
}
|
||||
|
||||
export interface ErrorPanelState extends PanelState {
|
||||
errors: OptimizationError[];
|
||||
}
|
||||
|
||||
export interface ResultsPanelState extends PanelState {
|
||||
data?: TrialResultData;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Store Interface
|
||||
// ============================================================================
|
||||
|
||||
interface PanelStore {
|
||||
// Panel states
|
||||
introspection: IntrospectionPanelState;
|
||||
validation: ValidationPanelState;
|
||||
error: ErrorPanelState;
|
||||
results: ResultsPanelState;
|
||||
|
||||
// Generic panel actions
|
||||
openPanel: (panel: 'introspection' | 'validation' | 'error' | 'results') => void;
|
||||
closePanel: (panel: 'introspection' | 'validation' | 'error' | 'results') => void;
|
||||
togglePanel: (panel: 'introspection' | 'validation' | 'error' | 'results') => void;
|
||||
minimizePanel: (panel: 'introspection' | 'validation' | 'error' | 'results') => void;
|
||||
setPanelPosition: (panel: 'introspection' | 'validation' | 'error' | 'results', position: PanelPosition) => void;
|
||||
|
||||
// Introspection-specific actions
|
||||
setIntrospectionData: (data: IntrospectionData) => void;
|
||||
updateIntrospectionResult: (result: Record<string, unknown>) => void;
|
||||
setIntrospectionLoading: (loading: boolean) => void;
|
||||
setIntrospectionError: (error: string | null) => void;
|
||||
setIntrospectionFile: (fileName: string) => void;
|
||||
|
||||
// Validation-specific actions
|
||||
setValidationData: (data: ValidationData) => void;
|
||||
clearValidation: () => void;
|
||||
|
||||
// Error-specific actions
|
||||
addError: (error: OptimizationError) => void;
|
||||
clearErrors: () => void;
|
||||
dismissError: (timestamp: number) => void;
|
||||
|
||||
// Results-specific actions
|
||||
setTrialResult: (data: TrialResultData) => void;
|
||||
clearTrialResult: () => void;
|
||||
|
||||
// Utility
|
||||
closeAllPanels: () => void;
|
||||
hasOpenPanels: () => boolean;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Default States
|
||||
// ============================================================================
|
||||
|
||||
// Default (session-start) state for each panel.
// All panels start closed; the default positions are staggered so that
// opening several panels at once does not stack them exactly on top of
// each other.
const defaultIntrospection: IntrospectionPanelState = {
  open: false,
  position: { x: 100, y: 100 },
  minimized: false,
  data: undefined,
};

const defaultValidation: ValidationPanelState = {
  open: false,
  position: { x: 150, y: 150 },
  minimized: false,
  data: undefined,
};

const defaultError: ErrorPanelState = {
  open: false,
  position: { x: 200, y: 100 },
  minimized: false,
  // Error panel accumulates a list instead of holding a single `data` value.
  errors: [],
};

const defaultResults: ResultsPanelState = {
  open: false,
  position: { x: 250, y: 150 },
  minimized: false,
  data: undefined,
};
|
||||
|
||||
// ============================================================================
|
||||
// Store Implementation
|
||||
// ============================================================================
|
||||
|
||||
export const usePanelStore = create<PanelStore>()(
|
||||
persist(
|
||||
(set, get) => ({
|
||||
// Initial states
|
||||
introspection: defaultIntrospection,
|
||||
validation: defaultValidation,
|
||||
error: defaultError,
|
||||
results: defaultResults,
|
||||
|
||||
// Generic panel actions
|
||||
openPanel: (panel) => set((state) => ({
|
||||
[panel]: { ...state[panel], open: true, minimized: false }
|
||||
})),
|
||||
|
||||
closePanel: (panel) => set((state) => ({
|
||||
[panel]: { ...state[panel], open: false }
|
||||
})),
|
||||
|
||||
togglePanel: (panel) => set((state) => ({
|
||||
[panel]: { ...state[panel], open: !state[panel].open, minimized: false }
|
||||
})),
|
||||
|
||||
minimizePanel: (panel) => set((state) => ({
|
||||
[panel]: { ...state[panel], minimized: !state[panel].minimized }
|
||||
})),
|
||||
|
||||
setPanelPosition: (panel, position) => set((state) => ({
|
||||
[panel]: { ...state[panel], position }
|
||||
})),
|
||||
|
||||
// Introspection actions
|
||||
setIntrospectionData: (data) => set((state) => ({
|
||||
introspection: {
|
||||
...state.introspection,
|
||||
open: true,
|
||||
data
|
||||
}
|
||||
})),
|
||||
|
||||
updateIntrospectionResult: (result) => set((state) => ({
|
||||
introspection: {
|
||||
...state.introspection,
|
||||
data: state.introspection.data
|
||||
? { ...state.introspection.data, result, isLoading: false, error: null }
|
||||
: undefined
|
||||
}
|
||||
})),
|
||||
|
||||
setIntrospectionLoading: (loading) => set((state) => ({
|
||||
introspection: {
|
||||
...state.introspection,
|
||||
data: state.introspection.data
|
||||
? { ...state.introspection.data, isLoading: loading }
|
||||
: undefined
|
||||
}
|
||||
})),
|
||||
|
||||
setIntrospectionError: (error) => set((state) => ({
|
||||
introspection: {
|
||||
...state.introspection,
|
||||
data: state.introspection.data
|
||||
? { ...state.introspection.data, error, isLoading: false }
|
||||
: undefined
|
||||
}
|
||||
})),
|
||||
|
||||
setIntrospectionFile: (fileName) => set((state) => ({
|
||||
introspection: {
|
||||
...state.introspection,
|
||||
data: state.introspection.data
|
||||
? { ...state.introspection.data, selectedFile: fileName }
|
||||
: undefined
|
||||
}
|
||||
})),
|
||||
|
||||
// Validation actions
|
||||
setValidationData: (data) => set((state) => ({
|
||||
validation: {
|
||||
...state.validation,
|
||||
open: true,
|
||||
data
|
||||
}
|
||||
})),
|
||||
|
||||
clearValidation: () => set((state) => ({
|
||||
validation: {
|
||||
...state.validation,
|
||||
data: undefined
|
||||
}
|
||||
})),
|
||||
|
||||
// Error actions
|
||||
addError: (error) => set((state) => ({
|
||||
error: {
|
||||
...state.error,
|
||||
open: true,
|
||||
errors: [...state.error.errors, error]
|
||||
}
|
||||
})),
|
||||
|
||||
clearErrors: () => set((state) => ({
|
||||
error: {
|
||||
...state.error,
|
||||
errors: [],
|
||||
open: false
|
||||
}
|
||||
})),
|
||||
|
||||
dismissError: (timestamp) => set((state) => {
|
||||
const newErrors = state.error.errors.filter(e => e.timestamp !== timestamp);
|
||||
return {
|
||||
error: {
|
||||
...state.error,
|
||||
errors: newErrors,
|
||||
open: newErrors.length > 0
|
||||
}
|
||||
};
|
||||
}),
|
||||
|
||||
// Results actions
|
||||
setTrialResult: (data) => set((state) => ({
|
||||
results: {
|
||||
...state.results,
|
||||
open: true,
|
||||
data
|
||||
}
|
||||
})),
|
||||
|
||||
clearTrialResult: () => set((state) => ({
|
||||
results: {
|
||||
...state.results,
|
||||
data: undefined,
|
||||
open: false
|
||||
}
|
||||
})),
|
||||
|
||||
// Utility
|
||||
closeAllPanels: () => set({
|
||||
introspection: { ...get().introspection, open: false },
|
||||
validation: { ...get().validation, open: false },
|
||||
error: { ...get().error, open: false },
|
||||
results: { ...get().results, open: false },
|
||||
}),
|
||||
|
||||
hasOpenPanels: () => {
|
||||
const state = get();
|
||||
return state.introspection.open ||
|
||||
state.validation.open ||
|
||||
state.error.open ||
|
||||
state.results.open;
|
||||
},
|
||||
}),
|
||||
{
|
||||
name: 'atomizer-panel-store',
|
||||
// Only persist certain fields (not loading states or errors)
|
||||
partialize: (state) => ({
|
||||
introspection: {
|
||||
position: state.introspection.position,
|
||||
// Don't persist open state - start fresh each session
|
||||
},
|
||||
validation: {
|
||||
position: state.validation.position,
|
||||
},
|
||||
error: {
|
||||
position: state.error.position,
|
||||
},
|
||||
results: {
|
||||
position: state.results.position,
|
||||
},
|
||||
}),
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
// ============================================================================
|
||||
// Selector Hooks (for convenience)
|
||||
// ============================================================================
|
||||
|
||||
// Slice selectors: subscribe a component to a single panel's state only,
// so updates to other panels don't re-render it.
export const useIntrospectionPanel = () => usePanelStore((state) => state.introspection);
export const useValidationPanel = () => usePanelStore((state) => state.validation);
export const useErrorPanel = () => usePanelStore((state) => state.error);
export const useResultsPanel = () => usePanelStore((state) => state.results);
|
||||
|
||||
// Actions
|
||||
export const usePanelActions = () => usePanelStore((state) => ({
|
||||
openPanel: state.openPanel,
|
||||
closePanel: state.closePanel,
|
||||
togglePanel: state.togglePanel,
|
||||
minimizePanel: state.minimizePanel,
|
||||
setPanelPosition: state.setPanelPosition,
|
||||
setIntrospectionData: state.setIntrospectionData,
|
||||
updateIntrospectionResult: state.updateIntrospectionResult,
|
||||
setIntrospectionLoading: state.setIntrospectionLoading,
|
||||
setIntrospectionError: state.setIntrospectionError,
|
||||
setIntrospectionFile: state.setIntrospectionFile,
|
||||
setValidationData: state.setValidationData,
|
||||
clearValidation: state.clearValidation,
|
||||
addError: state.addError,
|
||||
clearErrors: state.clearErrors,
|
||||
dismissError: state.dismissError,
|
||||
setTrialResult: state.setTrialResult,
|
||||
clearTrialResult: state.clearTrialResult,
|
||||
closeAllPanels: state.closeAllPanels,
|
||||
}));
|
||||
156
atomizer-dashboard/frontend/src/hooks/useResizablePanel.ts
Normal file
156
atomizer-dashboard/frontend/src/hooks/useResizablePanel.ts
Normal file
@@ -0,0 +1,156 @@
|
||||
/**
|
||||
* useResizablePanel - Hook for creating resizable panels with persistence
|
||||
*
|
||||
* Features:
|
||||
* - Drag to resize
|
||||
* - Min/max constraints
|
||||
* - localStorage persistence
|
||||
* - Double-click to reset to default
|
||||
*/
|
||||
|
||||
import { useState, useCallback, useEffect, useRef } from 'react';
|
||||
|
||||
export interface ResizablePanelConfig {
|
||||
/** Unique key for localStorage persistence */
|
||||
storageKey: string;
|
||||
/** Default width in pixels */
|
||||
defaultWidth: number;
|
||||
/** Minimum width in pixels */
|
||||
minWidth: number;
|
||||
/** Maximum width in pixels */
|
||||
maxWidth: number;
|
||||
/** Side of the panel ('left' or 'right') - affects resize direction */
|
||||
side: 'left' | 'right';
|
||||
}
|
||||
|
||||
export interface ResizablePanelState {
|
||||
/** Current width in pixels */
|
||||
width: number;
|
||||
/** Whether user is currently dragging */
|
||||
isDragging: boolean;
|
||||
/** Start drag handler - attach to resize handle mousedown */
|
||||
startDrag: (e: React.MouseEvent) => void;
|
||||
/** Reset to default width */
|
||||
resetWidth: () => void;
|
||||
/** Set width programmatically */
|
||||
setWidth: (width: number) => void;
|
||||
}
|
||||
|
||||
const STORAGE_PREFIX = 'atomizer-panel-';
|
||||
|
||||
function getStoredWidth(key: string, defaultWidth: number): number {
|
||||
if (typeof window === 'undefined') return defaultWidth;
|
||||
|
||||
try {
|
||||
const stored = localStorage.getItem(STORAGE_PREFIX + key);
|
||||
if (stored) {
|
||||
const parsed = parseInt(stored, 10);
|
||||
if (!isNaN(parsed)) return parsed;
|
||||
}
|
||||
} catch {
|
||||
// localStorage not available
|
||||
}
|
||||
return defaultWidth;
|
||||
}
|
||||
|
||||
function storeWidth(key: string, width: number): void {
|
||||
if (typeof window === 'undefined') return;
|
||||
|
||||
try {
|
||||
localStorage.setItem(STORAGE_PREFIX + key, String(width));
|
||||
} catch {
|
||||
// localStorage not available
|
||||
}
|
||||
}
|
||||
|
||||
export function useResizablePanel(config: ResizablePanelConfig): ResizablePanelState {
|
||||
const { storageKey, defaultWidth, minWidth, maxWidth, side } = config;
|
||||
|
||||
// Initialize from localStorage
|
||||
const [width, setWidthState] = useState(() => {
|
||||
const stored = getStoredWidth(storageKey, defaultWidth);
|
||||
return Math.max(minWidth, Math.min(maxWidth, stored));
|
||||
});
|
||||
|
||||
const [isDragging, setIsDragging] = useState(false);
|
||||
|
||||
// Track initial position for drag calculation
|
||||
const dragStartRef = useRef<{ x: number; width: number } | null>(null);
|
||||
|
||||
// Clamp width within bounds
|
||||
const clampWidth = useCallback((w: number) => {
|
||||
return Math.max(minWidth, Math.min(maxWidth, w));
|
||||
}, [minWidth, maxWidth]);
|
||||
|
||||
// Set width with clamping and persistence
|
||||
const setWidth = useCallback((newWidth: number) => {
|
||||
const clamped = clampWidth(newWidth);
|
||||
setWidthState(clamped);
|
||||
storeWidth(storageKey, clamped);
|
||||
}, [clampWidth, storageKey]);
|
||||
|
||||
// Reset to default
|
||||
const resetWidth = useCallback(() => {
|
||||
setWidth(defaultWidth);
|
||||
}, [defaultWidth, setWidth]);
|
||||
|
||||
// Start drag handler
|
||||
const startDrag = useCallback((e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
setIsDragging(true);
|
||||
dragStartRef.current = { x: e.clientX, width };
|
||||
}, [width]);
|
||||
|
||||
// Handle mouse move during drag
|
||||
useEffect(() => {
|
||||
if (!isDragging) return;
|
||||
|
||||
const handleMouseMove = (e: MouseEvent) => {
|
||||
if (!dragStartRef.current) return;
|
||||
|
||||
const delta = e.clientX - dragStartRef.current.x;
|
||||
|
||||
// For left panels, positive delta increases width
|
||||
// For right panels, negative delta increases width
|
||||
const newWidth = side === 'left'
|
||||
? dragStartRef.current.width + delta
|
||||
: dragStartRef.current.width - delta;
|
||||
|
||||
setWidthState(clampWidth(newWidth));
|
||||
};
|
||||
|
||||
const handleMouseUp = () => {
|
||||
if (dragStartRef.current) {
|
||||
// Persist the final width
|
||||
storeWidth(storageKey, width);
|
||||
}
|
||||
setIsDragging(false);
|
||||
dragStartRef.current = null;
|
||||
};
|
||||
|
||||
// Add listeners to document for smooth dragging
|
||||
document.addEventListener('mousemove', handleMouseMove);
|
||||
document.addEventListener('mouseup', handleMouseUp);
|
||||
|
||||
// Change cursor globally during drag
|
||||
document.body.style.cursor = 'col-resize';
|
||||
document.body.style.userSelect = 'none';
|
||||
|
||||
return () => {
|
||||
document.removeEventListener('mousemove', handleMouseMove);
|
||||
document.removeEventListener('mouseup', handleMouseUp);
|
||||
document.body.style.cursor = '';
|
||||
document.body.style.userSelect = '';
|
||||
};
|
||||
}, [isDragging, side, clampWidth, storageKey, width]);
|
||||
|
||||
return {
|
||||
width,
|
||||
isDragging,
|
||||
startDrag,
|
||||
resetWidth,
|
||||
setWidth,
|
||||
};
|
||||
}
|
||||
|
||||
export default useResizablePanel;
|
||||
@@ -16,7 +16,7 @@
|
||||
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { useUndoRedo, UndoRedoResult } from './useUndoRedo';
|
||||
import { useSpecStore, useSpec, useSpecIsDirty } from './useSpecStore';
|
||||
import { useSpecStore, useSpec } from './useSpecStore';
|
||||
import { AtomizerSpec } from '../types/atomizer-spec';
|
||||
|
||||
const STORAGE_KEY_PREFIX = 'atomizer-spec-history-';
|
||||
@@ -28,7 +28,6 @@ export interface SpecUndoRedoResult extends UndoRedoResult<AtomizerSpec | null>
|
||||
|
||||
export function useSpecUndoRedo(): SpecUndoRedoResult {
|
||||
const spec = useSpec();
|
||||
const isDirty = useSpecIsDirty();
|
||||
const studyId = useSpecStore((state) => state.studyId);
|
||||
const lastSpecRef = useRef<AtomizerSpec | null>(null);
|
||||
|
||||
@@ -56,13 +55,21 @@ export function useSpecUndoRedo(): SpecUndoRedoResult {
|
||||
},
|
||||
});
|
||||
|
||||
// Record snapshot when spec changes (and is dirty)
|
||||
// Record snapshot when spec changes
|
||||
// Note: We removed the isDirty check because with auto-save, isDirty is always false
|
||||
// after the API call completes. Instead, we compare the spec directly.
|
||||
useEffect(() => {
|
||||
if (spec && isDirty && spec !== lastSpecRef.current) {
|
||||
lastSpecRef.current = spec;
|
||||
undoRedo.recordSnapshot();
|
||||
if (spec && spec !== lastSpecRef.current) {
|
||||
// Deep compare to avoid recording duplicate snapshots
|
||||
const specStr = JSON.stringify(spec);
|
||||
const lastStr = lastSpecRef.current ? JSON.stringify(lastSpecRef.current) : '';
|
||||
|
||||
if (specStr !== lastStr) {
|
||||
lastSpecRef.current = spec;
|
||||
undoRedo.recordSnapshot();
|
||||
}
|
||||
}
|
||||
}, [spec, isDirty, undoRedo]);
|
||||
}, [spec, undoRedo]);
|
||||
|
||||
// Clear history when study changes
|
||||
useEffect(() => {
|
||||
|
||||
288
atomizer-dashboard/frontend/src/hooks/useSpecWebSocket.ts
Normal file
288
atomizer-dashboard/frontend/src/hooks/useSpecWebSocket.ts
Normal file
@@ -0,0 +1,288 @@
|
||||
/**
|
||||
* useSpecWebSocket - WebSocket connection for real-time spec sync
|
||||
*
|
||||
* Connects to the backend WebSocket endpoint for live spec updates.
|
||||
* Handles auto-reconnection, message parsing, and store updates.
|
||||
*
|
||||
* P2.11-P2.14: WebSocket sync implementation
|
||||
*/
|
||||
|
||||
import { useEffect, useRef, useCallback, useState } from 'react';
|
||||
import { useSpecStore } from './useSpecStore';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
export type ConnectionStatus = 'disconnected' | 'connecting' | 'connected' | 'reconnecting';
|
||||
|
||||
interface SpecWebSocketMessage {
|
||||
type: 'modification' | 'full_sync' | 'error' | 'ping';
|
||||
payload: unknown;
|
||||
}
|
||||
|
||||
interface ModificationPayload {
|
||||
operation: 'set' | 'add' | 'remove';
|
||||
path: string;
|
||||
value?: unknown;
|
||||
modified_by: string;
|
||||
timestamp: string;
|
||||
hash: string;
|
||||
}
|
||||
|
||||
interface ErrorPayload {
|
||||
message: string;
|
||||
code?: string;
|
||||
}
|
||||
|
||||
interface UseSpecWebSocketOptions {
|
||||
/**
|
||||
* Enable auto-reconnect on disconnect (default: true)
|
||||
*/
|
||||
autoReconnect?: boolean;
|
||||
|
||||
/**
|
||||
* Reconnect delay in ms (default: 3000)
|
||||
*/
|
||||
reconnectDelay?: number;
|
||||
|
||||
/**
|
||||
* Max reconnect attempts (default: 10)
|
||||
*/
|
||||
maxReconnectAttempts?: number;
|
||||
|
||||
/**
|
||||
* Client identifier for tracking modifications (default: 'canvas')
|
||||
*/
|
||||
clientId?: string;
|
||||
}
|
||||
|
||||
interface UseSpecWebSocketReturn {
|
||||
/**
|
||||
* Current connection status
|
||||
*/
|
||||
status: ConnectionStatus;
|
||||
|
||||
/**
|
||||
* Manually disconnect
|
||||
*/
|
||||
disconnect: () => void;
|
||||
|
||||
/**
|
||||
* Manually reconnect
|
||||
*/
|
||||
reconnect: () => void;
|
||||
|
||||
/**
|
||||
* Send a message to the WebSocket (for future use)
|
||||
*/
|
||||
send: (message: SpecWebSocketMessage) => void;
|
||||
|
||||
/**
|
||||
* Last error message if any
|
||||
*/
|
||||
lastError: string | null;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Hook
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Real-time spec sync over WebSocket.
 *
 * @param studyId  Study to sync; `null` disconnects.
 * @param options  Reconnect behavior and client identity (see
 *                 UseSpecWebSocketOptions).
 * @returns Connection status plus manual disconnect/reconnect/send controls.
 */
export function useSpecWebSocket(
  studyId: string | null,
  options: UseSpecWebSocketOptions = {}
): UseSpecWebSocketReturn {
  const {
    autoReconnect = true,
    reconnectDelay = 3000,
    maxReconnectAttempts = 10,
    clientId = 'canvas',
  } = options;

  // Refs (not state) so reconnect bookkeeping doesn't trigger re-renders.
  const wsRef = useRef<WebSocket | null>(null);
  const reconnectAttemptsRef = useRef(0);
  const reconnectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  const [status, setStatus] = useState<ConnectionStatus>('disconnected');
  const [lastError, setLastError] = useState<string | null>(null);

  // Get store actions
  const reloadSpec = useSpecStore((s) => s.reloadSpec);
  const setError = useSpecStore((s) => s.setError);

  // Build WebSocket URL. Derives ws/wss from the page protocol and targets
  // the same host — relies on a dev proxy (or same-origin deploy) to reach
  // the backend.
  const getWsUrl = useCallback((id: string): string => {
    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
    const host = window.location.host;
    return `${protocol}//${host}/api/studies/${encodeURIComponent(id)}/spec/sync?client_id=${clientId}`;
  }, [clientId]);

  // Handle incoming messages
  const handleMessage = useCallback((event: MessageEvent) => {
    try {
      const message: SpecWebSocketMessage = JSON.parse(event.data);

      switch (message.type) {
        case 'modification': {
          const payload = message.payload as ModificationPayload;

          // Echo suppression: skip if this is our own modification.
          if (payload.modified_by === clientId) {
            return;
          }

          // Reload spec to get latest state.
          // In a more sophisticated implementation, we could apply the patch locally.
          reloadSpec().catch((err) => {
            console.error('Failed to reload spec after modification:', err);
          });
          break;
        }

        case 'full_sync': {
          // Full spec sync requested (e.g., after reconnect)
          reloadSpec().catch((err) => {
            console.error('Failed to reload spec during full_sync:', err);
          });
          break;
        }

        case 'error': {
          const payload = message.payload as ErrorPayload;
          console.error('WebSocket error:', payload.message);
          // Surface to both this hook's consumers and the spec store.
          setLastError(payload.message);
          setError(payload.message);
          break;
        }

        case 'ping': {
          // Keep-alive ping, respond with pong
          if (wsRef.current?.readyState === WebSocket.OPEN) {
            wsRef.current.send(JSON.stringify({ type: 'pong' }));
          }
          break;
        }

        default:
          console.warn('Unknown WebSocket message type:', message.type);
      }
    } catch (error) {
      // Malformed frame: log and drop rather than killing the connection.
      console.error('Failed to parse WebSocket message:', error);
    }
  }, [clientId, reloadSpec, setError]);

  // Connect to WebSocket
  const connect = useCallback(() => {
    if (!studyId) return;

    // Clean up existing connection
    if (wsRef.current) {
      wsRef.current.close();
    }

    setStatus('connecting');
    setLastError(null);

    const url = getWsUrl(studyId);
    const ws = new WebSocket(url);

    ws.onopen = () => {
      setStatus('connected');
      // A successful open resets the backoff counter.
      reconnectAttemptsRef.current = 0;
    };

    ws.onmessage = handleMessage;

    ws.onerror = (event) => {
      // onerror is followed by onclose, which drives reconnection.
      console.error('WebSocket error:', event);
      setLastError('WebSocket connection error');
    };

    ws.onclose = (_event) => {
      setStatus('disconnected');

      // Check if we should reconnect
      if (autoReconnect && reconnectAttemptsRef.current < maxReconnectAttempts) {
        reconnectAttemptsRef.current++;
        setStatus('reconnecting');

        // Clear any existing reconnect timeout
        if (reconnectTimeoutRef.current) {
          clearTimeout(reconnectTimeoutRef.current);
        }

        // Schedule reconnect with linear backoff (base delay × attempt
        // number, capped at 5× the base delay).
        const delay = reconnectDelay * Math.min(reconnectAttemptsRef.current, 5);
        reconnectTimeoutRef.current = setTimeout(() => {
          connect();
        }, delay);
      } else if (reconnectAttemptsRef.current >= maxReconnectAttempts) {
        setLastError('Max reconnection attempts reached');
      }
    };

    wsRef.current = ws;
  }, [studyId, getWsUrl, handleMessage, autoReconnect, reconnectDelay, maxReconnectAttempts]);

  // Manual disconnect: also suppresses auto-reconnect by maxing out the
  // attempt counter before closing.
  const disconnect = useCallback(() => {
    // Clear reconnect timeout
    if (reconnectTimeoutRef.current) {
      clearTimeout(reconnectTimeoutRef.current);
      reconnectTimeoutRef.current = null;
    }

    // Close WebSocket
    if (wsRef.current) {
      wsRef.current.close();
      wsRef.current = null;
    }

    reconnectAttemptsRef.current = maxReconnectAttempts; // Prevent auto-reconnect
    setStatus('disconnected');
  }, [maxReconnectAttempts]);

  // Manual reconnect: resets the attempt counter so backoff starts over.
  const reconnect = useCallback(() => {
    reconnectAttemptsRef.current = 0;
    connect();
  }, [connect]);

  // Send a message if the socket is open; otherwise warn and drop.
  const send = useCallback((message: SpecWebSocketMessage) => {
    if (wsRef.current?.readyState === WebSocket.OPEN) {
      wsRef.current.send(JSON.stringify(message));
    } else {
      console.warn('WebSocket not connected, cannot send message');
    }
  }, []);

  // Connect when studyId changes
  useEffect(() => {
    if (studyId) {
      connect();
    } else {
      disconnect();
    }

    return () => {
      // Cleanup on unmount or studyId change.
      // NOTE(review): this closes the socket but leaves its onclose handler
      // attached, so the auto-reconnect branch can still fire after unmount
      // (setState on an unmounted component). Confirm whether the handler
      // should be detached or the attempt counter maxed out here.
      if (reconnectTimeoutRef.current) {
        clearTimeout(reconnectTimeoutRef.current);
      }
      if (wsRef.current) {
        wsRef.current.close();
      }
    };
  }, [studyId, connect, disconnect]);

  return {
    status,
    disconnect,
    reconnect,
    send,
    lastError,
  };
}
|
||||
|
||||
export default useSpecWebSocket;
|
||||
@@ -18,7 +18,8 @@ export const useOptimizationWebSocket = ({ studyId, onMessage }: UseOptimization
|
||||
const host = window.location.host; // This will be localhost:3000 in dev
|
||||
// If using proxy in vite.config.ts, this works.
|
||||
// If not, we might need to hardcode backend URL for dev:
|
||||
const backendHost = import.meta.env.DEV ? 'localhost:8000' : host;
|
||||
// Use port 8001 to match start-dashboard.bat
|
||||
const backendHost = import.meta.env.DEV ? 'localhost:8001' : host;
|
||||
|
||||
setSocketUrl(`${protocol}//${backendHost}/api/ws/optimization/${studyId}`);
|
||||
} else {
|
||||
|
||||
@@ -16,6 +16,7 @@ export interface BaseNodeData {
|
||||
label: string;
|
||||
configured: boolean;
|
||||
errors?: string[];
|
||||
resultValue?: number | string | null; // For Results Overlay
|
||||
}
|
||||
|
||||
export interface ModelNodeData extends BaseNodeData {
|
||||
@@ -24,9 +25,17 @@ export interface ModelNodeData extends BaseNodeData {
|
||||
fileType?: 'prt' | 'fem' | 'sim';
|
||||
}
|
||||
|
||||
export type SolverEngine = 'nxnastran' | 'mscnastran' | 'python' | 'abaqus' | 'ansys';
|
||||
export type NastranSolutionType = 'SOL101' | 'SOL103' | 'SOL105' | 'SOL106' | 'SOL111' | 'SOL112' | 'SOL200';
|
||||
|
||||
export interface SolverNodeData extends BaseNodeData {
|
||||
type: 'solver';
|
||||
solverType?: 'SOL101' | 'SOL103' | 'SOL105' | 'SOL106' | 'SOL111' | 'SOL112';
|
||||
/** Solver engine (nxnastran, mscnastran, python, etc.) */
|
||||
engine?: SolverEngine;
|
||||
/** Solution type for Nastran solvers */
|
||||
solverType?: NastranSolutionType;
|
||||
/** Python script path (for python engine) */
|
||||
scriptPath?: string;
|
||||
}
|
||||
|
||||
export interface DesignVarNodeData extends BaseNodeData {
|
||||
@@ -98,6 +107,7 @@ export interface ObjectiveNodeData extends BaseNodeData {
|
||||
extractorRef?: string; // Reference to extractor ID
|
||||
outputName?: string; // Which output from the extractor
|
||||
penaltyWeight?: number; // For hard constraints (penalty method)
|
||||
history?: number[]; // Recent values for sparkline visualization
|
||||
}
|
||||
|
||||
export interface ConstraintNodeData extends BaseNodeData {
|
||||
@@ -105,6 +115,7 @@ export interface ConstraintNodeData extends BaseNodeData {
|
||||
name?: string;
|
||||
operator?: '<' | '<=' | '>' | '>=' | '==';
|
||||
value?: number;
|
||||
isFeasible?: boolean; // For Results Overlay
|
||||
}
|
||||
|
||||
export interface AlgorithmNodeData extends BaseNodeData {
|
||||
|
||||
394
atomizer-dashboard/frontend/src/lib/validation/specValidator.ts
Normal file
394
atomizer-dashboard/frontend/src/lib/validation/specValidator.ts
Normal file
@@ -0,0 +1,394 @@
|
||||
/**
|
||||
* Spec Validator - Validate AtomizerSpec v2.0 before running optimization
|
||||
*
|
||||
* This validator checks the spec for completeness and correctness,
|
||||
* returning structured errors that can be displayed in the ValidationPanel.
|
||||
*/
|
||||
|
||||
import { AtomizerSpec } from '../../types/atomizer-spec';
|
||||
import { ValidationError, ValidationData } from '../../hooks/usePanelStore';
|
||||
|
||||
// ============================================================================
|
||||
// Validation Rules
|
||||
// ============================================================================
|
||||
|
||||
interface ValidationRule {
|
||||
code: string;
|
||||
check: (spec: AtomizerSpec) => ValidationError | null;
|
||||
}
|
||||
|
||||
const validationRules: ValidationRule[] = [
|
||||
// ---- Critical Errors (must fix) ----
|
||||
|
||||
{
|
||||
code: 'NO_DESIGN_VARS',
|
||||
check: (spec) => {
|
||||
const enabledDVs = spec.design_variables.filter(dv => dv.enabled !== false);
|
||||
if (enabledDVs.length === 0) {
|
||||
return {
|
||||
code: 'NO_DESIGN_VARS',
|
||||
severity: 'error',
|
||||
path: 'design_variables',
|
||||
message: 'No design variables defined',
|
||||
suggestion: 'Add at least one design variable from the introspection panel or drag from the palette.',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'NO_OBJECTIVES',
|
||||
check: (spec) => {
|
||||
if (spec.objectives.length === 0) {
|
||||
return {
|
||||
code: 'NO_OBJECTIVES',
|
||||
severity: 'error',
|
||||
path: 'objectives',
|
||||
message: 'No objectives defined',
|
||||
suggestion: 'Add at least one objective to define what to optimize (minimize mass, maximize stiffness, etc.).',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'NO_EXTRACTORS',
|
||||
check: (spec) => {
|
||||
if (spec.extractors.length === 0) {
|
||||
return {
|
||||
code: 'NO_EXTRACTORS',
|
||||
severity: 'error',
|
||||
path: 'extractors',
|
||||
message: 'No extractors defined',
|
||||
suggestion: 'Add extractors to pull physics values (displacement, stress, frequency) from FEA results.',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'NO_MODEL',
|
||||
check: (spec) => {
|
||||
if (!spec.model.sim?.path) {
|
||||
return {
|
||||
code: 'NO_MODEL',
|
||||
severity: 'error',
|
||||
path: 'model.sim.path',
|
||||
message: 'No simulation file configured',
|
||||
suggestion: 'Select a .sim file in the study\'s model directory.',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
// ---- Design Variable Validation ----
|
||||
|
||||
{
|
||||
code: 'DV_INVALID_BOUNDS',
|
||||
check: (spec) => {
|
||||
for (const dv of spec.design_variables) {
|
||||
if (dv.enabled === false) continue;
|
||||
if (dv.bounds.min >= dv.bounds.max) {
|
||||
return {
|
||||
code: 'DV_INVALID_BOUNDS',
|
||||
severity: 'error',
|
||||
path: `design_variables.${dv.id}`,
|
||||
message: `Design variable "${dv.name}" has invalid bounds (min >= max)`,
|
||||
suggestion: `Set min (${dv.bounds.min}) to be less than max (${dv.bounds.max}).`,
|
||||
nodeId: dv.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'DV_NO_EXPRESSION',
|
||||
check: (spec) => {
|
||||
for (const dv of spec.design_variables) {
|
||||
if (dv.enabled === false) continue;
|
||||
if (!dv.expression_name || dv.expression_name.trim() === '') {
|
||||
return {
|
||||
code: 'DV_NO_EXPRESSION',
|
||||
severity: 'error',
|
||||
path: `design_variables.${dv.id}`,
|
||||
message: `Design variable "${dv.name}" has no NX expression name`,
|
||||
suggestion: 'Set the expression_name to match an NX expression in the model.',
|
||||
nodeId: dv.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
// ---- Extractor Validation ----
|
||||
|
||||
{
|
||||
code: 'EXTRACTOR_NO_TYPE',
|
||||
check: (spec) => {
|
||||
for (const ext of spec.extractors) {
|
||||
if (!ext.type || ext.type.trim() === '') {
|
||||
return {
|
||||
code: 'EXTRACTOR_NO_TYPE',
|
||||
severity: 'error',
|
||||
path: `extractors.${ext.id}`,
|
||||
message: `Extractor "${ext.name}" has no type selected`,
|
||||
suggestion: 'Select an extractor type (displacement, stress, frequency, etc.).',
|
||||
nodeId: ext.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'CUSTOM_EXTRACTOR_NO_CODE',
|
||||
check: (spec) => {
|
||||
for (const ext of spec.extractors) {
|
||||
if (ext.type === 'custom_function' && (!ext.function?.source_code || ext.function.source_code.trim() === '')) {
|
||||
return {
|
||||
code: 'CUSTOM_EXTRACTOR_NO_CODE',
|
||||
severity: 'error',
|
||||
path: `extractors.${ext.id}`,
|
||||
message: `Custom extractor "${ext.name}" has no code defined`,
|
||||
suggestion: 'Open the code editor and write the extraction function.',
|
||||
nodeId: ext.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
// ---- Objective Validation ----
|
||||
|
||||
{
|
||||
code: 'OBJECTIVE_NO_SOURCE',
|
||||
check: (spec) => {
|
||||
for (const obj of spec.objectives) {
|
||||
// Check if objective is connected to an extractor via canvas edges
|
||||
const hasSource = spec.canvas?.edges?.some(
|
||||
edge => edge.target === obj.id && edge.source.startsWith('ext_')
|
||||
);
|
||||
|
||||
// Also check if source.extractor_id is set
|
||||
const hasDirectSource = obj.source?.extractor_id &&
|
||||
spec.extractors.some(e => e.id === obj.source.extractor_id);
|
||||
|
||||
if (!hasSource && !hasDirectSource) {
|
||||
return {
|
||||
code: 'OBJECTIVE_NO_SOURCE',
|
||||
severity: 'error',
|
||||
path: `objectives.${obj.id}`,
|
||||
message: `Objective "${obj.name}" has no connected extractor`,
|
||||
suggestion: 'Connect an extractor to this objective or set source_extractor_id.',
|
||||
nodeId: obj.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
// ---- Constraint Validation ----
|
||||
|
||||
{
|
||||
code: 'CONSTRAINT_NO_THRESHOLD',
|
||||
check: (spec) => {
|
||||
for (const con of spec.constraints || []) {
|
||||
if (con.threshold === undefined || con.threshold === null) {
|
||||
return {
|
||||
code: 'CONSTRAINT_NO_THRESHOLD',
|
||||
severity: 'error',
|
||||
path: `constraints.${con.id}`,
|
||||
message: `Constraint "${con.name}" has no threshold value`,
|
||||
suggestion: 'Set a threshold value for the constraint.',
|
||||
nodeId: con.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
// ---- Warnings (can proceed but risky) ----
|
||||
|
||||
{
|
||||
code: 'HIGH_TRIAL_COUNT',
|
||||
check: (spec) => {
|
||||
const maxTrials = spec.optimization.budget?.max_trials || 100;
|
||||
if (maxTrials > 500) {
|
||||
return {
|
||||
code: 'HIGH_TRIAL_COUNT',
|
||||
severity: 'warning',
|
||||
path: 'optimization.budget.max_trials',
|
||||
message: `High trial count (${maxTrials}) may take several hours to complete`,
|
||||
suggestion: 'Consider starting with fewer trials (50-100) to validate the setup.',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'SINGLE_TRIAL',
|
||||
check: (spec) => {
|
||||
const maxTrials = spec.optimization.budget?.max_trials || 100;
|
||||
if (maxTrials === 1) {
|
||||
return {
|
||||
code: 'SINGLE_TRIAL',
|
||||
severity: 'warning',
|
||||
path: 'optimization.budget.max_trials',
|
||||
message: 'Only 1 trial configured - this will just run a single evaluation',
|
||||
suggestion: 'Increase max_trials to explore the design space.',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'DV_NARROW_BOUNDS',
|
||||
check: (spec) => {
|
||||
for (const dv of spec.design_variables) {
|
||||
if (dv.enabled === false) continue;
|
||||
const range = dv.bounds.max - dv.bounds.min;
|
||||
const baseline = dv.baseline || (dv.bounds.min + dv.bounds.max) / 2;
|
||||
const relativeRange = range / Math.abs(baseline || 1);
|
||||
|
||||
if (relativeRange < 0.01) { // Less than 1% variation
|
||||
return {
|
||||
code: 'DV_NARROW_BOUNDS',
|
||||
severity: 'warning',
|
||||
path: `design_variables.${dv.id}`,
|
||||
message: `Design variable "${dv.name}" has very narrow bounds (<1% range)`,
|
||||
suggestion: 'Consider widening the bounds for more meaningful exploration.',
|
||||
nodeId: dv.id,
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'MANY_DESIGN_VARS',
|
||||
check: (spec) => {
|
||||
const enabledDVs = spec.design_variables.filter(dv => dv.enabled !== false);
|
||||
if (enabledDVs.length > 10) {
|
||||
return {
|
||||
code: 'MANY_DESIGN_VARS',
|
||||
severity: 'warning',
|
||||
path: 'design_variables',
|
||||
message: `${enabledDVs.length} design variables - high-dimensional space may need more trials`,
|
||||
suggestion: 'Consider enabling neural surrogate acceleration or increasing trial budget.',
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
code: 'MULTI_OBJECTIVE_NO_WEIGHTS',
|
||||
check: (spec) => {
|
||||
if (spec.objectives.length > 1) {
|
||||
const hasWeights = spec.objectives.every(obj => obj.weight !== undefined && obj.weight !== null);
|
||||
if (!hasWeights) {
|
||||
return {
|
||||
code: 'MULTI_OBJECTIVE_NO_WEIGHTS',
|
||||
severity: 'warning',
|
||||
path: 'objectives',
|
||||
message: 'Multi-objective optimization without explicit weights',
|
||||
suggestion: 'Consider setting weights to control the trade-off between objectives.',
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
// ============================================================================
|
||||
// Main Validation Function
|
||||
// ============================================================================
|
||||
|
||||
export function validateSpec(spec: AtomizerSpec): ValidationData {
|
||||
const errors: ValidationError[] = [];
|
||||
const warnings: ValidationError[] = [];
|
||||
|
||||
for (const rule of validationRules) {
|
||||
const result = rule.check(spec);
|
||||
if (result) {
|
||||
if (result.severity === 'error') {
|
||||
errors.push(result);
|
||||
} else {
|
||||
warnings.push(result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors,
|
||||
warnings,
|
||||
checkedAt: Date.now(),
|
||||
};
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Quick Validation (just checks if can run)
|
||||
// ============================================================================
|
||||
|
||||
export function canRunOptimization(spec: AtomizerSpec): { canRun: boolean; reason?: string } {
|
||||
// Check critical requirements only
|
||||
if (!spec.model.sim?.path) {
|
||||
return { canRun: false, reason: 'No simulation file configured' };
|
||||
}
|
||||
|
||||
const enabledDVs = spec.design_variables.filter(dv => dv.enabled !== false);
|
||||
if (enabledDVs.length === 0) {
|
||||
return { canRun: false, reason: 'No design variables defined' };
|
||||
}
|
||||
|
||||
if (spec.objectives.length === 0) {
|
||||
return { canRun: false, reason: 'No objectives defined' };
|
||||
}
|
||||
|
||||
if (spec.extractors.length === 0) {
|
||||
return { canRun: false, reason: 'No extractors defined' };
|
||||
}
|
||||
|
||||
// Check for invalid bounds
|
||||
for (const dv of enabledDVs) {
|
||||
if (dv.bounds.min >= dv.bounds.max) {
|
||||
return { canRun: false, reason: `Invalid bounds for "${dv.name}"` };
|
||||
}
|
||||
}
|
||||
|
||||
return { canRun: true };
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Export validation result type for backward compatibility
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Flat, string-only validation shape kept for backward compatibility
 * with callers that predate the structured ValidationData format.
 */
export interface LegacyValidationResult {
  valid: boolean;
  // Human-readable messages only; codes, paths, and suggestions are dropped.
  errors: string[];
  warnings: string[];
}
|
||||
|
||||
export function toLegacyValidationResult(data: ValidationData): LegacyValidationResult {
|
||||
return {
|
||||
valid: data.valid,
|
||||
errors: data.errors.map(e => e.message),
|
||||
warnings: data.warnings.map(w => w.message),
|
||||
};
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useState, useEffect, lazy, Suspense, useMemo } from 'react';
|
||||
import { useState, useEffect, useMemo } from 'react';
|
||||
import { useNavigate } from 'react-router-dom';
|
||||
import {
|
||||
BarChart3,
|
||||
@@ -14,25 +14,10 @@ import {
|
||||
} from 'lucide-react';
|
||||
import { useStudy } from '../context/StudyContext';
|
||||
import { Card } from '../components/common/Card';
|
||||
|
||||
// Lazy load charts
|
||||
const PlotlyParetoPlot = lazy(() => import('../components/plotly/PlotlyParetoPlot').then(m => ({ default: m.PlotlyParetoPlot })));
|
||||
const PlotlyParallelCoordinates = lazy(() => import('../components/plotly/PlotlyParallelCoordinates').then(m => ({ default: m.PlotlyParallelCoordinates })));
|
||||
const PlotlyParameterImportance = lazy(() => import('../components/plotly/PlotlyParameterImportance').then(m => ({ default: m.PlotlyParameterImportance })));
|
||||
const PlotlyConvergencePlot = lazy(() => import('../components/plotly/PlotlyConvergencePlot').then(m => ({ default: m.PlotlyConvergencePlot })));
|
||||
const PlotlyCorrelationHeatmap = lazy(() => import('../components/plotly/PlotlyCorrelationHeatmap').then(m => ({ default: m.PlotlyCorrelationHeatmap })));
|
||||
const PlotlyFeasibilityChart = lazy(() => import('../components/plotly/PlotlyFeasibilityChart').then(m => ({ default: m.PlotlyFeasibilityChart })));
|
||||
const PlotlySurrogateQuality = lazy(() => import('../components/plotly/PlotlySurrogateQuality').then(m => ({ default: m.PlotlySurrogateQuality })));
|
||||
const PlotlyRunComparison = lazy(() => import('../components/plotly/PlotlyRunComparison').then(m => ({ default: m.PlotlyRunComparison })));
|
||||
|
||||
// Spinner placeholder rendered while a lazy-loaded chart bundle is fetched.
const ChartLoading = () => (
  <div className="flex items-center justify-center h-64 text-dark-400">
    <div className="flex flex-col items-center gap-2">
      <div className="animate-spin w-6 h-6 border-2 border-primary-500 border-t-transparent rounded-full"></div>
      <span className="text-sm animate-pulse">Loading chart...</span>
    </div>
  </div>
);
|
||||
import { ConvergencePlot } from '../components/ConvergencePlot';
|
||||
import { ParameterImportanceChart } from '../components/ParameterImportanceChart';
|
||||
import { ParallelCoordinatesPlot } from '../components/ParallelCoordinatesPlot';
|
||||
import { ParetoPlot } from '../components/ParetoPlot';
|
||||
|
||||
const NoData = ({ message = 'No data available' }: { message?: string }) => (
|
||||
<div className="flex items-center justify-center h-64 text-dark-500">
|
||||
@@ -383,15 +368,12 @@ export default function Analysis() {
|
||||
{/* Convergence Plot */}
|
||||
{trials.length > 0 && (
|
||||
<Card title="Convergence Plot">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyConvergencePlot
|
||||
trials={trials}
|
||||
objectiveIndex={0}
|
||||
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
|
||||
direction="minimize"
|
||||
height={350}
|
||||
/>
|
||||
</Suspense>
|
||||
<ConvergencePlot
|
||||
trials={trials}
|
||||
objectiveIndex={0}
|
||||
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
|
||||
direction="minimize"
|
||||
/>
|
||||
</Card>
|
||||
)}
|
||||
|
||||
@@ -455,30 +437,24 @@ export default function Analysis() {
|
||||
{/* Parameter Importance */}
|
||||
{trials.length > 0 && metadata?.design_variables && (
|
||||
<Card title="Parameter Importance">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParameterImportance
|
||||
trials={trials}
|
||||
designVariables={metadata.design_variables}
|
||||
objectiveIndex={0}
|
||||
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
|
||||
height={400}
|
||||
/>
|
||||
</Suspense>
|
||||
<ParameterImportanceChart
|
||||
trials={trials}
|
||||
designVariables={metadata.design_variables}
|
||||
objectiveIndex={0}
|
||||
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
|
||||
/>
|
||||
</Card>
|
||||
)}
|
||||
|
||||
{/* Parallel Coordinates */}
|
||||
{trials.length > 0 && metadata && (
|
||||
<Card title="Parallel Coordinates">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParallelCoordinates
|
||||
trials={trials}
|
||||
objectives={metadata.objectives || []}
|
||||
designVariables={metadata.design_variables || []}
|
||||
paretoFront={paretoFront}
|
||||
height={450}
|
||||
/>
|
||||
</Suspense>
|
||||
<ParallelCoordinatesPlot
|
||||
paretoData={trials}
|
||||
objectives={metadata.objectives || []}
|
||||
designVariables={metadata.design_variables || []}
|
||||
paretoFront={paretoFront}
|
||||
/>
|
||||
</Card>
|
||||
)}
|
||||
</div>
|
||||
@@ -508,14 +484,11 @@ export default function Analysis() {
|
||||
{/* Pareto Front Plot */}
|
||||
{paretoFront.length > 0 && (
|
||||
<Card title="Pareto Front">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParetoPlot
|
||||
trials={trials}
|
||||
paretoFront={paretoFront}
|
||||
objectives={metadata?.objectives || []}
|
||||
height={500}
|
||||
/>
|
||||
</Suspense>
|
||||
<ParetoPlot
|
||||
paretoData={paretoFront}
|
||||
objectives={metadata?.objectives || []}
|
||||
allTrials={trials}
|
||||
/>
|
||||
</Card>
|
||||
)}
|
||||
|
||||
@@ -550,16 +523,10 @@ export default function Analysis() {
|
||||
{/* Correlations Tab */}
|
||||
{activeTab === 'correlations' && (
|
||||
<div className="space-y-6">
|
||||
{/* Correlation Heatmap */}
|
||||
{/* Correlation Analysis */}
|
||||
{trials.length > 2 && (
|
||||
<Card title="Parameter-Objective Correlation Matrix">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyCorrelationHeatmap
|
||||
trials={trials}
|
||||
objectiveName={metadata?.objectives?.[0]?.name || 'Objective'}
|
||||
height={Math.min(500, 100 + Object.keys(trials[0]?.params || {}).length * 40)}
|
||||
/>
|
||||
</Suspense>
|
||||
<Card title="Parameter-Objective Correlation Analysis">
|
||||
<CorrelationTable trials={trials} objectiveName={metadata?.objectives?.[0]?.name || 'Objective'} />
|
||||
</Card>
|
||||
)}
|
||||
|
||||
@@ -612,11 +579,22 @@ export default function Analysis() {
|
||||
</Card>
|
||||
</div>
|
||||
|
||||
{/* Feasibility Over Time Chart */}
|
||||
<Card title="Feasibility Rate Over Time">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyFeasibilityChart trials={trials} height={350} />
|
||||
</Suspense>
|
||||
{/* Feasibility Summary */}
|
||||
<Card title="Feasibility Analysis">
|
||||
<div className="p-4">
|
||||
<div className="flex items-center gap-4 mb-4">
|
||||
<div className="flex-1 bg-dark-700 rounded-full h-4 overflow-hidden">
|
||||
<div
|
||||
className="h-full bg-green-500 transition-all duration-500"
|
||||
style={{ width: `${stats.feasibilityRate}%` }}
|
||||
/>
|
||||
</div>
|
||||
<span className="text-lg font-bold text-green-400">{stats.feasibilityRate.toFixed(1)}%</span>
|
||||
</div>
|
||||
<p className="text-dark-400 text-sm">
|
||||
{stats.feasible} of {stats.total} trials satisfy all constraints
|
||||
</p>
|
||||
</div>
|
||||
</Card>
|
||||
|
||||
{/* Infeasible Trials List */}
|
||||
@@ -683,11 +661,38 @@ export default function Analysis() {
|
||||
</Card>
|
||||
</div>
|
||||
|
||||
{/* Surrogate Quality Charts */}
|
||||
<Card title="Surrogate Model Analysis">
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlySurrogateQuality trials={trials} height={400} />
|
||||
</Suspense>
|
||||
{/* Surrogate Performance Summary */}
|
||||
<Card title="Surrogate Model Performance">
|
||||
<div className="grid grid-cols-2 gap-6 p-4">
|
||||
<div>
|
||||
<h4 className="text-sm font-semibold text-dark-300 mb-3">Trial Distribution</h4>
|
||||
<div className="space-y-2">
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="w-3 h-3 bg-blue-500 rounded-full"></div>
|
||||
<span className="text-dark-200">FEA: {stats.feaTrials} trials</span>
|
||||
<span className="text-dark-400 ml-auto">
|
||||
{((stats.feaTrials / stats.total) * 100).toFixed(0)}%
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="w-3 h-3 bg-purple-500 rounded-full"></div>
|
||||
<span className="text-dark-200">NN: {stats.nnTrials} trials</span>
|
||||
<span className="text-dark-400 ml-auto">
|
||||
{((stats.nnTrials / stats.total) * 100).toFixed(0)}%
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<h4 className="text-sm font-semibold text-dark-300 mb-3">Efficiency Gains</h4>
|
||||
<div className="text-center p-4 bg-dark-750 rounded-lg">
|
||||
<div className="text-3xl font-bold text-primary-400">
|
||||
{stats.feaTrials > 0 ? `${(stats.total / stats.feaTrials).toFixed(1)}x` : '1.0x'}
|
||||
</div>
|
||||
<div className="text-xs text-dark-400 mt-1">Effective Speedup</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
</div>
|
||||
)}
|
||||
@@ -700,9 +705,36 @@ export default function Analysis() {
|
||||
Compare different optimization runs within this study. Studies with adaptive optimization
|
||||
may have multiple runs (e.g., initial FEA exploration, NN-accelerated iterations).
|
||||
</p>
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyRunComparison runs={runs} height={400} />
|
||||
</Suspense>
|
||||
<div className="overflow-x-auto">
|
||||
<table className="w-full text-sm">
|
||||
<thead>
|
||||
<tr className="border-b border-dark-600">
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Run</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Source</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Trials</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Best Value</th>
|
||||
<th className="text-left py-2 px-3 text-dark-400 font-medium">Avg Value</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{runs.map((run) => (
|
||||
<tr key={run.run_id} className="border-b border-dark-700">
|
||||
<td className="py-2 px-3 font-mono text-white">{run.name || `Run ${run.run_id}`}</td>
|
||||
<td className="py-2 px-3">
|
||||
<span className={`px-2 py-0.5 rounded text-xs ${
|
||||
run.source === 'NN' ? 'bg-purple-500/20 text-purple-400' : 'bg-blue-500/20 text-blue-400'
|
||||
}`}>
|
||||
{run.source}
|
||||
</span>
|
||||
</td>
|
||||
<td className="py-2 px-3 text-dark-200">{run.trial_count}</td>
|
||||
<td className="py-2 px-3 font-mono text-green-400">{run.best_value?.toExponential(4) || 'N/A'}</td>
|
||||
<td className="py-2 px-3 font-mono text-dark-300">{run.avg_value?.toExponential(4) || 'N/A'}</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</Card>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -10,8 +10,12 @@ import { ConfigImporter } from '../components/canvas/panels/ConfigImporter';
|
||||
import { NodeConfigPanel } from '../components/canvas/panels/NodeConfigPanel';
|
||||
import { NodeConfigPanelV2 } from '../components/canvas/panels/NodeConfigPanelV2';
|
||||
import { ChatPanel } from '../components/canvas/panels/ChatPanel';
|
||||
import { PanelContainer } from '../components/canvas/panels/PanelContainer';
|
||||
import { ResizeHandle } from '../components/canvas/ResizeHandle';
|
||||
import { useCanvasStore } from '../hooks/useCanvasStore';
|
||||
import { useSpecStore, useSpec, useSpecLoading, useSpecIsDirty, useSelectedNodeId } from '../hooks/useSpecStore';
|
||||
import { useResizablePanel } from '../hooks/useResizablePanel';
|
||||
// usePanelStore is now used by child components - PanelContainer handles panels
|
||||
import { useSpecUndoRedo, useUndoRedoKeyboard } from '../hooks/useSpecUndoRedo';
|
||||
import { useStudy } from '../context/StudyContext';
|
||||
import { useChat } from '../hooks/useChat';
|
||||
@@ -29,6 +33,23 @@ export function CanvasView() {
|
||||
const [paletteCollapsed, setPaletteCollapsed] = useState(false);
|
||||
const [leftSidebarTab, setLeftSidebarTab] = useState<'components' | 'files'>('components');
|
||||
const navigate = useNavigate();
|
||||
|
||||
// Resizable panels
|
||||
const leftPanel = useResizablePanel({
|
||||
storageKey: 'left-sidebar',
|
||||
defaultWidth: 240,
|
||||
minWidth: 200,
|
||||
maxWidth: 400,
|
||||
side: 'left',
|
||||
});
|
||||
|
||||
const rightPanel = useResizablePanel({
|
||||
storageKey: 'right-panel',
|
||||
defaultWidth: 384,
|
||||
minWidth: 280,
|
||||
maxWidth: 600,
|
||||
side: 'right',
|
||||
});
|
||||
const [searchParams] = useSearchParams();
|
||||
|
||||
// Spec mode is the default (AtomizerSpec v2.0)
|
||||
@@ -296,17 +317,34 @@ export function CanvasView() {
|
||||
|
||||
{/* Action Buttons */}
|
||||
<div className="flex items-center gap-2">
|
||||
{/* Save Button - only show when there's a study and changes */}
|
||||
{activeStudyId && (
|
||||
{/* Save Button - always show in spec mode with study, grayed when no changes */}
|
||||
{useSpecMode && spec && (
|
||||
<button
|
||||
onClick={saveToConfig}
|
||||
disabled={isSaving || (useSpecMode ? !specIsDirty : !hasUnsavedChanges)}
|
||||
disabled={isSaving || !specIsDirty}
|
||||
className={`px-3 py-1.5 text-sm rounded-lg transition-colors flex items-center gap-1.5 ${
|
||||
(useSpecMode ? specIsDirty : hasUnsavedChanges)
|
||||
specIsDirty
|
||||
? 'bg-green-600 hover:bg-green-500 text-white'
|
||||
: 'bg-dark-700 text-dark-400 cursor-not-allowed border border-dark-600'
|
||||
}`}
|
||||
title={(useSpecMode ? specIsDirty : hasUnsavedChanges) ? `Save changes to ${useSpecMode ? 'atomizer_spec.json' : 'optimization_config.json'}` : 'No changes to save'}
|
||||
title={specIsDirty ? 'Save changes to atomizer_spec.json' : 'No changes to save'}
|
||||
>
|
||||
<Save size={14} />
|
||||
{isSaving ? 'Saving...' : 'Save'}
|
||||
</button>
|
||||
)}
|
||||
|
||||
{/* Legacy Save Button */}
|
||||
{!useSpecMode && activeStudyId && (
|
||||
<button
|
||||
onClick={saveToConfig}
|
||||
disabled={isSaving || !hasUnsavedChanges}
|
||||
className={`px-3 py-1.5 text-sm rounded-lg transition-colors flex items-center gap-1.5 ${
|
||||
hasUnsavedChanges
|
||||
? 'bg-green-600 hover:bg-green-500 text-white'
|
||||
: 'bg-dark-700 text-dark-400 cursor-not-allowed border border-dark-600'
|
||||
}`}
|
||||
title={hasUnsavedChanges ? 'Save changes to optimization_config.json' : 'No changes to save'}
|
||||
>
|
||||
<Save size={14} />
|
||||
{isSaving ? 'Saving...' : 'Save'}
|
||||
@@ -314,7 +352,7 @@ export function CanvasView() {
|
||||
)}
|
||||
|
||||
{/* Reload Button */}
|
||||
{activeStudyId && (
|
||||
{(useSpecMode ? spec : activeStudyId) && (
|
||||
<button
|
||||
onClick={handleReload}
|
||||
disabled={isLoading || specLoading}
|
||||
@@ -404,7 +442,10 @@ export function CanvasView() {
|
||||
<main className="flex-1 overflow-hidden flex">
|
||||
{/* Left Sidebar with tabs (spec mode only - AtomizerCanvas has its own) */}
|
||||
{useSpecMode && (
|
||||
<div className={`${paletteCollapsed ? 'w-14' : 'w-60'} bg-dark-850 border-r border-dark-700 flex flex-col transition-all duration-200`}>
|
||||
<div
|
||||
className="relative bg-dark-850 border-r border-dark-700 flex flex-col"
|
||||
style={{ width: paletteCollapsed ? 56 : leftPanel.width }}
|
||||
>
|
||||
{/* Tab buttons (only show when expanded) */}
|
||||
{!paletteCollapsed && (
|
||||
<div className="flex border-b border-dark-700">
|
||||
@@ -450,6 +491,16 @@ export function CanvasView() {
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Resize handle (only when not collapsed) */}
|
||||
{!paletteCollapsed && (
|
||||
<ResizeHandle
|
||||
onMouseDown={leftPanel.startDrag}
|
||||
onDoubleClick={leftPanel.resetWidth}
|
||||
isDragging={leftPanel.isDragging}
|
||||
position="right"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -472,19 +523,38 @@ export function CanvasView() {
|
||||
</div>
|
||||
|
||||
{/* Config Panel - use V2 for spec mode, legacy for AtomizerCanvas */}
|
||||
{selectedNodeId && !showChat && (
|
||||
{/* Shows INSTEAD of chat when a node is selected */}
|
||||
{selectedNodeId ? (
|
||||
useSpecMode ? (
|
||||
<NodeConfigPanelV2 onClose={() => useSpecStore.getState().clearSelection()} />
|
||||
<div
|
||||
className="relative border-l border-dark-700 bg-dark-850 flex flex-col"
|
||||
style={{ width: rightPanel.width }}
|
||||
>
|
||||
<ResizeHandle
|
||||
onMouseDown={rightPanel.startDrag}
|
||||
onDoubleClick={rightPanel.resetWidth}
|
||||
isDragging={rightPanel.isDragging}
|
||||
position="left"
|
||||
/>
|
||||
<NodeConfigPanelV2 onClose={() => useSpecStore.getState().clearSelection()} />
|
||||
</div>
|
||||
) : (
|
||||
<div className="w-80 border-l border-dark-700 bg-dark-850 overflow-y-auto">
|
||||
<NodeConfigPanel nodeId={selectedNodeId} />
|
||||
</div>
|
||||
)
|
||||
)}
|
||||
|
||||
{/* Chat/Assistant Panel */}
|
||||
{showChat && (
|
||||
<div className="w-96 border-l border-dark-700 bg-dark-850 flex flex-col">
|
||||
) : showChat ? (
|
||||
<div
|
||||
className="relative border-l border-dark-700 bg-dark-850 flex flex-col"
|
||||
style={{ width: rightPanel.width }}
|
||||
>
|
||||
{/* Resize handle */}
|
||||
<ResizeHandle
|
||||
onMouseDown={rightPanel.startDrag}
|
||||
onDoubleClick={rightPanel.resetWidth}
|
||||
isDragging={rightPanel.isDragging}
|
||||
position="left"
|
||||
/>
|
||||
{/* Chat Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-dark-700">
|
||||
<div className="flex items-center gap-2">
|
||||
@@ -524,7 +594,7 @@ export function CanvasView() {
|
||||
isConnected={isConnected}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
) : null}
|
||||
</main>
|
||||
|
||||
{/* Template Selector Modal */}
|
||||
@@ -541,6 +611,9 @@ export function CanvasView() {
|
||||
onImport={handleImport}
|
||||
/>
|
||||
|
||||
{/* Floating Panels (Introspection, Validation, Error, Results) */}
|
||||
{useSpecMode && <PanelContainer />}
|
||||
|
||||
{/* Notification Toast */}
|
||||
{notification && (
|
||||
<div
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useState, useEffect, lazy, Suspense, useRef } from 'react';
|
||||
import { useState, useEffect, useRef } from 'react';
|
||||
import { useNavigate } from 'react-router-dom';
|
||||
import { Settings } from 'lucide-react';
|
||||
import { useOptimizationWebSocket } from '../hooks/useWebSocket';
|
||||
@@ -21,19 +21,6 @@ import { CurrentTrialPanel, OptimizerStatePanel } from '../components/tracker';
|
||||
import { NivoParallelCoordinates } from '../components/charts';
|
||||
import type { Trial } from '../types';
|
||||
|
||||
// Lazy load Plotly components for better initial load performance
|
||||
const PlotlyParallelCoordinates = lazy(() => import('../components/plotly/PlotlyParallelCoordinates').then(m => ({ default: m.PlotlyParallelCoordinates })));
|
||||
const PlotlyParetoPlot = lazy(() => import('../components/plotly/PlotlyParetoPlot').then(m => ({ default: m.PlotlyParetoPlot })));
|
||||
const PlotlyConvergencePlot = lazy(() => import('../components/plotly/PlotlyConvergencePlot').then(m => ({ default: m.PlotlyConvergencePlot })));
|
||||
const PlotlyParameterImportance = lazy(() => import('../components/plotly/PlotlyParameterImportance').then(m => ({ default: m.PlotlyParameterImportance })));
|
||||
|
||||
// Loading placeholder for lazy components
|
||||
const ChartLoading = () => (
|
||||
<div className="flex items-center justify-center h-64 text-dark-400">
|
||||
<div className="animate-pulse">Loading chart...</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
export default function Dashboard() {
|
||||
const navigate = useNavigate();
|
||||
const { selectedStudy, refreshStudies, isInitialized } = useStudy();
|
||||
@@ -62,8 +49,8 @@ export default function Dashboard() {
|
||||
const [paretoFront, setParetoFront] = useState<any[]>([]);
|
||||
const [allTrialsRaw, setAllTrialsRaw] = useState<any[]>([]); // All trials for parallel coordinates
|
||||
|
||||
// Chart library toggle: 'nivo' (dark theme, default), 'plotly' (more interactive), or 'recharts' (simple)
|
||||
const [chartLibrary, setChartLibrary] = useState<'nivo' | 'plotly' | 'recharts'>('nivo');
|
||||
// Chart library toggle: 'nivo' (dark theme, default) or 'recharts' (simple)
|
||||
const [chartLibrary, setChartLibrary] = useState<'nivo' | 'recharts'>('nivo');
|
||||
|
||||
// Process status for tracker panels
|
||||
const [isRunning, setIsRunning] = useState(false);
|
||||
@@ -464,18 +451,7 @@ export default function Dashboard() {
|
||||
}`}
|
||||
title="Modern Nivo charts with dark theme (recommended)"
|
||||
>
|
||||
Nivo
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setChartLibrary('plotly')}
|
||||
className={`px-3 py-1.5 text-sm transition-colors ${
|
||||
chartLibrary === 'plotly'
|
||||
? 'bg-primary-500 text-white'
|
||||
: 'bg-dark-600 text-dark-200 hover:bg-dark-500'
|
||||
}`}
|
||||
title="Interactive Plotly charts with zoom, pan, and export"
|
||||
>
|
||||
Plotly
|
||||
Advanced
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setChartLibrary('recharts')}
|
||||
@@ -570,22 +546,11 @@ export default function Dashboard() {
|
||||
title="Pareto Front"
|
||||
subtitle={`${paretoFront.length} Pareto-optimal solutions | ${studyMetadata.sampler || 'NSGA-II'} | ${studyMetadata.objectives?.length || 2} objectives`}
|
||||
>
|
||||
{chartLibrary === 'plotly' ? (
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParetoPlot
|
||||
trials={allTrialsRaw}
|
||||
paretoFront={paretoFront}
|
||||
objectives={studyMetadata.objectives}
|
||||
height={300}
|
||||
/>
|
||||
</Suspense>
|
||||
) : (
|
||||
<ParetoPlot
|
||||
paretoData={paretoFront}
|
||||
objectives={studyMetadata.objectives}
|
||||
allTrials={allTrialsRaw}
|
||||
/>
|
||||
)}
|
||||
<ParetoPlot
|
||||
paretoData={paretoFront}
|
||||
objectives={studyMetadata.objectives}
|
||||
allTrials={allTrialsRaw}
|
||||
/>
|
||||
</ExpandableChart>
|
||||
</div>
|
||||
)}
|
||||
@@ -605,16 +570,6 @@ export default function Dashboard() {
|
||||
paretoFront={paretoFront}
|
||||
height={380}
|
||||
/>
|
||||
) : chartLibrary === 'plotly' ? (
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParallelCoordinates
|
||||
trials={allTrialsRaw}
|
||||
objectives={studyMetadata.objectives}
|
||||
designVariables={studyMetadata.design_variables}
|
||||
paretoFront={paretoFront}
|
||||
height={350}
|
||||
/>
|
||||
</Suspense>
|
||||
) : (
|
||||
<ParallelCoordinatesPlot
|
||||
paretoData={allTrialsRaw}
|
||||
@@ -634,24 +589,12 @@ export default function Dashboard() {
|
||||
title="Convergence"
|
||||
subtitle={`Best ${studyMetadata?.objectives?.[0]?.name || 'Objective'} over ${allTrialsRaw.length} trials`}
|
||||
>
|
||||
{chartLibrary === 'plotly' ? (
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyConvergencePlot
|
||||
trials={allTrialsRaw}
|
||||
objectiveIndex={0}
|
||||
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
|
||||
direction="minimize"
|
||||
height={280}
|
||||
/>
|
||||
</Suspense>
|
||||
) : (
|
||||
<ConvergencePlot
|
||||
trials={allTrialsRaw}
|
||||
objectiveIndex={0}
|
||||
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
|
||||
direction="minimize"
|
||||
/>
|
||||
)}
|
||||
<ConvergencePlot
|
||||
trials={allTrialsRaw}
|
||||
objectiveIndex={0}
|
||||
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
|
||||
direction="minimize"
|
||||
/>
|
||||
</ExpandableChart>
|
||||
</div>
|
||||
)}
|
||||
@@ -663,32 +606,16 @@ export default function Dashboard() {
|
||||
title="Parameter Importance"
|
||||
subtitle={`Correlation with ${studyMetadata?.objectives?.[0]?.name || 'Objective'}`}
|
||||
>
|
||||
{chartLibrary === 'plotly' ? (
|
||||
<Suspense fallback={<ChartLoading />}>
|
||||
<PlotlyParameterImportance
|
||||
trials={allTrialsRaw}
|
||||
designVariables={
|
||||
studyMetadata?.design_variables?.length > 0
|
||||
? studyMetadata.design_variables
|
||||
: Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
|
||||
}
|
||||
objectiveIndex={0}
|
||||
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
|
||||
height={280}
|
||||
/>
|
||||
</Suspense>
|
||||
) : (
|
||||
<ParameterImportanceChart
|
||||
trials={allTrialsRaw}
|
||||
designVariables={
|
||||
studyMetadata?.design_variables?.length > 0
|
||||
? studyMetadata.design_variables
|
||||
: Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
|
||||
}
|
||||
objectiveIndex={0}
|
||||
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
|
||||
/>
|
||||
)}
|
||||
<ParameterImportanceChart
|
||||
trials={allTrialsRaw}
|
||||
designVariables={
|
||||
studyMetadata?.design_variables?.length > 0
|
||||
? studyMetadata.design_variables
|
||||
: Object.keys(allTrialsRaw[0]?.params || {}).map(name => ({ name }))
|
||||
}
|
||||
objectiveIndex={0}
|
||||
objectiveName={studyMetadata?.objectives?.[0]?.name || 'Objective'}
|
||||
/>
|
||||
</ExpandableChart>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -394,18 +394,32 @@ const Home: React.FC = () => {
|
||||
<p className="text-dark-400 text-sm">Study Documentation</p>
|
||||
</div>
|
||||
</div>
|
||||
<button
|
||||
onClick={() => handleSelectStudy(selectedPreview)}
|
||||
className="flex items-center gap-2 px-5 py-2.5 rounded-lg transition-all font-semibold whitespace-nowrap hover:-translate-y-0.5"
|
||||
style={{
|
||||
background: 'linear-gradient(135deg, #00d4e6 0%, #0891b2 100%)',
|
||||
color: '#000',
|
||||
boxShadow: '0 4px 15px rgba(0, 212, 230, 0.3)'
|
||||
}}
|
||||
>
|
||||
Open
|
||||
<ArrowRight className="w-4 h-4" />
|
||||
</button>
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={() => navigate(`/canvas/${selectedPreview.id}`)}
|
||||
className="flex items-center gap-2 px-4 py-2.5 rounded-lg transition-all font-medium whitespace-nowrap hover:-translate-y-0.5"
|
||||
style={{
|
||||
background: 'rgba(8, 15, 26, 0.85)',
|
||||
border: '1px solid rgba(0, 212, 230, 0.3)',
|
||||
color: '#00d4e6'
|
||||
}}
|
||||
>
|
||||
<Layers className="w-4 h-4" />
|
||||
Canvas
|
||||
</button>
|
||||
<button
|
||||
onClick={() => handleSelectStudy(selectedPreview)}
|
||||
className="flex items-center gap-2 px-5 py-2.5 rounded-lg transition-all font-semibold whitespace-nowrap hover:-translate-y-0.5"
|
||||
style={{
|
||||
background: 'linear-gradient(135deg, #00d4e6 0%, #0891b2 100%)',
|
||||
color: '#000',
|
||||
boxShadow: '0 4px 15px rgba(0, 212, 230, 0.3)'
|
||||
}}
|
||||
>
|
||||
Open
|
||||
<ArrowRight className="w-4 h-4" />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Study Quick Stats */}
|
||||
|
||||
@@ -20,11 +20,11 @@ import {
|
||||
ExternalLink,
|
||||
Zap,
|
||||
List,
|
||||
LucideIcon
|
||||
LucideIcon,
|
||||
FileText
|
||||
} from 'lucide-react';
|
||||
import { useStudy } from '../context/StudyContext';
|
||||
import { Card } from '../components/common/Card';
|
||||
import Plot from 'react-plotly.js';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
@@ -642,13 +642,15 @@ export default function Insights() {
|
||||
Open Full View
|
||||
</button>
|
||||
)}
|
||||
<button
|
||||
onClick={() => setFullscreen(true)}
|
||||
className="p-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg transition-colors"
|
||||
title="Fullscreen"
|
||||
>
|
||||
<Maximize2 className="w-5 h-5" />
|
||||
</button>
|
||||
{activeInsight.html_path && (
|
||||
<button
|
||||
onClick={() => setFullscreen(true)}
|
||||
className="p-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg transition-colors"
|
||||
title="Fullscreen"
|
||||
>
|
||||
<Maximize2 className="w-5 h-5" />
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -674,49 +676,43 @@ export default function Insights() {
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Plotly Figure */}
|
||||
{/* Insight Result */}
|
||||
<Card className="p-0 overflow-hidden">
|
||||
{activeInsight.plotly_figure ? (
|
||||
<div className="bg-dark-900" style={{ height: '600px' }}>
|
||||
<Plot
|
||||
data={activeInsight.plotly_figure.data}
|
||||
layout={{
|
||||
...activeInsight.plotly_figure.layout,
|
||||
autosize: true,
|
||||
margin: { l: 60, r: 60, t: 60, b: 60 },
|
||||
paper_bgcolor: '#111827',
|
||||
plot_bgcolor: '#1f2937',
|
||||
font: { color: 'white' }
|
||||
}}
|
||||
config={{
|
||||
responsive: true,
|
||||
displayModeBar: true,
|
||||
displaylogo: false
|
||||
}}
|
||||
style={{ width: '100%', height: '100%' }}
|
||||
/>
|
||||
</div>
|
||||
) : (
|
||||
<div className="flex flex-col items-center justify-center h-64 text-dark-400 p-8">
|
||||
<CheckCircle className="w-12 h-12 text-green-400 mb-4" />
|
||||
<p className="text-lg font-medium text-white mb-2">Insight Generated Successfully</p>
|
||||
<div className="flex flex-col items-center justify-center h-64 text-dark-400 p-8">
|
||||
<CheckCircle className="w-12 h-12 text-green-400 mb-4" />
|
||||
<p className="text-lg font-medium text-white mb-2">Insight Generated Successfully</p>
|
||||
{activeInsight.html_path ? (
|
||||
<>
|
||||
<p className="text-sm text-center mb-4">
|
||||
Click the button below to view the interactive visualization.
|
||||
</p>
|
||||
<button
|
||||
onClick={() => window.open(`/api/insights/studies/${selectedStudy?.id}/view/${activeInsight.insight_type}`, '_blank')}
|
||||
className="flex items-center gap-2 px-6 py-3 bg-primary-600 hover:bg-primary-500 text-white rounded-lg font-medium transition-colors"
|
||||
>
|
||||
<ExternalLink className="w-5 h-5" />
|
||||
Open Interactive Visualization
|
||||
</button>
|
||||
</>
|
||||
) : (
|
||||
<p className="text-sm text-center">
|
||||
This insight generates HTML files. Click "Open Full View" to see the visualization.
|
||||
The visualization has been generated. Check the study's insights folder.
|
||||
</p>
|
||||
{activeInsight.summary?.html_files && (
|
||||
<div className="mt-4 text-sm">
|
||||
<p className="text-dark-400 mb-2">Generated files:</p>
|
||||
<ul className="space-y-1">
|
||||
{(activeInsight.summary.html_files as string[]).slice(0, 4).map((f: string, i: number) => (
|
||||
<li key={i} className="text-dark-300">
|
||||
{f.split(/[/\\]/).pop()}
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
)}
|
||||
{activeInsight.summary?.html_files && (
|
||||
<div className="mt-4 text-sm">
|
||||
<p className="text-dark-400 mb-2">Generated files:</p>
|
||||
<ul className="space-y-1">
|
||||
{(activeInsight.summary.html_files as string[]).slice(0, 4).map((f: string, i: number) => (
|
||||
<li key={i} className="text-dark-300 flex items-center gap-2">
|
||||
<FileText className="w-3 h-3" />
|
||||
{f.split(/[/\\]/).pop()}
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</Card>
|
||||
|
||||
{/* Generate Another */}
|
||||
@@ -736,8 +732,8 @@ export default function Insights() {
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Fullscreen Modal */}
|
||||
{fullscreen && activeInsight?.plotly_figure && (
|
||||
{/* Fullscreen Modal - now opens external HTML */}
|
||||
{fullscreen && activeInsight && (
|
||||
<div className="fixed inset-0 z-50 bg-dark-900 flex flex-col">
|
||||
<div className="flex items-center justify-between p-4 border-b border-dark-600">
|
||||
<h2 className="text-xl font-bold text-white">
|
||||
@@ -750,23 +746,24 @@ export default function Insights() {
|
||||
<X className="w-6 h-6" />
|
||||
</button>
|
||||
</div>
|
||||
<div className="flex-1 p-4">
|
||||
<Plot
|
||||
data={activeInsight.plotly_figure.data}
|
||||
layout={{
|
||||
...activeInsight.plotly_figure.layout,
|
||||
autosize: true,
|
||||
paper_bgcolor: '#111827',
|
||||
plot_bgcolor: '#1f2937',
|
||||
font: { color: 'white' }
|
||||
}}
|
||||
config={{
|
||||
responsive: true,
|
||||
displayModeBar: true,
|
||||
displaylogo: false
|
||||
}}
|
||||
style={{ width: '100%', height: '100%' }}
|
||||
/>
|
||||
<div className="flex-1 p-4 flex items-center justify-center">
|
||||
{activeInsight.html_path ? (
|
||||
<iframe
|
||||
src={`/api/insights/studies/${selectedStudy?.id}/view/${activeInsight.insight_type}`}
|
||||
className="w-full h-full border-0 rounded-lg"
|
||||
title={activeInsight.insight_name || activeInsight.insight_type}
|
||||
/>
|
||||
) : (
|
||||
<div className="text-center text-dark-400">
|
||||
<p className="text-lg mb-4">No interactive visualization available for this insight.</p>
|
||||
<button
|
||||
onClick={() => setFullscreen(false)}
|
||||
className="px-4 py-2 bg-dark-700 hover:bg-dark-600 text-white rounded-lg"
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -278,7 +278,7 @@ export default function Setup() {
|
||||
Configuration
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveTab('canvas')}
|
||||
onClick={() => navigate(`/canvas/${selectedStudy?.id || ''}`)}
|
||||
className="flex items-center gap-2 px-4 py-2 rounded-lg transition-colors bg-primary-600 text-white"
|
||||
>
|
||||
<Grid3X3 className="w-4 h-4" />
|
||||
@@ -333,7 +333,7 @@ export default function Setup() {
|
||||
Configuration
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveTab('canvas')}
|
||||
onClick={() => navigate(`/canvas/${selectedStudy?.id || ''}`)}
|
||||
className="flex items-center gap-2 px-4 py-2 rounded-lg transition-colors bg-dark-800 text-dark-300 hover:text-white hover:bg-dark-700"
|
||||
>
|
||||
<Grid3X3 className="w-4 h-4" />
|
||||
|
||||
632
atomizer-dashboard/frontend/src/types/atomizer-spec.ts
Normal file
632
atomizer-dashboard/frontend/src/types/atomizer-spec.ts
Normal file
@@ -0,0 +1,632 @@
|
||||
/**
|
||||
* AtomizerSpec v2.0 TypeScript Types
|
||||
*
|
||||
* These types match the JSON Schema at optimization_engine/schemas/atomizer_spec_v2.json
|
||||
* This is the single source of truth for optimization configuration.
|
||||
*/
|
||||
|
||||
// ============================================================================
|
||||
// Position Types
|
||||
// ============================================================================
|
||||
|
||||
export interface CanvasPosition {
|
||||
x: number;
|
||||
y: number;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Meta Types
|
||||
// ============================================================================
|
||||
|
||||
export type SpecCreatedBy = 'canvas' | 'claude' | 'api' | 'migration' | 'manual';
|
||||
|
||||
export interface SpecMeta {
|
||||
/** Schema version (e.g., "2.0") */
|
||||
version: string;
|
||||
/** When the spec was created (ISO 8601) */
|
||||
created?: string;
|
||||
/** When the spec was last modified (ISO 8601) */
|
||||
modified?: string;
|
||||
/** Who/what created the spec */
|
||||
created_by?: SpecCreatedBy;
|
||||
/** Who/what last modified the spec */
|
||||
modified_by?: string;
|
||||
/** Unique study identifier (snake_case) */
|
||||
study_name: string;
|
||||
/** Human-readable description */
|
||||
description?: string;
|
||||
/** Tags for categorization */
|
||||
tags?: string[];
|
||||
/** Real-world engineering context */
|
||||
engineering_context?: string;
|
||||
/** Current workflow status */
|
||||
status?: 'draft' | 'introspected' | 'configured' | 'validated' | 'ready' | 'running' | 'completed' | 'failed';
|
||||
/** Topic/folder for organization */
|
||||
topic?: string;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Model Types
|
||||
// ============================================================================
|
||||
|
||||
export interface NxPartConfig {
|
||||
/** Path to .prt file */
|
||||
path?: string;
|
||||
/** File hash for change detection */
|
||||
hash?: string;
|
||||
/** Idealized part filename (_i.prt) */
|
||||
idealized_part?: string;
|
||||
}
|
||||
|
||||
export interface FemConfig {
|
||||
/** Path to .fem file */
|
||||
path?: string;
|
||||
/** Number of elements */
|
||||
element_count?: number;
|
||||
/** Number of nodes */
|
||||
node_count?: number;
|
||||
}
|
||||
|
||||
export type SolverType = 'nastran' | 'NX_Nastran' | 'abaqus';
|
||||
|
||||
/**
|
||||
* SolverEngine - The actual solver software used for analysis
|
||||
* - nxnastran: NX Nastran (built into Siemens NX)
|
||||
* - mscnastran: MSC Nastran (external)
|
||||
* - python: Custom Python script
|
||||
* - abaqus: Abaqus (future)
|
||||
* - ansys: ANSYS (future)
|
||||
*/
|
||||
export type SolverEngine = 'nxnastran' | 'mscnastran' | 'python' | 'abaqus' | 'ansys';
|
||||
|
||||
/**
|
||||
* NastranSolutionType - Common Nastran solution types
|
||||
*/
|
||||
export type NastranSolutionType =
|
||||
| 'SOL101' // Linear Statics
|
||||
| 'SOL103' // Normal Modes
|
||||
| 'SOL105' // Buckling
|
||||
| 'SOL106' // Nonlinear Statics
|
||||
| 'SOL111' // Modal Frequency Response
|
||||
| 'SOL112' // Modal Transient Response
|
||||
| 'SOL200'; // Design Optimization
|
||||
|
||||
export type SubcaseType = 'static' | 'modal' | 'thermal' | 'buckling';
|
||||
|
||||
export interface Subcase {
|
||||
id: number;
|
||||
name?: string;
|
||||
type?: SubcaseType;
|
||||
}
|
||||
|
||||
export interface SimConfig {
|
||||
/** Path to .sim file */
|
||||
path: string;
|
||||
/** Solver type (legacy, use engine instead) */
|
||||
solver: SolverType;
|
||||
/** Solver engine software */
|
||||
engine?: SolverEngine;
|
||||
/** Solution type (e.g., SOL101) */
|
||||
solution_type?: NastranSolutionType | string;
|
||||
/** Python script path (for python engine) */
|
||||
script_path?: string;
|
||||
/** Defined subcases */
|
||||
subcases?: Subcase[];
|
||||
}
|
||||
|
||||
export interface NxSettings {
|
||||
nx_install_path?: string;
|
||||
simulation_timeout_s?: number;
|
||||
auto_start_nx?: boolean;
|
||||
}
|
||||
|
||||
export interface IntrospectionExpression {
|
||||
name: string;
|
||||
value: number | null;
|
||||
units: string | null;
|
||||
formula: string | null;
|
||||
is_candidate: boolean;
|
||||
confidence: number;
|
||||
}
|
||||
|
||||
export interface IntrospectionData {
|
||||
timestamp: string;
|
||||
solver_type: string | null;
|
||||
mass_kg: number | null;
|
||||
volume_mm3: number | null;
|
||||
expressions: IntrospectionExpression[];
|
||||
warnings: string[];
|
||||
baseline: {
|
||||
timestamp: string;
|
||||
solve_time_seconds: number;
|
||||
mass_kg: number | null;
|
||||
max_displacement_mm: number | null;
|
||||
max_stress_mpa: number | null;
|
||||
success: boolean;
|
||||
error: string | null;
|
||||
} | null;
|
||||
}
|
||||
|
||||
export interface ModelConfig {
|
||||
nx_part?: NxPartConfig;
|
||||
prt?: NxPartConfig;
|
||||
fem?: FemConfig;
|
||||
sim?: SimConfig;
|
||||
nx_settings?: NxSettings;
|
||||
introspection?: IntrospectionData;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Design Variable Types
|
||||
// ============================================================================
|
||||
|
||||
export type DesignVariableType = 'continuous' | 'integer' | 'categorical';
|
||||
|
||||
export interface DesignVariableBounds {
|
||||
min: number;
|
||||
max: number;
|
||||
}
|
||||
|
||||
export interface DesignVariable {
|
||||
/** Unique identifier (pattern: dv_XXX) */
|
||||
id: string;
|
||||
/** Human-readable name */
|
||||
name: string;
|
||||
/** NX expression name (must match model) */
|
||||
expression_name: string;
|
||||
/** Variable type */
|
||||
type: DesignVariableType;
|
||||
/** Value bounds */
|
||||
bounds: DesignVariableBounds;
|
||||
/** Current/initial value */
|
||||
baseline?: number;
|
||||
/** Physical units (mm, deg, etc.) */
|
||||
units?: string;
|
||||
/** Step size for integer/discrete */
|
||||
step?: number;
|
||||
/** Whether to include in optimization */
|
||||
enabled?: boolean;
|
||||
/** Description */
|
||||
description?: string;
|
||||
/** Canvas position */
|
||||
canvas_position?: CanvasPosition;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Extractor Types
|
||||
// ============================================================================
|
||||
|
||||
export type ExtractorType =
|
||||
| 'displacement'
|
||||
| 'frequency'
|
||||
| 'stress'
|
||||
| 'mass'
|
||||
| 'mass_expression'
|
||||
| 'zernike_opd'
|
||||
| 'zernike_csv'
|
||||
| 'temperature'
|
||||
| 'custom_function';
|
||||
|
||||
export interface ExtractorConfig {
|
||||
/** Inner radius for Zernike (mm) */
|
||||
inner_radius_mm?: number;
|
||||
/** Outer radius for Zernike (mm) */
|
||||
outer_radius_mm?: number;
|
||||
/** Number of Zernike modes */
|
||||
n_modes?: number;
|
||||
/** Low-order modes to filter */
|
||||
filter_low_orders?: number;
|
||||
/** Displacement unit */
|
||||
displacement_unit?: string;
|
||||
/** Reference subcase ID */
|
||||
reference_subcase?: number;
|
||||
/** NX expression name (for mass_expression) */
|
||||
expression_name?: string;
|
||||
/** Mode number (for frequency) */
|
||||
mode_number?: number;
|
||||
/** Element type (for stress) */
|
||||
element_type?: string;
|
||||
/** Result type */
|
||||
result_type?: string;
|
||||
/** Metric type */
|
||||
metric?: string;
|
||||
/** Additional config properties */
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export interface CustomFunction {
|
||||
/** Function name */
|
||||
name?: string;
|
||||
/** Python module path */
|
||||
module?: string;
|
||||
/** Function signature */
|
||||
signature?: string;
|
||||
/** Python source code */
|
||||
source_code?: string;
|
||||
}
|
||||
|
||||
export interface ExtractorOutput {
|
||||
/** Output name (used by objectives/constraints) */
|
||||
name: string;
|
||||
/** Specific metric (max, total, rms, etc.) */
|
||||
metric?: string;
|
||||
/** Subcase ID for this output */
|
||||
subcase?: number;
|
||||
/** Units */
|
||||
units?: string;
|
||||
}
|
||||
|
||||
export interface Extractor {
|
||||
/** Unique identifier (pattern: ext_XXX) */
|
||||
id: string;
|
||||
/** Human-readable name */
|
||||
name: string;
|
||||
/** Extractor type */
|
||||
type: ExtractorType;
|
||||
/** Whether this is a built-in extractor */
|
||||
builtin?: boolean;
|
||||
/** Type-specific configuration */
|
||||
config?: ExtractorConfig;
|
||||
/** Custom function definition (for custom_function type) */
|
||||
function?: CustomFunction;
|
||||
/** Output values this extractor produces */
|
||||
outputs: ExtractorOutput[];
|
||||
/** Canvas position */
|
||||
canvas_position?: CanvasPosition;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Objective Types
|
||||
// ============================================================================
|
||||
|
||||
export type OptimizationDirection = 'minimize' | 'maximize';
|
||||
|
||||
export interface ObjectiveSource {
|
||||
/** Reference to extractor */
|
||||
extractor_id: string;
|
||||
/** Which output from the extractor */
|
||||
output_name: string;
|
||||
}
|
||||
|
||||
export interface Objective {
|
||||
/** Unique identifier (pattern: obj_XXX) */
|
||||
id: string;
|
||||
/** Human-readable name */
|
||||
name: string;
|
||||
/** Optimization direction */
|
||||
direction: OptimizationDirection;
|
||||
/** Weight for weighted sum (multi-objective) */
|
||||
weight?: number;
|
||||
/** Where the value comes from */
|
||||
source: ObjectiveSource;
|
||||
/** Target value (for goal programming) */
|
||||
target?: number;
|
||||
/** Units */
|
||||
units?: string;
|
||||
/** Description */
|
||||
description?: string;
|
||||
/** Canvas position */
|
||||
canvas_position?: CanvasPosition;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Constraint Types
|
||||
// ============================================================================
|
||||
|
||||
export type ConstraintType = 'hard' | 'soft';
|
||||
export type ConstraintOperator = '<=' | '>=' | '<' | '>' | '==';
|
||||
export type PenaltyMethod = 'linear' | 'quadratic' | 'exponential';
|
||||
|
||||
export interface ConstraintSource {
|
||||
extractor_id: string;
|
||||
output_name: string;
|
||||
}
|
||||
|
||||
export interface PenaltyConfig {
|
||||
/** Penalty method */
|
||||
method?: PenaltyMethod;
|
||||
/** Penalty weight */
|
||||
weight?: number;
|
||||
/** Soft margin before penalty kicks in */
|
||||
margin?: number;
|
||||
}
|
||||
|
||||
export interface Constraint {
|
||||
/** Unique identifier (pattern: con_XXX) */
|
||||
id: string;
|
||||
/** Human-readable name */
|
||||
name: string;
|
||||
/** Constraint type */
|
||||
type: ConstraintType;
|
||||
/** Comparison operator */
|
||||
operator: ConstraintOperator;
|
||||
/** Constraint threshold value */
|
||||
threshold: number;
|
||||
/** Where the value comes from */
|
||||
source: ConstraintSource;
|
||||
/** Penalty method configuration */
|
||||
penalty_config?: PenaltyConfig;
|
||||
/** Description */
|
||||
description?: string;
|
||||
/** Canvas position */
|
||||
canvas_position?: CanvasPosition;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Optimization Types
|
||||
// ============================================================================
|
||||
|
||||
export type AlgorithmType = 'TPE' | 'CMA-ES' | 'NSGA-II' | 'RandomSearch' | 'SAT_v3' | 'GP-BO';
|
||||
export type SurrogateType = 'MLP' | 'GNN' | 'ensemble';
|
||||
|
||||
export interface AlgorithmConfig {
|
||||
/** Population size (evolutionary algorithms) */
|
||||
population_size?: number;
|
||||
/** Number of generations */
|
||||
n_generations?: number;
|
||||
/** Mutation probability */
|
||||
mutation_prob?: number | null;
|
||||
/** Crossover probability */
|
||||
crossover_prob?: number;
|
||||
/** Random seed */
|
||||
seed?: number;
|
||||
/** Number of startup trials (TPE) */
|
||||
n_startup_trials?: number;
|
||||
/** Initial sigma (CMA-ES) */
|
||||
sigma0?: number;
|
||||
/** Additional config properties */
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export interface Algorithm {
|
||||
type: AlgorithmType;
|
||||
config?: AlgorithmConfig;
|
||||
}
|
||||
|
||||
export interface OptimizationBudget {
|
||||
/** Maximum number of trials */
|
||||
max_trials?: number;
|
||||
/** Maximum time in hours */
|
||||
max_time_hours?: number;
|
||||
/** Stop if no improvement for N trials */
|
||||
convergence_patience?: number;
|
||||
}
|
||||
|
||||
export interface SurrogateConfig {
|
||||
/** Number of models in ensemble */
|
||||
n_models?: number;
|
||||
/** Network architecture layers */
|
||||
architecture?: number[];
|
||||
/** Retrain every N trials */
|
||||
train_every_n_trials?: number;
|
||||
/** Minimum training samples */
|
||||
min_training_samples?: number;
|
||||
/** Acquisition function candidates */
|
||||
acquisition_candidates?: number;
|
||||
/** FEA validations per round */
|
||||
fea_validations_per_round?: number;
|
||||
}
|
||||
|
||||
export interface Surrogate {
|
||||
enabled?: boolean;
|
||||
type?: SurrogateType;
|
||||
config?: SurrogateConfig;
|
||||
}
|
||||
|
||||
export interface OptimizationConfig {
|
||||
algorithm: Algorithm;
|
||||
budget: OptimizationBudget;
|
||||
surrogate?: Surrogate;
|
||||
canvas_position?: CanvasPosition;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Workflow Types
|
||||
// ============================================================================
|
||||
|
||||
export interface WorkflowStage {
|
||||
id: string;
|
||||
name: string;
|
||||
algorithm?: string;
|
||||
trials?: number;
|
||||
purpose?: string;
|
||||
}
|
||||
|
||||
export interface WorkflowTransition {
|
||||
from: string;
|
||||
to: string;
|
||||
condition?: string;
|
||||
}
|
||||
|
||||
export interface Workflow {
|
||||
stages?: WorkflowStage[];
|
||||
transitions?: WorkflowTransition[];
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Reporting Types
|
||||
// ============================================================================
|
||||
|
||||
export interface InsightConfig {
|
||||
include_html?: boolean;
|
||||
show_pareto_evolution?: boolean;
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export interface Insight {
|
||||
type?: string;
|
||||
for_trials?: string;
|
||||
config?: InsightConfig;
|
||||
}
|
||||
|
||||
export interface ReportingConfig {
|
||||
auto_report?: boolean;
|
||||
report_triggers?: string[];
|
||||
insights?: Insight[];
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Canvas Types
|
||||
// ============================================================================
|
||||
|
||||
export interface CanvasViewport {
|
||||
x: number;
|
||||
y: number;
|
||||
zoom: number;
|
||||
}
|
||||
|
||||
export interface CanvasEdge {
|
||||
source: string;
|
||||
target: string;
|
||||
sourceHandle?: string;
|
||||
targetHandle?: string;
|
||||
}
|
||||
|
||||
export interface CanvasGroup {
|
||||
id: string;
|
||||
name: string;
|
||||
node_ids: string[];
|
||||
}
|
||||
|
||||
export interface CanvasConfig {
|
||||
layout_version?: string;
|
||||
viewport?: CanvasViewport;
|
||||
edges?: CanvasEdge[];
|
||||
groups?: CanvasGroup[];
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Main AtomizerSpec Type
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* AtomizerSpec v2.0 - The unified configuration schema for Atomizer optimization studies.
|
||||
*
|
||||
* This is the single source of truth used by:
|
||||
* - Canvas UI (rendering and editing)
|
||||
* - Backend API (validation and storage)
|
||||
* - Claude Assistant (reading and modifying)
|
||||
* - Optimization Engine (execution)
|
||||
*/
|
||||
export interface AtomizerSpec {
|
||||
/** Metadata about the spec */
|
||||
meta: SpecMeta;
|
||||
/** NX model files and configuration */
|
||||
model: ModelConfig;
|
||||
/** Design variables (NX expressions) to optimize */
|
||||
design_variables: DesignVariable[];
|
||||
/** Physics extractors that compute outputs from FEA results */
|
||||
extractors: Extractor[];
|
||||
/** Optimization objectives (minimize/maximize) */
|
||||
objectives: Objective[];
|
||||
/** Hard and soft constraints */
|
||||
constraints?: Constraint[];
|
||||
/** Optimization algorithm configuration */
|
||||
optimization: OptimizationConfig;
|
||||
/** Multi-stage optimization workflow */
|
||||
workflow?: Workflow;
|
||||
/** Reporting configuration */
|
||||
reporting?: ReportingConfig;
|
||||
/** Canvas UI state (persisted for reconstruction) */
|
||||
canvas?: CanvasConfig;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Utility Types for API Responses
|
||||
// ============================================================================
|
||||
|
||||
export interface SpecValidationError {
|
||||
type: 'schema' | 'semantic' | 'reference';
|
||||
path: string[];
|
||||
message: string;
|
||||
}
|
||||
|
||||
export interface SpecValidationWarning {
|
||||
type: string;
|
||||
path: string[];
|
||||
message: string;
|
||||
}
|
||||
|
||||
export interface SpecValidationReport {
|
||||
valid: boolean;
|
||||
errors: SpecValidationError[];
|
||||
warnings: SpecValidationWarning[];
|
||||
summary: {
|
||||
design_variables: number;
|
||||
extractors: number;
|
||||
objectives: number;
|
||||
constraints: number;
|
||||
custom_functions: number;
|
||||
};
|
||||
}
|
||||
|
||||
export interface SpecModification {
|
||||
operation: 'set' | 'add' | 'remove';
|
||||
path: string;
|
||||
value?: unknown;
|
||||
}
|
||||
|
||||
export interface SpecUpdateResult {
|
||||
success: boolean;
|
||||
hash: string;
|
||||
modified: string;
|
||||
modified_by: string;
|
||||
}
|
||||
|
||||
export interface SpecPatchRequest {
|
||||
path: string;
|
||||
value: unknown;
|
||||
modified_by?: string;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Node Types for Canvas
|
||||
// ============================================================================
|
||||
|
||||
export type SpecNodeType =
|
||||
| 'designVar'
|
||||
| 'extractor'
|
||||
| 'objective'
|
||||
| 'constraint'
|
||||
| 'model'
|
||||
| 'solver'
|
||||
| 'algorithm';
|
||||
|
||||
export interface SpecNodeBase {
|
||||
id: string;
|
||||
type: SpecNodeType;
|
||||
position: CanvasPosition;
|
||||
data: Record<string, unknown>;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// WebSocket Types
|
||||
// ============================================================================
|
||||
|
||||
export type SpecSyncMessageType =
|
||||
| 'spec_updated'
|
||||
| 'validation_error'
|
||||
| 'node_added'
|
||||
| 'node_removed'
|
||||
| 'connection_ack';
|
||||
|
||||
export interface SpecSyncMessage {
|
||||
type: SpecSyncMessageType;
|
||||
timestamp: string;
|
||||
hash?: string;
|
||||
modified_by?: string;
|
||||
changes?: Array<{
|
||||
path: string;
|
||||
old: unknown;
|
||||
new: unknown;
|
||||
}>;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export interface SpecClientMessage {
|
||||
type: 'subscribe' | 'patch_node' | 'add_node' | 'remove_node' | 'update_position';
|
||||
study_id: string;
|
||||
node_id?: string;
|
||||
data?: Record<string, unknown>;
|
||||
position?: CanvasPosition;
|
||||
}
|
||||
@@ -1,3 +1,6 @@
|
||||
// AtomizerSpec v2.0 types (unified configuration)
|
||||
export * from './atomizer-spec';
|
||||
|
||||
// Study types
|
||||
export interface Study {
|
||||
id: string;
|
||||
|
||||
@@ -17,18 +17,10 @@ export default defineConfig({
|
||||
}
|
||||
}
|
||||
},
|
||||
resolve: {
|
||||
alias: {
|
||||
// Use the smaller basic Plotly distribution
|
||||
'plotly.js/dist/plotly': 'plotly.js-basic-dist'
|
||||
}
|
||||
},
|
||||
build: {
|
||||
rollupOptions: {
|
||||
output: {
|
||||
manualChunks: {
|
||||
// Separate Plotly into its own chunk for better caching
|
||||
plotly: ['plotly.js-basic-dist', 'react-plotly.js'],
|
||||
// Separate React and core libs
|
||||
vendor: ['react', 'react-dom', 'react-router-dom'],
|
||||
// Recharts in its own chunk
|
||||
@@ -37,8 +29,5 @@ export default defineConfig({
|
||||
}
|
||||
},
|
||||
chunkSizeWarningLimit: 600
|
||||
},
|
||||
optimizeDeps: {
|
||||
include: ['plotly.js-basic-dist']
|
||||
}
|
||||
})
|
||||
|
||||
@@ -25,6 +25,18 @@ if not exist "%CONDA_PATH%\Scripts\activate.bat" (
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
:: Stop any existing dashboard processes first
|
||||
echo [0/3] Stopping existing processes...
|
||||
taskkill /F /FI "WINDOWTITLE eq Atomizer Backend*" >nul 2>&1
|
||||
taskkill /F /FI "WINDOWTITLE eq Atomizer Frontend*" >nul 2>&1
|
||||
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :%BACKEND_PORT% ^| findstr LISTENING') do (
|
||||
taskkill /F /PID %%a >nul 2>&1
|
||||
)
|
||||
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :%FRONTEND_PORT% ^| findstr LISTENING') do (
|
||||
taskkill /F /PID %%a >nul 2>&1
|
||||
)
|
||||
ping 127.0.0.1 -n 2 >nul
|
||||
|
||||
echo [1/3] Starting Backend Server (port %BACKEND_PORT%)...
|
||||
start "Atomizer Backend" cmd /k "call %CONDA_PATH%\Scripts\activate.bat %CONDA_ENV% && cd /d %SCRIPT_DIR%backend && python -m uvicorn api.main:app --reload --port %BACKEND_PORT%"
|
||||
|
||||
|
||||
@@ -10,11 +10,11 @@ echo.
|
||||
taskkill /F /FI "WINDOWTITLE eq Atomizer Backend*" >nul 2>&1
|
||||
taskkill /F /FI "WINDOWTITLE eq Atomizer Frontend*" >nul 2>&1
|
||||
|
||||
:: Kill any remaining processes on the ports
|
||||
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :8000 ^| findstr LISTENING') do (
|
||||
:: Kill any remaining processes on the ports (backend: 8001, frontend: 3003)
|
||||
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :8001 ^| findstr LISTENING') do (
|
||||
taskkill /F /PID %%a >nul 2>&1
|
||||
)
|
||||
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :5173 ^| findstr LISTENING') do (
|
||||
for /f "tokens=5" %%a in ('netstat -ano ^| findstr :3003 ^| findstr LISTENING') do (
|
||||
taskkill /F /PID %%a >nul 2>&1
|
||||
)
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Atomizer Documentation Index
|
||||
|
||||
**Last Updated**: 2026-01-20
|
||||
**Project Version**: 1.0.0 (AtomizerSpec v2.0 - Full LLM Integration)
|
||||
**Last Updated**: 2026-01-24
|
||||
**Project Version**: 0.5.0 (AtomizerSpec v2.0 - Canvas Builder)
|
||||
|
||||
---
|
||||
|
||||
@@ -201,6 +201,8 @@ Historical documents are preserved in `archive/`:
|
||||
- `archive/historical/` - Legacy documents, old protocols
|
||||
- `archive/marketing/` - Briefings, presentations
|
||||
- `archive/session_summaries/` - Past development sessions
|
||||
- `archive/plans/` - Superseded plan documents (RALPH_LOOP V2/V3, CANVAS V3, etc.)
|
||||
- `archive/PROTOCOL_V1_MONOLITHIC.md` - Original monolithic protocol (Nov 2025)
|
||||
|
||||
---
|
||||
|
||||
@@ -216,5 +218,5 @@ For Claude/AI integration:
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2026-01-20
|
||||
**Last Updated**: 2026-01-24
|
||||
**Maintained By**: Antoine / Atomaste
|
||||
|
||||
438
docs/plans/CANVAS_ROBUSTNESS_PLAN.md
Normal file
438
docs/plans/CANVAS_ROBUSTNESS_PLAN.md
Normal file
@@ -0,0 +1,438 @@
|
||||
# Canvas Builder Robustness & Enhancement Plan
|
||||
|
||||
**Created**: January 21, 2026
|
||||
**Branch**: `feature/studio-enhancement`
|
||||
**Status**: Planning
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
This plan addresses critical issues and enhancements to make the Canvas Builder robust and production-ready:
|
||||
|
||||
1. **Panel Management** - Panels (Introspection, Config, Chat) disappear unexpectedly
|
||||
2. **Pre-run Validation** - No validation before starting optimization
|
||||
3. **Error Handling** - Poor feedback when things go wrong
|
||||
4. **Live Updates** - Polling is inefficient; need WebSocket
|
||||
5. **Visualization** - No convergence charts or progress indicators
|
||||
6. **Testing** - No automated tests for critical flows
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Panel Management System (HIGH PRIORITY)
|
||||
|
||||
### Problem
|
||||
- IntrospectionPanel disappears when user clicks elsewhere on canvas
|
||||
- Panel state is lost (e.g., introspection results, expanded sections)
|
||||
- No way to have multiple panels open simultaneously
|
||||
- Chat panel and Config panel are mutually exclusive
|
||||
|
||||
### Root Cause
|
||||
```typescript
|
||||
// Current: Local state in ModelNodeConfig (NodeConfigPanelV2.tsx:275)
|
||||
const [showIntrospection, setShowIntrospection] = useState(false);
|
||||
|
||||
// When selectedNodeId changes, ModelNodeConfig unmounts, losing state
|
||||
```
|
||||
|
||||
### Solution: Centralized Panel Store
|
||||
|
||||
Create `usePanelStore.ts` - a Zustand store for panel management:
|
||||
|
||||
```typescript
|
||||
// atomizer-dashboard/frontend/src/hooks/usePanelStore.ts
|
||||
|
||||
interface PanelState {
|
||||
// Panel visibility
|
||||
panels: {
|
||||
introspection: { open: boolean; filePath?: string; data?: IntrospectionResult };
|
||||
config: { open: boolean; nodeId?: string };
|
||||
chat: { open: boolean; powerMode: boolean };
|
||||
validation: { open: boolean; errors?: ValidationError[] };
|
||||
results: { open: boolean; trialId?: number };
|
||||
};
|
||||
|
||||
// Actions
|
||||
openPanel: (panel: PanelName, data?: any) => void;
|
||||
closePanel: (panel: PanelName) => void;
|
||||
togglePanel: (panel: PanelName) => void;
|
||||
|
||||
// Panel data persistence
|
||||
setIntrospectionData: (data: IntrospectionResult) => void;
|
||||
clearIntrospectionData: () => void;
|
||||
}
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
|
||||
| Task | File | Description |
|
||||
|------|------|-------------|
|
||||
| 1.1 | `usePanelStore.ts` | Create Zustand store for panel state |
|
||||
| 1.2 | `PanelContainer.tsx` | Create container that renders open panels |
|
||||
| 1.3 | `IntrospectionPanel.tsx` | Refactor to use store instead of local state |
|
||||
| 1.4 | `NodeConfigPanelV2.tsx` | Remove local panel state, use store |
|
||||
| 1.5 | `CanvasView.tsx` | Integrate PanelContainer, remove chat panel logic |
|
||||
| 1.6 | `SpecRenderer.tsx` | Add panel trigger buttons (introspect, validate) |
|
||||
|
||||
### UI Changes
|
||||
|
||||
**Before:**
|
||||
```
|
||||
[Canvas] [Config Panel OR Chat Panel]
|
||||
↑ mutually exclusive
|
||||
```
|
||||
|
||||
**After:**
|
||||
```
|
||||
[Canvas] [Right Panel Area]
|
||||
├── Config Panel (pinnable)
|
||||
├── Chat Panel (collapsible)
|
||||
└── Floating Panels:
|
||||
├── Introspection (draggable, persistent)
|
||||
├── Validation Results
|
||||
└── Trial Details
|
||||
```
|
||||
|
||||
### Panel Behaviors
|
||||
|
||||
| Panel | Trigger | Persistence | Position |
|
||||
|-------|---------|-------------|----------|
|
||||
| **Config** | Node click | While node selected | Right sidebar |
|
||||
| **Chat** | Toggle button | Always available | Right sidebar (below config) |
|
||||
| **Introspection** | "Introspect" button | Until explicitly closed | Floating, draggable |
|
||||
| **Validation** | "Validate" or pre-run | Until fixed or dismissed | Floating |
|
||||
| **Results** | Click on result badge | Until dismissed | Floating |
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Pre-run Validation (HIGH PRIORITY)
|
||||
|
||||
### Problem
|
||||
- User can click "Run" with incomplete spec
|
||||
- No feedback about missing extractors, objectives, or connections
|
||||
- Optimization fails silently or with cryptic errors
|
||||
|
||||
### Solution: Validation Pipeline
|
||||
|
||||
```typescript
|
||||
// Types of validation
|
||||
interface ValidationResult {
|
||||
valid: boolean;
|
||||
errors: ValidationError[]; // Must fix before running
|
||||
warnings: ValidationWarning[]; // Can proceed but risky
|
||||
}
|
||||
|
||||
interface ValidationError {
|
||||
code: string;
|
||||
severity: 'error' | 'warning';
|
||||
path: string; // e.g., "objectives[0]"
|
||||
message: string;
|
||||
suggestion?: string;
|
||||
autoFix?: () => void;
|
||||
}
|
||||
```
|
||||
|
||||
### Validation Rules
|
||||
|
||||
| Rule | Severity | Message |
|
||||
|------|----------|---------|
|
||||
| No design variables | Error | "Add at least one design variable" |
|
||||
| No objectives | Error | "Add at least one objective" |
|
||||
| Objective not connected to extractor | Error | "Objective '{name}' has no source extractor" |
|
||||
| Extractor type not set | Error | "Extractor '{name}' needs a type selected" |
|
||||
| Design var bounds invalid | Error | "Min must be less than max for '{name}'" |
|
||||
| No model file | Error | "No simulation file configured" |
|
||||
| Custom extractor no code | Warning | "Custom extractor '{name}' has no code" |
|
||||
| High trial count (>500) | Warning | "Large budget may take hours to complete" |
|
||||
| Single trial | Warning | "Only 1 trial - results won't be meaningful" |
|
||||
|
||||
### Implementation Tasks
|
||||
|
||||
| Task | File | Description |
|
||||
|------|------|-------------|
|
||||
| 2.1 | `validation/specValidator.ts` | Client-side validation rules |
|
||||
| 2.2 | `ValidationPanel.tsx` | Display validation results |
|
||||
| 2.3 | `SpecRenderer.tsx` | Add "Validate" button, pre-run check |
|
||||
| 2.4 | `api/routes/spec.py` | Server-side validation endpoint |
|
||||
| 2.5 | `useSpecStore.ts` | Add `validate()` action |
|
||||
|
||||
### UI Flow
|
||||
|
||||
```
|
||||
User clicks "Run Optimization"
|
||||
↓
|
||||
[Validate Spec] ──failed──→ [Show ValidationPanel]
|
||||
↓ passed │
|
||||
[Confirm Dialog] │
|
||||
↓ confirmed │
|
||||
[Start Optimization] ←── fix ─────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Error Handling & Recovery (HIGH PRIORITY)
|
||||
|
||||
### Problem
|
||||
- NX crashes don't show useful feedback
|
||||
- Solver failures leave user confused
|
||||
- No way to resume after errors
|
||||
|
||||
### Solution: Error Classification & Display
|
||||
|
||||
```typescript
|
||||
interface OptimizationError {
|
||||
type: 'nx_crash' | 'solver_fail' | 'extractor_error' | 'config_error' | 'system_error';
|
||||
trial?: number;
|
||||
message: string;
|
||||
details?: string;
|
||||
recoverable: boolean;
|
||||
suggestions: string[];
|
||||
}
|
||||
```
|
||||
|
||||
### Error Handling Strategy
|
||||
|
||||
| Error Type | Display | Recovery |
|
||||
|------------|---------|----------|
|
||||
| NX Crash | Toast + Error Panel | Retry trial, skip trial |
|
||||
| Solver Failure | Badge on trial | Mark infeasible, continue |
|
||||
| Extractor Error | Log + badge | Use NaN, continue |
|
||||
| Config Error | Block run | Show validation panel |
|
||||
| System Error | Full modal | Restart optimization |
|
||||
|
||||
### Implementation Tasks
|
||||
|
||||
| Task | File | Description |
|
||||
|------|------|-------------|
|
||||
| 3.1 | `ErrorBoundary.tsx` | Wrap canvas in error boundary |
|
||||
| 3.2 | `ErrorPanel.tsx` | Detailed error display with suggestions |
|
||||
| 3.3 | `optimization.py` | Enhanced error responses with type/recovery |
|
||||
| 3.4 | `SpecRenderer.tsx` | Error state handling, retry buttons |
|
||||
| 3.5 | `useOptimizationStatus.ts` | Hook for status polling with error handling |
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Live Updates via WebSocket (MEDIUM PRIORITY)
|
||||
|
||||
### Problem
|
||||
- Current polling (3s) is inefficient and has latency
|
||||
- Missed updates between polls
|
||||
- No real-time progress indication
|
||||
|
||||
### Solution: WebSocket for Trial Updates
|
||||
|
||||
```typescript
|
||||
// WebSocket events
|
||||
interface TrialStartEvent {
|
||||
type: 'trial_start';
|
||||
trial_number: number;
|
||||
params: Record<string, number>;
|
||||
}
|
||||
|
||||
interface TrialCompleteEvent {
|
||||
type: 'trial_complete';
|
||||
trial_number: number;
|
||||
objectives: Record<string, number>;
|
||||
is_best: boolean;
|
||||
is_feasible: boolean;
|
||||
}
|
||||
|
||||
interface OptimizationCompleteEvent {
|
||||
type: 'optimization_complete';
|
||||
best_trial: number;
|
||||
total_trials: number;
|
||||
}
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
|
||||
| Task | File | Description |
|
||||
|------|------|-------------|
|
||||
| 4.1 | `websocket.py` | Add optimization events to WS |
|
||||
| 4.2 | `run_optimization.py` | Emit events during optimization |
|
||||
| 4.3 | `useOptimizationWebSocket.ts` | Hook for WS subscription |
|
||||
| 4.4 | `SpecRenderer.tsx` | Use WS instead of polling |
|
||||
| 4.5 | `ResultBadge.tsx` | Animate on new results |
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Convergence Visualization (MEDIUM PRIORITY)
|
||||
|
||||
### Problem
|
||||
- No visual feedback on optimization progress
|
||||
- Can't tell if converging or stuck
|
||||
- No Pareto front visualization for multi-objective
|
||||
|
||||
### Solution: Embedded Charts
|
||||
|
||||
### Components
|
||||
|
||||
| Component | Description |
|
||||
|-----------|-------------|
|
||||
| `ConvergenceSparkline` | Tiny chart in ObjectiveNode showing trend |
|
||||
| `ProgressRing` | Circular progress in header (trials/total) |
|
||||
| `ConvergenceChart` | Full chart in Results panel |
|
||||
| `ParetoPlot` | 2D Pareto front for multi-objective |
|
||||
|
||||
### Implementation Tasks
|
||||
|
||||
| Task | File | Description |
|
||||
|------|------|-------------|
|
||||
| 5.1 | `ConvergenceSparkline.tsx` | SVG sparkline component |
|
||||
| 5.2 | `ObjectiveNode.tsx` | Integrate sparkline |
|
||||
| 5.3 | `ProgressRing.tsx` | Circular progress indicator |
|
||||
| 5.4 | `ConvergenceChart.tsx` | Full chart with Recharts |
|
||||
| 5.5 | `ResultsPanel.tsx` | Panel showing detailed results |
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: End-to-End Testing (MEDIUM PRIORITY)
|
||||
|
||||
### Problem
|
||||
- No automated tests for canvas operations
|
||||
- Manual testing is time-consuming and error-prone
|
||||
- Regressions go unnoticed
|
||||
|
||||
### Solution: Playwright E2E Tests
|
||||
|
||||
### Test Scenarios
|
||||
|
||||
| Test | Steps | Assertions |
|
||||
|------|-------|------------|
|
||||
| Load study | Navigate to /canvas/{id} | Spec loads, nodes render |
|
||||
| Add design var | Drag from palette | Node appears, spec updates |
|
||||
| Connect nodes | Drag edge | Edge renders, spec has edge |
|
||||
| Edit node | Click node, change value | Value persists, API called |
|
||||
| Run validation | Click validate | Errors shown for incomplete |
|
||||
| Start optimization | Complete spec, click run | Status shows running |
|
||||
| View results | Wait for trial | Badge shows value |
|
||||
| Stop optimization | Click stop | Status shows stopped |
|
||||
|
||||
### Implementation Tasks
|
||||
|
||||
| Task | File | Description |
|
||||
|------|------|-------------|
|
||||
| 6.1 | `e2e/canvas.spec.ts` | Basic canvas operations |
|
||||
| 6.2 | `e2e/optimization.spec.ts` | Run/stop/status flow |
|
||||
| 6.3 | `e2e/panels.spec.ts` | Panel open/close/persist |
|
||||
| 6.4 | `playwright.config.ts` | Configure Playwright |
|
||||
| 6.5 | `CI workflow` | Run tests in GitHub Actions |
|
||||
|
||||
---
|
||||
|
||||
## Implementation Order
|
||||
|
||||
```
|
||||
Week 1:
|
||||
├── Phase 1: Panel Management (critical UX fix)
|
||||
│ ├── Day 1-2: usePanelStore + PanelContainer
|
||||
│ └── Day 3-4: Refactor existing panels
|
||||
│
|
||||
├── Phase 2: Validation (prevent user errors)
|
||||
│ └── Day 5: Validation rules + UI
|
||||
|
||||
Week 2:
|
||||
├── Phase 3: Error Handling
|
||||
│ ├── Day 1-2: Error types + ErrorPanel
|
||||
│ └── Day 3: Integration with optimization flow
|
||||
│
|
||||
├── Phase 4: WebSocket Updates
|
||||
│ └── Day 4-5: WS events + frontend hook
|
||||
|
||||
Week 3:
|
||||
├── Phase 5: Visualization
|
||||
│ ├── Day 1-2: Sparklines
|
||||
│ └── Day 3: Progress indicators
|
||||
│
|
||||
├── Phase 6: Testing
|
||||
│ └── Day 4-5: Playwright setup + core tests
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Quick Wins (Can Do Now)
|
||||
|
||||
These can be implemented immediately with minimal changes:
|
||||
|
||||
1. **Persist introspection data in localStorage**
|
||||
- Cache introspection results
|
||||
- Restore on panel reopen
|
||||
|
||||
2. **Add loading states to all buttons**
|
||||
- Disable during operations
|
||||
- Show spinners
|
||||
|
||||
3. **Add confirmation dialogs**
|
||||
- Before stopping optimization
|
||||
- Before clearing canvas
|
||||
|
||||
4. **Improve error messages**
|
||||
- Parse NX error logs
|
||||
- Show actionable suggestions
|
||||
|
||||
---
|
||||
|
||||
## Files to Create/Modify
|
||||
|
||||
### New Files
|
||||
```
|
||||
atomizer-dashboard/frontend/src/
|
||||
├── hooks/
|
||||
│ ├── usePanelStore.ts
|
||||
│ └── useOptimizationWebSocket.ts
|
||||
├── components/canvas/
|
||||
│ ├── PanelContainer.tsx
|
||||
│ ├── panels/
|
||||
│ │ ├── ValidationPanel.tsx
|
||||
│ │ ├── ErrorPanel.tsx
|
||||
│ │ └── ResultsPanel.tsx
|
||||
│ └── visualization/
|
||||
│ ├── ConvergenceSparkline.tsx
|
||||
│ ├── ProgressRing.tsx
|
||||
│ └── ConvergenceChart.tsx
|
||||
└── lib/
|
||||
└── validation/
|
||||
└── specValidator.ts
|
||||
|
||||
e2e/
|
||||
├── canvas.spec.ts
|
||||
├── optimization.spec.ts
|
||||
└── panels.spec.ts
|
||||
```
|
||||
|
||||
### Modified Files
|
||||
```
|
||||
atomizer-dashboard/frontend/src/
|
||||
├── pages/CanvasView.tsx
|
||||
├── components/canvas/SpecRenderer.tsx
|
||||
├── components/canvas/panels/IntrospectionPanel.tsx
|
||||
├── components/canvas/panels/NodeConfigPanelV2.tsx
|
||||
├── components/canvas/nodes/ObjectiveNode.tsx
|
||||
└── hooks/useSpecStore.ts
|
||||
|
||||
atomizer-dashboard/backend/api/
|
||||
├── routes/optimization.py
|
||||
├── routes/spec.py
|
||||
└── websocket.py
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
| Phase | Success Metric |
|
||||
|-------|----------------|
|
||||
| 1 | Introspection panel persists across node selections |
|
||||
| 2 | Invalid spec shows clear error before run |
|
||||
| 3 | NX errors display with recovery options |
|
||||
| 4 | Results update within 500ms of trial completion |
|
||||
| 5 | Convergence trend visible on objective nodes |
|
||||
| 6 | All E2E tests pass in CI |
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Review this plan
|
||||
2. Start with Phase 1 (Panel Management) - fixes your immediate issue
|
||||
3. Implement incrementally, commit after each phase
|
||||
445
docs/plans/CANVAS_UX_IMPROVEMENTS.md
Normal file
445
docs/plans/CANVAS_UX_IMPROVEMENTS.md
Normal file
@@ -0,0 +1,445 @@
|
||||
# Canvas UX Improvements - Master Plan
|
||||
|
||||
**Created:** January 2026
|
||||
**Status:** Planning
|
||||
**Branch:** `feature/studio-enhancement`
|
||||
|
||||
## Overview
|
||||
|
||||
This plan addresses three major UX issues in the Canvas Builder:
|
||||
|
||||
1. **Resizable Panels** - Right pane (chat/config) is fixed at 384px, cannot be adjusted
|
||||
2. **Disabled Palette Items** - Model, Solver, Algorithm, Surrogate are grayed out and not draggable
|
||||
3. **Solver Type Selection** - Solver node should allow selection of solver type (NX Nastran, Python, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Phase 7: Resizable Panels
|
||||
|
||||
### Current State
|
||||
- Left sidebar: Fixed 240px (expanded) or 56px (collapsed)
|
||||
- Right panel (Chat/Config): Fixed 384px
|
||||
- Canvas: Takes remaining space
|
||||
|
||||
### Requirements
|
||||
- Users should be able to drag panel edges to resize
|
||||
- Minimum/maximum constraints for usability
|
||||
- Persist panel sizes in localStorage
|
||||
- Smooth resize with proper cursor feedback
|
||||
|
||||
### Implementation
|
||||
|
||||
#### 7.1 Create Resizable Panel Hook
|
||||
```typescript
|
||||
// hooks/useResizablePanel.ts
|
||||
interface ResizablePanelState {
|
||||
width: number;
|
||||
isDragging: boolean;
|
||||
startDrag: (e: React.MouseEvent) => void;
|
||||
}
|
||||
|
||||
function useResizablePanel(
|
||||
key: string,
|
||||
defaultWidth: number,
|
||||
minWidth: number,
|
||||
maxWidth: number
|
||||
): ResizablePanelState
|
||||
```
|
||||
|
||||
#### 7.2 Update CanvasView Layout
|
||||
- Wrap left sidebar with resizer
|
||||
- Wrap right panel with resizer
|
||||
- Add visual drag handles (thin border that highlights on hover)
|
||||
- Add cursor: col-resize on hover
|
||||
|
||||
#### 7.3 Files to Modify
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `hooks/useResizablePanel.ts` | NEW - Resize hook with localStorage persistence |
|
||||
| `pages/CanvasView.tsx` | Add resizers to left/right panels |
|
||||
| `components/canvas/ResizeHandle.tsx` | NEW - Visual resize handle component |
|
||||
|
||||
#### 7.4 Constraints
|
||||
| Panel | Min | Default | Max |
|
||||
|-------|-----|---------|-----|
|
||||
| Left (Palette/Files) | 200px | 240px | 400px |
|
||||
| Right (Chat/Config) | 280px | 384px | 600px |
|
||||
|
||||
---
|
||||
|
||||
## Phase 8: Enable All Palette Items
|
||||
|
||||
### Current State
|
||||
- Model, Solver, Algorithm, Surrogate are marked `canAdd: false`
|
||||
- They appear grayed out with "Auto-created" text
|
||||
- Users cannot drag them to canvas
|
||||
|
||||
### Problem Analysis
|
||||
These nodes were marked as "synthetic" because they're derived from:
|
||||
- **Model**: From `spec.model.sim.path`
|
||||
- **Solver**: From model's solution type
|
||||
- **Algorithm**: From `spec.optimization.algorithm`
|
||||
- **Surrogate**: From `spec.optimization.surrogate`
|
||||
|
||||
However, users need to:
|
||||
1. Add a Model node when creating a new study from scratch
|
||||
2. Configure the Solver type
|
||||
3. Choose an Algorithm
|
||||
4. Enable/configure Surrogate
|
||||
|
||||
### Solution: Make All Items Draggable
|
||||
|
||||
#### 8.1 Update NodePalette
|
||||
```typescript
|
||||
// All items should be draggable
|
||||
export const PALETTE_ITEMS: PaletteItem[] = [
|
||||
{
|
||||
type: 'model',
|
||||
label: 'Model',
|
||||
canAdd: true, // Changed from false
|
||||
description: 'NX/FEM model file',
|
||||
},
|
||||
{
|
||||
type: 'solver',
|
||||
label: 'Solver',
|
||||
canAdd: true, // Changed from false
|
||||
description: 'Analysis solver',
|
||||
},
|
||||
// ... etc
|
||||
];
|
||||
```
|
||||
|
||||
#### 8.2 Handle "Singleton" Nodes
|
||||
Some nodes should only exist once on the canvas:
|
||||
- Model (only one model per study)
|
||||
- Solver (one solver)
|
||||
- Algorithm (one algorithm config)
|
||||
- Surrogate (optional, one)
|
||||
|
||||
When user drags a singleton that already exists:
|
||||
- Option A: Show warning toast "Model already exists"
|
||||
- Option B: Select the existing node instead of creating new
|
||||
- **Recommended**: Option B (select existing)
|
||||
|
||||
#### 8.3 Update SpecRenderer Drop Handler
|
||||
```typescript
|
||||
const onDrop = useCallback(async (event: DragEvent) => {
|
||||
const type = event.dataTransfer.getData('application/reactflow');
|
||||
|
||||
// Check if singleton already exists
|
||||
const SINGLETON_TYPES = ['model', 'solver', 'algorithm', 'surrogate'];
|
||||
if (SINGLETON_TYPES.includes(type)) {
|
||||
const existingNode = nodes.find(n => n.type === type);
|
||||
if (existingNode) {
|
||||
selectNode(existingNode.id);
|
||||
showNotification(`${type} already exists - selected it`);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Create new node...
|
||||
}, [...]);
|
||||
```
|
||||
|
||||
#### 8.4 Default Data for New Node Types
|
||||
```typescript
|
||||
function getDefaultNodeData(type: NodeType, position) {
|
||||
switch (type) {
|
||||
case 'model':
|
||||
return {
|
||||
name: 'Model',
|
||||
sim: { path: '', solver: 'nastran' },
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'solver':
|
||||
return {
|
||||
name: 'Solver',
|
||||
type: 'nxnastran', // Default solver
|
||||
solution_type: 'SOL101',
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'algorithm':
|
||||
return {
|
||||
name: 'Algorithm',
|
||||
type: 'TPE',
|
||||
budget: { max_trials: 100 },
|
||||
canvas_position: position,
|
||||
};
|
||||
case 'surrogate':
|
||||
return {
|
||||
name: 'Surrogate',
|
||||
enabled: false,
|
||||
model_type: 'MLP',
|
||||
min_trials: 20,
|
||||
canvas_position: position,
|
||||
};
|
||||
// ... existing cases
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 8.5 Files to Modify
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `components/canvas/palette/NodePalette.tsx` | Set `canAdd: true` for all items |
|
||||
| `components/canvas/SpecRenderer.tsx` | Handle singleton logic in onDrop |
|
||||
| `lib/spec/converter.ts` | Ensure synthetic nodes have proper IDs |
|
||||
| `hooks/useSpecStore.ts` | Add model/solver/algorithm to addNode support |
|
||||
|
||||
---
|
||||
|
||||
## Phase 9: Solver Type Selection
|
||||
|
||||
### Current State
|
||||
- Solver node shows auto-detected solution type (SOL101, etc.)
|
||||
- No ability to change solver engine or configure it
|
||||
|
||||
### Requirements
|
||||
1. Allow selection of solver engine type
|
||||
2. Configure solution type
|
||||
3. Support future solver types
|
||||
|
||||
### Solver Types to Support
|
||||
|
||||
| Solver | Description | Status |
|
||||
|--------|-------------|--------|
|
||||
| `nxnastran` | NX Nastran (built-in) | Current |
|
||||
| `mscnastran` | MSC Nastran (external) | Future |
|
||||
| `python` | Python-based solver | Future |
|
||||
| `abaqus` | Abaqus (via Python API) | Future |
|
||||
| `ansys` | ANSYS (via Python API) | Future |
|
||||
|
||||
### Solution Types per Solver
|
||||
|
||||
**NX Nastran / MSC Nastran:**
|
||||
- SOL101 - Linear Static
|
||||
- SOL103 - Normal Modes
|
||||
- SOL105 - Buckling
|
||||
- SOL106 - Nonlinear Static
|
||||
- SOL111 - Frequency Response
|
||||
- SOL112 - Transient Response
|
||||
- SOL200 - Design Optimization
|
||||
|
||||
**Python Solver:**
|
||||
- Custom (user-defined)
|
||||
|
||||
### Schema Updates
|
||||
|
||||
#### 9.1 Update AtomizerSpec Types
|
||||
```typescript
|
||||
// types/atomizer-spec.ts
|
||||
|
||||
export type SolverEngine =
|
||||
| 'nxnastran'
|
||||
| 'mscnastran'
|
||||
| 'python'
|
||||
| 'abaqus'
|
||||
| 'ansys';
|
||||
|
||||
export type NastranSolutionType =
|
||||
| 'SOL101'
|
||||
| 'SOL103'
|
||||
| 'SOL105'
|
||||
| 'SOL106'
|
||||
| 'SOL111'
|
||||
| 'SOL112'
|
||||
| 'SOL200';
|
||||
|
||||
export interface SolverConfig {
|
||||
/** Solver engine type */
|
||||
engine: SolverEngine;
|
||||
|
||||
/** Solution type (for Nastran) */
|
||||
solution_type?: NastranSolutionType;
|
||||
|
||||
/** Custom solver script path (for Python solver) */
|
||||
script_path?: string;
|
||||
|
||||
/** Additional solver options */
|
||||
options?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export interface Model {
|
||||
sim?: {
|
||||
path: string;
|
||||
solver: SolverConfig; // Changed from just 'nastran' string
|
||||
};
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
#### 9.2 Update SolverNode Component
|
||||
```typescript
|
||||
// components/canvas/nodes/SolverNode.tsx
|
||||
|
||||
function SolverNodeComponent(props: NodeProps<SolverNodeData>) {
|
||||
const { data } = props;
|
||||
|
||||
return (
|
||||
<BaseNode {...props} icon={<Cpu size={16} />} iconColor="text-violet-400">
|
||||
<div className="flex flex-col gap-1">
|
||||
<span className="text-sm font-medium">{data.engine || 'nxnastran'}</span>
|
||||
<span className="text-xs text-dark-400">
|
||||
{data.solution_type || 'Auto-detect'}
|
||||
</span>
|
||||
</div>
|
||||
</BaseNode>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
#### 9.3 Solver Configuration Panel
|
||||
Add to `NodeConfigPanelV2.tsx`:
|
||||
|
||||
```typescript
|
||||
function SolverNodeConfig({ spec }: SpecConfigProps) {
|
||||
const { patchSpec } = useSpecStore();
|
||||
const solver = spec.model?.sim?.solver || { engine: 'nxnastran' };
|
||||
|
||||
const handleEngineChange = (engine: SolverEngine) => {
|
||||
patchSpec('model.sim.solver.engine', engine);
|
||||
};
|
||||
|
||||
const handleSolutionTypeChange = (type: NastranSolutionType) => {
|
||||
patchSpec('model.sim.solver.solution_type', type);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<div>
|
||||
<label className={labelClass}>Solver Engine</label>
|
||||
<select
|
||||
value={solver.engine}
|
||||
onChange={(e) => handleEngineChange(e.target.value as SolverEngine)}
|
||||
className={selectClass}
|
||||
>
|
||||
<option value="nxnastran">NX Nastran</option>
|
||||
<option value="mscnastran">MSC Nastran</option>
|
||||
<option value="python">Python Script</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
{(solver.engine === 'nxnastran' || solver.engine === 'mscnastran') && (
|
||||
<div>
|
||||
<label className={labelClass}>Solution Type</label>
|
||||
<select
|
||||
value={solver.solution_type || ''}
|
||||
onChange={(e) => handleSolutionTypeChange(e.target.value as NastranSolutionType)}
|
||||
className={selectClass}
|
||||
>
|
||||
<option value="">Auto-detect from model</option>
|
||||
<option value="SOL101">SOL101 - Linear Static</option>
|
||||
<option value="SOL103">SOL103 - Normal Modes</option>
|
||||
<option value="SOL105">SOL105 - Buckling</option>
|
||||
<option value="SOL106">SOL106 - Nonlinear Static</option>
|
||||
<option value="SOL111">SOL111 - Frequency Response</option>
|
||||
<option value="SOL112">SOL112 - Transient Response</option>
|
||||
</select>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{solver.engine === 'python' && (
|
||||
<div>
|
||||
<label className={labelClass}>Solver Script</label>
|
||||
<input
|
||||
type="text"
|
||||
value={solver.script_path || ''}
|
||||
onChange={(e) => patchSpec('model.sim.solver.script_path', e.target.value)}
|
||||
placeholder="/path/to/solver.py"
|
||||
className={inputClass}
|
||||
/>
|
||||
<p className="text-xs text-dark-500 mt-1">
|
||||
Python script that runs the analysis
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
#### 9.4 Files to Modify
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `types/atomizer-spec.ts` | Add SolverEngine, SolverConfig types |
|
||||
| `components/canvas/nodes/SolverNode.tsx` | Show engine and solution type |
|
||||
| `components/canvas/panels/NodeConfigPanelV2.tsx` | Add SolverNodeConfig |
|
||||
| `lib/canvas/schema.ts` | Update SolverNodeData |
|
||||
| Backend: `config/spec_models.py` | Add SolverConfig Pydantic model |
|
||||
|
||||
---
|
||||
|
||||
## Implementation Order
|
||||
|
||||
| Phase | Effort | Priority | Dependencies |
|
||||
|-------|--------|----------|--------------|
|
||||
| **7.1** Resizable Panel Hook | 2h | High | None |
|
||||
| **7.2** CanvasView Resizers | 2h | High | 7.1 |
|
||||
| **8.1** Enable Palette Items | 1h | High | None |
|
||||
| **8.2** Singleton Logic | 2h | High | 8.1 |
|
||||
| **8.3** Default Node Data | 1h | High | 8.2 |
|
||||
| **9.1** Schema Updates | 2h | Medium | None |
|
||||
| **9.2** SolverNode UI | 1h | Medium | 9.1 |
|
||||
| **9.3** Solver Config Panel | 2h | Medium | 9.1, 9.2 |
|
||||
|
||||
**Total Estimated Effort:** ~13 hours
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
### Phase 7 (Resizable Panels)
|
||||
- [ ] Left panel can be resized between 200-400px
|
||||
- [ ] Right panel can be resized between 280-600px
|
||||
- [ ] Resize handles show cursor feedback
|
||||
- [ ] Panel sizes persist across page reload
|
||||
- [ ] Double-click on handle resets to default
|
||||
|
||||
### Phase 8 (Enable Palette Items)
|
||||
- [ ] All 8 node types are draggable from palette
|
||||
- [ ] Dragging singleton to canvas with existing node selects existing
|
||||
- [ ] Toast notification explains the behavior
|
||||
- [ ] New studies can start with empty canvas and add Model first
|
||||
|
||||
### Phase 9 (Solver Selection)
|
||||
- [ ] Solver node shows engine type (nxnastran, python, etc.)
|
||||
- [ ] Clicking solver node opens config panel
|
||||
- [ ] Can select solver engine from dropdown
|
||||
- [ ] Nastran solvers show solution type dropdown
|
||||
- [ ] Python solver shows script path input
|
||||
- [ ] Changes persist to atomizer_spec.json
|
||||
|
||||
---
|
||||
|
||||
## Future Considerations
|
||||
|
||||
### Additional Solver Support
|
||||
- ANSYS integration via pyANSYS
|
||||
- Abaqus integration via abaqus-python
|
||||
- OpenFOAM for CFD
|
||||
- Custom Python solvers with standardized interface
|
||||
|
||||
### Multi-Solver Workflows
|
||||
- Support for chained solvers (thermal → structural)
|
||||
- Co-simulation workflows
|
||||
- Parallel solver execution
|
||||
|
||||
### Algorithm Node Enhancement
|
||||
- Similar to Solver, allow algorithm selection
|
||||
- Show algorithm-specific parameters
|
||||
- Support custom algorithms
|
||||
|
||||
---
|
||||
|
||||
## Commit Strategy
|
||||
|
||||
```bash
|
||||
# Phase 7
|
||||
git commit -m "feat: Add resizable panels to canvas view"
|
||||
|
||||
# Phase 8
|
||||
git commit -m "feat: Enable all palette items with singleton handling"
|
||||
|
||||
# Phase 9
|
||||
git commit -m "feat: Add solver type selection and configuration"
|
||||
```
|
||||
@@ -19,14 +19,14 @@ Atomizer is a structural optimization platform that enables engineers to optimiz
|
||||
|
||||
### Architecture Quality Score: **8.5/10**
|
||||
|
||||
| Aspect | Score | Notes |
|
||||
|--------|-------|-------|
|
||||
| Data Integrity | 9/10 | Single source of truth, hash-based conflict detection |
|
||||
| Type Safety | 9/10 | Pydantic models throughout backend |
|
||||
| Extensibility | 8/10 | Custom extractors, algorithms supported |
|
||||
| Performance | 8/10 | Optimistic updates, WebSocket streaming |
|
||||
| Maintainability | 8/10 | Clear separation of concerns |
|
||||
| Documentation | 7/10 | Good inline docs, needs more high-level guides |
|
||||
| Aspect | Score | Notes |
|
||||
| --------------- | ----- | ----------------------------------------------------- |
|
||||
| Data Integrity | 9/10 | Single source of truth, hash-based conflict detection |
|
||||
| Type Safety | 9/10 | Pydantic models throughout backend |
|
||||
| Extensibility | 8/10 | Custom extractors, algorithms supported |
|
||||
| Performance | 8/10 | Optimistic updates, WebSocket streaming |
|
||||
| Maintainability | 8/10 | Clear separation of concerns |
|
||||
| Documentation | 7/10 | Good inline docs, needs more high-level guides |
|
||||
|
||||
---
|
||||
|
||||
|
||||
49
examples/README.md
Normal file
49
examples/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# Atomizer Examples
|
||||
|
||||
This directory contains example configurations and scripts demonstrating Atomizer capabilities.
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `optimization_config_neural.json` | Neural surrogate-accelerated optimization |
|
||||
| `optimization_config_protocol10.json` | IMSO (Intelligent Multi-Stage Optimization) example |
|
||||
| `optimization_config_protocol12.json` | Custom extractor with Zernike analysis |
|
||||
| `optimization_config_zernike_mirror.json` | Telescope mirror WFE optimization |
|
||||
|
||||
## Scripts
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `llm_mode_simple_example.py` | Basic LLM-driven optimization setup |
|
||||
| `interactive_research_session.py` | Interactive research mode with visualization |
|
||||
|
||||
## Models
|
||||
|
||||
The `Models/` directory contains sample FEA models for testing:
|
||||
- Bracket geometries
|
||||
- Beam structures
|
||||
- Mirror assemblies
|
||||
|
||||
## Zernike Reference
|
||||
|
||||
The `Zernike_old_reference/` directory contains legacy Zernike extraction code for reference purposes.
|
||||
|
||||
## Usage
|
||||
|
||||
1. Copy a configuration file to your study directory
|
||||
2. Modify paths and parameters for your model
|
||||
3. Run optimization with:
|
||||
|
||||
```bash
|
||||
cd studies/your_study
|
||||
python run_optimization.py
|
||||
```
|
||||
|
||||
Or use the Canvas Builder in the dashboard (http://localhost:3003).
|
||||
|
||||
## See Also
|
||||
|
||||
- [Study Creation Guide](../docs/protocols/operations/OP_01_CREATE_STUDY.md)
|
||||
- [Extractor Library](../docs/protocols/system/SYS_12_EXTRACTOR_LIBRARY.md)
|
||||
- [Canvas Builder](../docs/guides/CANVAS.md)
|
||||
@@ -22,6 +22,7 @@ import { analysisTools } from "./tools/analysis.js";
|
||||
import { reportingTools } from "./tools/reporting.js";
|
||||
import { physicsTools } from "./tools/physics.js";
|
||||
import { canvasTools } from "./tools/canvas.js";
|
||||
import { specTools } from "./tools/spec.js";
|
||||
import { adminTools } from "./tools/admin.js";
|
||||
import { ATOMIZER_MODE } from "./utils/paths.js";
|
||||
|
||||
@@ -52,6 +53,7 @@ const userTools: AtomizerTool[] = [
|
||||
...reportingTools,
|
||||
...physicsTools,
|
||||
...canvasTools,
|
||||
...specTools,
|
||||
];
|
||||
|
||||
const powerTools: AtomizerTool[] = [
|
||||
|
||||
1175
mcp-server/atomizer-tools/src/tools/spec.ts
Normal file
1175
mcp-server/atomizer-tools/src/tools/spec.ts
Normal file
File diff suppressed because it is too large
Load Diff
380
nx_journals/introspect_sim.py
Normal file
380
nx_journals/introspect_sim.py
Normal file
@@ -0,0 +1,380 @@
|
||||
"""
|
||||
NX Journal: SIM File Introspection Tool
|
||||
=========================================
|
||||
|
||||
This journal performs deep introspection of an NX .sim file and extracts:
|
||||
- Solutions (name, type, solver)
|
||||
- Boundary conditions (SPCs, loads, etc.)
|
||||
- Subcases
|
||||
- Linked FEM files
|
||||
- Solution properties
|
||||
|
||||
Usage:
|
||||
run_journal.exe introspect_sim.py <sim_file_path> [output_dir]
|
||||
|
||||
Output:
|
||||
_introspection_sim.json - JSON with all extracted data
|
||||
|
||||
Author: Atomizer
|
||||
Created: 2026-01-20
|
||||
Version: 1.0
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import NXOpen
|
||||
import NXOpen.CAE
|
||||
|
||||
|
||||
def get_solutions(simSimulation):
    """Extract all solutions from the simulation.

    Probes the simulation for solutions named "Solution 1" .. "Solution 19"
    via FindObject (NX has no public iteration API for this here, so we probe
    by name).

    Args:
        simSimulation: NX simulation object exposing FindObject(name).

    Returns:
        List of dicts with "name", "type" and a "properties" sub-dict
        (solver_type / analysis_type, None when not available).  On a
        top-level failure the list contains a single {"error": ...} entry.
    """
    solutions = []

    try:
        # Solutions are accessed via FindObject with pattern "Solution[name]".
        # Probe sequential names; common patterns: "Solution 1", "Solution 2", ...
        for i in range(1, 20):  # probe up to 19 candidate solutions
            sol_name = f"Solution {i}"
            try:
                sol = simSimulation.FindObject(f"Solution[{sol_name}]")
                if sol:
                    sol_info = {"name": sol_name, "type": str(type(sol).__name__), "properties": {}}

                    # Best-effort property extraction; attributes may not exist
                    # on every solution type.
                    try:
                        sol_info["properties"]["solver_type"] = (
                            str(sol.SolverType) if hasattr(sol, "SolverType") else None
                        )
                    except Exception:  # narrow: never swallow SystemExit/KeyboardInterrupt
                        pass

                    try:
                        sol_info["properties"]["analysis_type"] = (
                            str(sol.AnalysisType) if hasattr(sol, "AnalysisType") else None
                        )
                    except Exception:
                        pass

                    solutions.append(sol_info)
            except Exception:
                # Solution not found; tolerate gaps in the first few indices,
                # then stop probing.
                if i > 5:
                    break
                continue

    except Exception as e:
        solutions.append({"error": str(e)})

    return solutions
|
||||
|
||||
|
||||
def _probe_found_objects(simSimulation, names):
    """Probe candidate object names via FindObject; return info dicts for hits."""
    found = []
    for name in names:
        try:
            obj = simSimulation.FindObject(name)
        except Exception:  # name not present in this model -- expected, skip
            continue
        if obj:
            found.append({
                "name": name,
                "type": str(type(obj).__name__),
            })
    return found


def get_boundary_conditions(simSimulation, workPart):
    """Extract boundary conditions from the simulation.

    Probes a fixed list of common constraint and load object names via
    FindObject (there is no generic enumeration used here).

    Args:
        simSimulation: NX simulation object exposing FindObject(name).
        workPart: Work part (currently unused; kept for interface stability).

    Returns:
        Dict with "constraints", "loads" (lists of {"name", "type"}) and
        "total_count"; an "error" key is added on top-level failure.
    """
    bcs = {"constraints": [], "loads": [], "total_count": 0}

    try:
        # Common constraint-group naming patterns seen in NX sims.
        constraint_names = [
            "Constraint Group[1]",
            "Constraint Group[2]",
            "Constraint Group[3]",
            "SPC[1]",
            "SPC[2]",
            "SPC[3]",
            "Fixed Constraint[1]",
            "Fixed Constraint[2]",
        ]
        bcs["constraints"] = _probe_found_objects(simSimulation, constraint_names)

        # Common load-group naming patterns.
        load_names = [
            "Load Group[1]",
            "Load Group[2]",
            "Load Group[3]",
            "Force[1]",
            "Force[2]",
            "Pressure[1]",
            "Pressure[2]",
            "Enforced Displacement[1]",
            "Enforced Displacement[2]",
        ]
        bcs["loads"] = _probe_found_objects(simSimulation, load_names)

        bcs["total_count"] = len(bcs["constraints"]) + len(bcs["loads"])

    except Exception as e:
        bcs["error"] = str(e)

    return bcs
|
||||
|
||||
|
||||
def get_sim_part_info(workPart):
    """Extract SIM part-level information.

    Args:
        workPart: NX part object (the loaded .sim work part).

    Returns:
        Dict with "name", "full_path", "type", "component_count",
        "is_assembly" (False when no component assembly is reachable),
        optionally "components" (first 10 children), and "error" on failure.
        "fem_parts" is reserved and currently always empty.
    """
    info = {"name": None, "full_path": None, "type": None, "fem_parts": [], "component_count": 0}

    try:
        info["name"] = workPart.Name
        info["full_path"] = workPart.FullPath if hasattr(workPart, "FullPath") else None
        info["type"] = str(type(workPart).__name__)

        # Check for component assembly (assembly FEM).
        try:
            root = workPart.ComponentAssembly.RootComponent
            if root:
                info["is_assembly"] = True
                # Count and sample the child components (best effort).
                try:
                    children = root.GetChildren()
                    info["component_count"] = len(children) if children else 0

                    components = []
                    for child in children[:10]:  # limit to first 10 for brevity
                        try:
                            comp_info = {
                                "name": child.Name if hasattr(child, "Name") else str(child),
                                "type": str(type(child).__name__),
                            }
                            components.append(comp_info)
                        except Exception:
                            pass
                    info["components"] = components
                except Exception:
                    pass
        except Exception:
            # No reachable component assembly -> treat as a single-part sim.
            info["is_assembly"] = False

    except Exception as e:
        info["error"] = str(e)

    return info
|
||||
|
||||
|
||||
def get_cae_session_info(theSession):
    """Get CAE session information.

    Args:
        theSession: NX session object exposing GetExportedObject(name).

    Returns:
        Dict with "active_sim_part", "active_fem_part", "solver_types"
        (currently always None/empty placeholders) and "cae_session_exists".
    """
    cae_info = {"active_sim_part": None, "active_fem_part": None, "solver_types": []}

    try:
        # Get CAE session.  Fix: the key is now always set (previously it was
        # missing when GetExportedObject returned a falsy value without raising).
        caeSession = theSession.GetExportedObject("NXOpen.CAE.CaeSession")
        cae_info["cae_session_exists"] = bool(caeSession)
    except Exception:
        cae_info["cae_session_exists"] = False

    return cae_info
|
||||
|
||||
|
||||
def explore_simulation_tree(simSimulation, workPart):
    """Explore the simulation tree structure.

    Probes a list of likely child-object patterns (exploratory -- the exact
    NX API surface is not enumerable here).

    Args:
        simSimulation: NX simulation object exposing FindObject(pattern).
        workPart: Work part (currently unused; kept for interface stability).

    Returns:
        Dict with "simulation_objects" (found pattern/type entries) and
        "found_types" (always a JSON-serializable list); "error" on failure.
    """
    tree_info = {"simulation_objects": [], "found_types": set()}

    try:
        # Try common child object patterns.
        patterns = [
            # Solutions
            "Solution[Solution 1]",
            "Solution[Solution 2]",
            "Solution[SOLUTION 1]",
            # Subcases
            "Subcase[Subcase 1]",
            "Subcase[Subcase - Static 1]",
            # Loads/BCs
            "LoadSet[LoadSet 1]",
            "ConstraintSet[ConstraintSet 1]",
            "BoundaryCondition[1]",
            # FEM reference
            "FemPart",
            "AssyFemPart",
        ]

        for pattern in patterns:
            try:
                obj = simSimulation.FindObject(pattern)
                if obj:
                    obj_info = {"pattern": pattern, "type": str(type(obj).__name__), "found": True}
                    tree_info["simulation_objects"].append(obj_info)
                    tree_info["found_types"].add(str(type(obj).__name__))
            except Exception:
                pass

    except Exception as e:
        tree_info["error"] = str(e)
    finally:
        # Fix: convert unconditionally.  Previously, an exception raised before
        # the conversion left a set here, which is not JSON serializable and
        # crashed the json.dump of the results downstream.
        tree_info["found_types"] = sorted(tree_info["found_types"])

    return tree_info
|
||||
|
||||
|
||||
def main(args):
    """Main entry point for NX journal.

    Opens the given .sim file in the running NX session, runs all
    introspection helpers, and writes the combined results to
    ``_introspection_sim.json`` in the output directory.

    Args:
        args: Journal argument list -- args[0] is the .sim path, optional
            args[1] is the output directory (defaults to the sim's directory).

    Returns:
        True on success, False on any fatal failure (the JSON is written
        either way, with "error" populated on failure).
    """

    if len(args) < 1:
        print("ERROR: No .sim file path provided")
        print("Usage: run_journal.exe introspect_sim.py <sim_file_path> [output_dir]")
        return False

    sim_file_path = args[0]
    output_dir = args[1] if len(args) > 1 else os.path.dirname(sim_file_path)
    sim_filename = os.path.basename(sim_file_path)

    print(f"[INTROSPECT-SIM] " + "=" * 60)
    print(f"[INTROSPECT-SIM] NX SIMULATION INTROSPECTION")
    print(f"[INTROSPECT-SIM] " + "=" * 60)
    print(f"[INTROSPECT-SIM] SIM File: {sim_filename}")
    print(f"[INTROSPECT-SIM] Output: {output_dir}")

    # Results skeleton -- every section is filled best-effort below so the
    # JSON schema is stable regardless of partial failures.
    results = {
        "sim_file": sim_filename,
        "sim_path": sim_file_path,
        "success": False,
        "error": None,
        "part_info": {},
        "solutions": [],
        "boundary_conditions": {},
        "tree_structure": {},
        "cae_info": {},
    }

    try:
        theSession = NXOpen.Session.GetSession()

        # Set load options so linked FEM/part components resolve from the
        # sim's own directory.
        working_dir = os.path.dirname(sim_file_path)
        theSession.Parts.LoadOptions.ComponentLoadMethod = (
            NXOpen.LoadOptions.LoadMethod.FromDirectory
        )
        theSession.Parts.LoadOptions.SetSearchDirectories([working_dir], [True])
        theSession.Parts.LoadOptions.ComponentsToLoad = NXOpen.LoadOptions.LoadComponents.All
        theSession.Parts.LoadOptions.PartLoadOption = NXOpen.LoadOptions.LoadOption.FullyLoad

        # Open the SIM file
        print(f"[INTROSPECT-SIM] Opening SIM file...")
        basePart, partLoadStatus = theSession.Parts.OpenActiveDisplay(
            sim_file_path, NXOpen.DisplayPartOption.AllowAdditional
        )
        partLoadStatus.Dispose()

        workPart = theSession.Parts.Work
        print(f"[INTROSPECT-SIM] Loaded: {workPart.Name}")

        # Switch to SFEM application (non-fatal if unavailable).
        try:
            theSession.ApplicationSwitchImmediate("UG_APP_SFEM")
            print(f"[INTROSPECT-SIM] Switched to SFEM application")
        except Exception as e:
            print(f"[INTROSPECT-SIM] Note: Could not switch to SFEM: {e}")

        # Get part info
        print(f"[INTROSPECT-SIM] Extracting part info...")
        results["part_info"] = get_sim_part_info(workPart)
        print(f"[INTROSPECT-SIM] Part: {results['part_info'].get('name')}")
        print(f"[INTROSPECT-SIM] Is Assembly: {results['part_info'].get('is_assembly', False)}")

        # Get simulation object
        print(f"[INTROSPECT-SIM] Finding Simulation object...")
        try:
            simSimulation = workPart.FindObject("Simulation")
            print(f"[INTROSPECT-SIM] Found Simulation object: {type(simSimulation).__name__}")

            # Get solutions
            print(f"[INTROSPECT-SIM] Extracting solutions...")
            results["solutions"] = get_solutions(simSimulation)
            print(f"[INTROSPECT-SIM] Found {len(results['solutions'])} solutions")

            # Get boundary conditions
            print(f"[INTROSPECT-SIM] Extracting boundary conditions...")
            results["boundary_conditions"] = get_boundary_conditions(simSimulation, workPart)
            print(
                f"[INTROSPECT-SIM] Found {results['boundary_conditions'].get('total_count', 0)} BCs"
            )

            # Explore tree structure
            print(f"[INTROSPECT-SIM] Exploring simulation tree...")
            results["tree_structure"] = explore_simulation_tree(simSimulation, workPart)
            print(
                f"[INTROSPECT-SIM] Found types: {results['tree_structure'].get('found_types', [])}"
            )

        except Exception as e:
            print(f"[INTROSPECT-SIM] WARNING: Could not find Simulation object: {e}")
            results["simulation_object_error"] = str(e)

        # Get CAE session info
        print(f"[INTROSPECT-SIM] Getting CAE session info...")
        results["cae_info"] = get_cae_session_info(theSession)

        # List all loaded parts
        print(f"[INTROSPECT-SIM] Listing loaded parts...")
        loaded_parts = []
        for part in theSession.Parts:
            try:
                loaded_parts.append(
                    {
                        "name": part.Name,
                        "type": str(type(part).__name__),
                        "leaf": part.Leaf if hasattr(part, "Leaf") else None,
                    }
                )
            except Exception:  # narrow: was a bare except
                pass
        results["loaded_parts"] = loaded_parts
        print(f"[INTROSPECT-SIM] {len(loaded_parts)} parts loaded")

        results["success"] = True
        print(f"[INTROSPECT-SIM] ")
        print(f"[INTROSPECT-SIM] INTROSPECTION COMPLETE!")
        print(f"[INTROSPECT-SIM] " + "=" * 60)

    except Exception as e:
        results["error"] = str(e)
        results["success"] = False
        print(f"[INTROSPECT-SIM] FATAL ERROR: {e}")
        import traceback

        traceback.print_exc()

    # Write results (always, even on failure, so callers can inspect "error").
    output_file = os.path.join(output_dir, "_introspection_sim.json")
    with open(output_file, "w") as f:
        json.dump(results, f, indent=2)
    print(f"[INTROSPECT-SIM] Results written to: {output_file}")

    return results["success"]
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # run_journal.exe passes the journal arguments after the script path;
    # forward them to main() unchanged.
    main(sys.argv[1:])
|
||||
@@ -9,6 +9,7 @@ Modules:
|
||||
- builder: OptimizationConfigBuilder for creating configs
|
||||
- setup_wizard: Interactive configuration setup
|
||||
- capability_matcher: Match capabilities to requirements
|
||||
- spec_models: AtomizerSpec v2.0 Pydantic models (unified configuration)
|
||||
"""
|
||||
|
||||
# Lazy imports to avoid circular dependencies
|
||||
@@ -31,6 +32,27 @@ def __getattr__(name):
|
||||
elif name == 'TemplateLoader':
|
||||
from .template_loader import TemplateLoader
|
||||
return TemplateLoader
|
||||
elif name == 'AtomizerSpec':
|
||||
from .spec_models import AtomizerSpec
|
||||
return AtomizerSpec
|
||||
elif name == 'SpecValidator':
|
||||
from .spec_validator import SpecValidator
|
||||
return SpecValidator
|
||||
elif name == 'SpecValidationError':
|
||||
from .spec_validator import SpecValidationError
|
||||
return SpecValidationError
|
||||
elif name == 'validate_spec':
|
||||
from .spec_validator import validate_spec
|
||||
return validate_spec
|
||||
elif name == 'SpecMigrator':
|
||||
from .migrator import SpecMigrator
|
||||
return SpecMigrator
|
||||
elif name == 'migrate_config':
|
||||
from .migrator import migrate_config
|
||||
return migrate_config
|
||||
elif name == 'migrate_config_file':
|
||||
from .migrator import migrate_config_file
|
||||
return migrate_config_file
|
||||
raise AttributeError(f"module 'optimization_engine.config' has no attribute '{name}'")
|
||||
|
||||
__all__ = [
|
||||
@@ -40,4 +62,11 @@ __all__ = [
|
||||
'SetupWizard',
|
||||
'CapabilityMatcher',
|
||||
'TemplateLoader',
|
||||
'AtomizerSpec',
|
||||
'SpecValidator',
|
||||
'SpecValidationError',
|
||||
'validate_spec',
|
||||
'SpecMigrator',
|
||||
'migrate_config',
|
||||
'migrate_config_file',
|
||||
]
|
||||
|
||||
844
optimization_engine/config/migrator.py
Normal file
844
optimization_engine/config/migrator.py
Normal file
@@ -0,0 +1,844 @@
|
||||
"""
|
||||
AtomizerSpec v2.0 Migrator
|
||||
|
||||
Converts legacy optimization_config.json files to AtomizerSpec v2.0 format.
|
||||
|
||||
Supports migration from:
|
||||
- Mirror/Zernike configs (extraction_method, zernike_settings)
|
||||
- Structural/Bracket configs (optimization_settings, simulation_settings)
|
||||
- Canvas Intent format (simplified canvas output)
|
||||
|
||||
Migration Rules:
|
||||
- bounds: [min, max] -> bounds: {min, max}
|
||||
- parameter -> expression_name
|
||||
- goal/type: "minimize"/"maximize" -> direction: "minimize"/"maximize"
|
||||
- Infers extractors from objectives and extraction settings
|
||||
- Generates canvas edges automatically
|
||||
"""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
||||
import json
|
||||
import re
|
||||
|
||||
|
||||
class MigrationError(Exception):
    """Error raised when a legacy config cannot be converted to AtomizerSpec v2.0."""
|
||||
|
||||
|
||||
class SpecMigrator:
|
||||
"""
|
||||
Migrate old optimization_config.json to AtomizerSpec v2.0.
|
||||
|
||||
Handles multiple legacy formats and infers missing information.
|
||||
"""
|
||||
|
||||
    # Extractor type inference based on objective names.
    # Keys are regex fragments matched against an objective's name; values are
    # the built-in extractor type to create for it.
    # NOTE(review): the matching code (_infer_extractor_type) is defined
    # elsewhere -- confirm whether matching is case-insensitive and whether
    # insertion order decides ties for overlapping patterns.
    EXTRACTOR_INFERENCE = {
        # Zernike patterns
        r"wfe|zernike|opd": "zernike_opd",
        r"mfg|manufacturing": "zernike_opd",
        r"rms": "zernike_opd",
        # Structural patterns
        r"displacement|deflection|deform": "displacement",
        r"stress|von.?mises": "stress",
        r"frequency|modal|eigen": "frequency",
        r"mass|weight": "mass",
        r"stiffness": "displacement",  # Stiffness computed from displacement
        r"temperature|thermal": "temperature",
    }
|
||||
|
||||
    def __init__(self, study_path: Optional[Path] = None):
        """
        Initialize migrator.

        Args:
            study_path: Path to study directory (for inferring sim/fem paths).
                When None, migrate_file() will infer it from the config file
                location.
        """
        self.study_path = Path(study_path) if study_path else None
        # Monotonic counters used to assign stable ids (ext_001, obj_001,
        # con_001, dv_001, ...) within a single migrate() call; migrate()
        # resets them at the start of each run.
        self._extractor_counter = 0
        self._objective_counter = 0
        self._constraint_counter = 0
        self._dv_counter = 0
|
||||
|
||||
    def migrate(
        self,
        old_config: Dict[str, Any],
        study_name: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Convert old config to AtomizerSpec v2.0.

        Orchestrates the per-section migrators: meta, model, design variables,
        extractors+objectives (linked), constraints, canvas edges, and an
        optional workflow section when SAT/turbo settings are detected.

        Args:
            old_config: Legacy config dict
            study_name: Override study name (defaults to config value)

        Returns:
            AtomizerSpec v2.0 dict
        """
        # Reset counters so ids (ext_001, dv_001, ...) are stable per call.
        self._extractor_counter = 0
        self._objective_counter = 0
        self._constraint_counter = 0
        self._dv_counter = 0

        # Detect config type (mirror / structural / canvas_intent / generic).
        config_type = self._detect_config_type(old_config)

        # Build spec skeleton; list sections are filled below.
        spec = {
            "meta": self._migrate_meta(old_config, study_name),
            "model": self._migrate_model(old_config, config_type),
            "design_variables": self._migrate_design_variables(old_config),
            "extractors": [],
            "objectives": [],
            "constraints": [],
            "optimization": self._migrate_optimization(old_config, config_type),
            "canvas": {"edges": [], "layout_version": "2.0"}
        }

        # Migrate extractors and objectives together (objectives reference
        # extractor ids, so they must be created in one pass).
        extractors, objectives = self._migrate_extractors_and_objectives(old_config, config_type)
        spec["extractors"] = extractors
        spec["objectives"] = objectives

        # Migrate constraints (may attach to the extractors created above).
        spec["constraints"] = self._migrate_constraints(old_config, spec["extractors"])

        # Generate canvas edges from the assembled spec.
        spec["canvas"]["edges"] = self._generate_edges(spec)

        # Add workflow if SAT/turbo settings present.
        if self._has_sat_settings(old_config):
            spec["workflow"] = self._migrate_workflow(old_config)

        return spec
|
||||
|
||||
    def migrate_file(
        self,
        config_path: Union[str, Path],
        output_path: Optional[Union[str, Path]] = None
    ) -> Dict[str, Any]:
        """
        Migrate a config file and optionally save the result.

        Side effect: when ``self.study_path`` is None it is set from the
        config file location and kept for subsequent calls.

        Args:
            config_path: Path to old config file
            output_path: Path to save new spec (optional)

        Returns:
            AtomizerSpec v2.0 dict

        Raises:
            MigrationError: If the config file does not exist.
        """
        config_path = Path(config_path)

        if not config_path.exists():
            raise MigrationError(f"Config file not found: {config_path}")

        with open(config_path, 'r', encoding='utf-8') as f:
            old_config = json.load(f)

        # Infer study path from config location.
        if self.study_path is None:
            # Config is typically in study_dir/1_setup/ or study_dir/
            if config_path.parent.name == "1_setup":
                self.study_path = config_path.parent.parent
            else:
                self.study_path = config_path.parent

        spec = self.migrate(old_config)

        if output_path:
            output_path = Path(output_path)
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(spec, f, indent=2, ensure_ascii=False)

        return spec
|
||||
|
||||
# =========================================================================
|
||||
# Detection
|
||||
# =========================================================================
|
||||
|
||||
def _detect_config_type(self, config: Dict) -> str:
|
||||
"""Detect the type of config format."""
|
||||
if "extraction_method" in config or "zernike_settings" in config:
|
||||
return "mirror"
|
||||
elif "simulation_settings" in config or "extraction_settings" in config:
|
||||
return "structural"
|
||||
elif "optimization_settings" in config:
|
||||
return "structural"
|
||||
elif "extractors" in config:
|
||||
# Already partially in new format (canvas intent)
|
||||
return "canvas_intent"
|
||||
else:
|
||||
# Generic/minimal format
|
||||
return "generic"
|
||||
|
||||
def _has_sat_settings(self, config: Dict) -> bool:
|
||||
"""Check if config has SAT/turbo settings."""
|
||||
return (
|
||||
"sat_settings" in config or
|
||||
config.get("optimization", {}).get("algorithm") in ["SAT_v3", "SAT", "turbo"]
|
||||
)
|
||||
|
||||
# =========================================================================
|
||||
# Meta Migration
|
||||
# =========================================================================
|
||||
|
||||
def _migrate_meta(self, config: Dict, study_name: Optional[str]) -> Dict:
|
||||
"""Migrate metadata section."""
|
||||
now = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
|
||||
|
||||
name = study_name or config.get("study_name", "migrated_study")
|
||||
# Ensure snake_case
|
||||
name = re.sub(r'[^a-z0-9_]', '_', name.lower())
|
||||
name = re.sub(r'_+', '_', name).strip('_')
|
||||
|
||||
meta = {
|
||||
"version": "2.0",
|
||||
"created": now,
|
||||
"modified": now,
|
||||
"created_by": "migration",
|
||||
"modified_by": "migration",
|
||||
"study_name": name,
|
||||
"description": config.get("description", ""),
|
||||
"tags": []
|
||||
}
|
||||
|
||||
# Extract tags from various sources
|
||||
if "study_tag" in config:
|
||||
meta["tags"].append(config["study_tag"])
|
||||
|
||||
if "business_context" in config:
|
||||
meta["engineering_context"] = config["business_context"].get("purpose", "")
|
||||
|
||||
# Infer tags from config type
|
||||
if "zernike_settings" in config:
|
||||
meta["tags"].extend(["mirror", "zernike"])
|
||||
if "extraction_method" in config:
|
||||
if config["extraction_method"].get("type") == "zernike_opd":
|
||||
meta["tags"].append("opd")
|
||||
|
||||
return meta
|
||||
|
||||
# =========================================================================
|
||||
# Model Migration
|
||||
# =========================================================================
|
||||
|
||||
    def _migrate_model(self, config: Dict, config_type: str) -> Dict:
        """Migrate model section (sim/fem/prt paths).

        Reads paths from ``nx_settings`` (mirror format) or
        ``simulation_settings`` (structural format); as a last resort scans
        ``<study>/1_setup/model`` for the first ``*.sim`` file.

        Args:
            config: Legacy config dict.
            config_type: Detected config type (currently unused here; the
                format is re-detected from the sections present).

        Returns:
            Model section dict with at least ``sim.path`` and ``sim.solver``.
        """
        model = {
            "sim": {
                "path": "",
                "solver": "nastran"
            }
        }

        # Extract from nx_settings (mirror format)
        if "nx_settings" in config:
            nx = config["nx_settings"]
            model["sim"]["path"] = nx.get("sim_file", "")
            if "nx_install_path" in nx:
                model["nx_settings"] = {
                    "nx_install_path": nx["nx_install_path"],
                    "simulation_timeout_s": nx.get("simulation_timeout_s", 600)
                }

        # Extract from simulation_settings (structural format)
        elif "simulation_settings" in config:
            sim = config["simulation_settings"]
            model["sim"]["path"] = sim.get("sim_file", "")
            solver = sim.get("solver", "nastran").lower()
            # Normalize solver name - valid values: nastran, NX_Nastran, abaqus
            # NOTE(review): since `solver` is lowercased, the "NX_Nastran"
            # membership test can never match; the fallback expression reduces
            # to: mapped value, else the solver itself when it is
            # "nastran"/"abaqus", else "nastran". Confirm that was intended.
            solver_map = {"nx": "nastran", "nx_nastran": "NX_Nastran", "nxnastran": "NX_Nastran"}
            model["sim"]["solver"] = solver_map.get(solver, "nastran" if solver not in ["nastran", "NX_Nastran", "abaqus"] else solver)
            if sim.get("solution_type"):
                model["sim"]["solution_type"] = sim["solution_type"]

            if sim.get("model_file"):
                model["nx_part"] = {"path": sim["model_file"]}
            if sim.get("fem_file"):
                model["fem"] = {"path": sim["fem_file"]}

        # Try to infer from study path (first *.sim under 1_setup/model wins).
        if self.study_path and not model["sim"]["path"]:
            setup_dir = self.study_path / "1_setup" / "model"
            if setup_dir.exists():
                for f in setup_dir.glob("*.sim"):
                    model["sim"]["path"] = str(f.relative_to(self.study_path))
                    break

        return model
|
||||
|
||||
# =========================================================================
|
||||
# Design Variables Migration
|
||||
# =========================================================================
|
||||
|
||||
def _migrate_design_variables(self, config: Dict) -> List[Dict]:
|
||||
"""Migrate design variables."""
|
||||
dvs = []
|
||||
|
||||
for dv in config.get("design_variables", []):
|
||||
self._dv_counter += 1
|
||||
|
||||
# Handle different bound formats
|
||||
if "bounds" in dv:
|
||||
if isinstance(dv["bounds"], list):
|
||||
bounds = {"min": dv["bounds"][0], "max": dv["bounds"][1]}
|
||||
else:
|
||||
bounds = dv["bounds"]
|
||||
else:
|
||||
bounds = {"min": dv.get("min", 0), "max": dv.get("max", 1)}
|
||||
|
||||
# Ensure min < max (fix degenerate cases)
|
||||
if bounds["min"] >= bounds["max"]:
|
||||
# Expand bounds slightly around the value
|
||||
val = bounds["min"]
|
||||
if val == 0:
|
||||
bounds = {"min": -0.001, "max": 0.001}
|
||||
else:
|
||||
bounds = {"min": val * 0.99, "max": val * 1.01}
|
||||
|
||||
# Determine type
|
||||
dv_type = dv.get("type", "continuous")
|
||||
if dv_type not in ["continuous", "integer", "categorical"]:
|
||||
dv_type = "continuous"
|
||||
|
||||
new_dv = {
|
||||
"id": f"dv_{self._dv_counter:03d}",
|
||||
"name": dv.get("name", f"param_{self._dv_counter}"),
|
||||
"expression_name": dv.get("expression_name", dv.get("parameter", dv.get("name", ""))),
|
||||
"type": dv_type,
|
||||
"bounds": bounds,
|
||||
"baseline": dv.get("baseline", dv.get("initial")),
|
||||
"units": dv.get("units", dv.get("unit", "")),
|
||||
"enabled": dv.get("enabled", True),
|
||||
"description": dv.get("description", dv.get("notes", "")),
|
||||
"canvas_position": {"x": 50, "y": 100 + (self._dv_counter - 1) * 80}
|
||||
}
|
||||
|
||||
dvs.append(new_dv)
|
||||
|
||||
return dvs
|
||||
|
||||
# =========================================================================
|
||||
# Extractors and Objectives Migration
|
||||
# =========================================================================
|
||||
|
||||
    def _migrate_extractors_and_objectives(
        self,
        config: Dict,
        config_type: str
    ) -> Tuple[List[Dict], List[Dict]]:
        """
        Migrate extractors and objectives together.

        They are migrated in one pass because each objective carries the id
        of the extractor that produces its metric. Three paths:
        mirror -> one Zernike extractor; structural -> extractor from
        extraction_settings or inferred from objective names; otherwise
        (canvas intent / generic) -> pass extractors through and attach
        objectives to the first one (inferring if none exist).

        Returns tuple of (extractors, objectives).
        """
        extractors = []
        objectives = []

        # Handle mirror/zernike configs
        if config_type == "mirror" and "zernike_settings" in config:
            extractor = self._create_zernike_extractor(config)
            extractors.append(extractor)

            # Create objectives from config, all bound to the one extractor.
            for obj in config.get("objectives", []):
                self._objective_counter += 1
                objectives.append(self._create_objective(obj, extractor["id"]))

        # Handle structural configs
        elif config_type == "structural":
            # Create extractors based on extraction_settings
            if "extraction_settings" in config:
                extractor = self._create_structural_extractor(config)
                extractors.append(extractor)
                ext_id = extractor["id"]
            else:
                # Infer extractors from objectives
                ext_id = None

            for obj in config.get("objectives", []):
                self._objective_counter += 1

                # Infer extractor if not yet created (first objective's name
                # decides the extractor type for all subsequent objectives).
                if ext_id is None:
                    inferred_type = self._infer_extractor_type(obj.get("name", ""))
                    ext_id = self._get_or_create_extractor(extractors, inferred_type, obj.get("name", ""))

                objectives.append(self._create_objective(obj, ext_id))

        # Handle canvas intent or generic
        else:
            # Pass through existing extractors if present, assigning ids where
            # missing.
            for ext in config.get("extractors", []):
                self._extractor_counter += 1
                ext_copy = dict(ext)
                if "id" not in ext_copy:
                    ext_copy["id"] = f"ext_{self._extractor_counter:03d}"
                extractors.append(ext_copy)

            # Create objectives
            for obj in config.get("objectives", []):
                self._objective_counter += 1

                # Find or create extractor (first existing extractor wins).
                ext_id = None
                if extractors:
                    ext_id = extractors[0]["id"]
                else:
                    inferred_type = self._infer_extractor_type(obj.get("name", ""))
                    ext_id = self._get_or_create_extractor(extractors, inferred_type, obj.get("name", ""))

                objectives.append(self._create_objective(obj, ext_id))

        return extractors, objectives
|
||||
|
||||
def _create_zernike_extractor(self, config: Dict) -> Dict:
|
||||
"""Create a Zernike OPD extractor from config."""
|
||||
self._extractor_counter += 1
|
||||
|
||||
zs = config.get("zernike_settings", {})
|
||||
em = config.get("extraction_method", {})
|
||||
|
||||
# Collect all output names from objectives
|
||||
outputs = []
|
||||
for obj in config.get("objectives", []):
|
||||
obj_name = obj.get("name", "")
|
||||
outputs.append({
|
||||
"name": obj_name,
|
||||
"metric": "filtered_rms_nm"
|
||||
})
|
||||
|
||||
# Get outer radius with sensible default for telescope mirrors
|
||||
outer_radius = em.get("outer_radius", zs.get("outer_radius"))
|
||||
if outer_radius is None:
|
||||
# Default to typical M1 mirror outer radius
|
||||
outer_radius = 500.0
|
||||
|
||||
extractor = {
|
||||
"id": f"ext_{self._extractor_counter:03d}",
|
||||
"name": "Zernike WFE Extractor",
|
||||
"type": "zernike_opd",
|
||||
"builtin": True,
|
||||
"config": {
|
||||
"inner_radius_mm": em.get("inner_radius", zs.get("inner_radius", 0)),
|
||||
"outer_radius_mm": outer_radius,
|
||||
"n_modes": zs.get("n_modes", 40),
|
||||
"filter_low_orders": zs.get("filter_low_orders", 4),
|
||||
"displacement_unit": zs.get("displacement_unit", "mm"),
|
||||
"reference_subcase": int(zs.get("reference_subcase", 1))
|
||||
},
|
||||
"outputs": outputs,
|
||||
"canvas_position": {"x": 740, "y": 100}
|
||||
}
|
||||
|
||||
return extractor
|
||||
|
||||
def _create_structural_extractor(self, config: Dict) -> Dict:
|
||||
"""Create extractor from extraction_settings."""
|
||||
self._extractor_counter += 1
|
||||
|
||||
es = config.get("extraction_settings", {})
|
||||
|
||||
# Infer type from extractor class name
|
||||
extractor_class = es.get("extractor_class", "")
|
||||
if "stiffness" in extractor_class.lower():
|
||||
ext_type = "displacement"
|
||||
elif "stress" in extractor_class.lower():
|
||||
ext_type = "stress"
|
||||
elif "frequency" in extractor_class.lower():
|
||||
ext_type = "frequency"
|
||||
else:
|
||||
ext_type = "displacement"
|
||||
|
||||
# Create outputs from objectives
|
||||
outputs = []
|
||||
for obj in config.get("objectives", []):
|
||||
outputs.append({
|
||||
"name": obj.get("name", "output"),
|
||||
"metric": es.get("displacement_aggregation", "max")
|
||||
})
|
||||
|
||||
extractor = {
|
||||
"id": f"ext_{self._extractor_counter:03d}",
|
||||
"name": f"{extractor_class or 'Results'} Extractor",
|
||||
"type": ext_type,
|
||||
"builtin": True,
|
||||
"config": {
|
||||
"result_type": es.get("displacement_component", "z"),
|
||||
"metric": es.get("displacement_aggregation", "max")
|
||||
},
|
||||
"outputs": outputs,
|
||||
"canvas_position": {"x": 740, "y": 100}
|
||||
}
|
||||
|
||||
return extractor
|
||||
|
||||
def _infer_extractor_type(self, objective_name: str) -> str:
|
||||
"""Infer extractor type from objective name."""
|
||||
name_lower = objective_name.lower()
|
||||
|
||||
for pattern, ext_type in self.EXTRACTOR_INFERENCE.items():
|
||||
if re.search(pattern, name_lower):
|
||||
return ext_type
|
||||
|
||||
return "displacement" # Default
|
||||
|
||||
def _get_or_create_extractor(
|
||||
self,
|
||||
extractors: List[Dict],
|
||||
ext_type: str,
|
||||
output_name: str
|
||||
) -> str:
|
||||
"""Get existing extractor of type or create new one."""
|
||||
# Look for existing
|
||||
for ext in extractors:
|
||||
if ext.get("type") == ext_type:
|
||||
# Add output if not present
|
||||
output_names = {o["name"] for o in ext.get("outputs", [])}
|
||||
if output_name not in output_names:
|
||||
ext["outputs"].append({"name": output_name, "metric": "total"})
|
||||
return ext["id"]
|
||||
|
||||
# Create new
|
||||
self._extractor_counter += 1
|
||||
ext_id = f"ext_{self._extractor_counter:03d}"
|
||||
|
||||
extractor = {
|
||||
"id": ext_id,
|
||||
"name": f"{ext_type.title()} Extractor",
|
||||
"type": ext_type,
|
||||
"builtin": True,
|
||||
"outputs": [{"name": output_name, "metric": "total"}],
|
||||
"canvas_position": {"x": 740, "y": 100 + (len(extractors)) * 150}
|
||||
}
|
||||
|
||||
extractors.append(extractor)
|
||||
return ext_id
|
||||
|
||||
def _create_objective(self, obj: Dict, extractor_id: str) -> Dict:
|
||||
"""Create objective from old format."""
|
||||
# Normalize direction
|
||||
direction = obj.get("direction", obj.get("type", obj.get("goal", "minimize")))
|
||||
if direction not in ["minimize", "maximize"]:
|
||||
direction = "minimize" if "min" in direction.lower() else "maximize"
|
||||
|
||||
obj_name = obj.get("name", f"objective_{self._objective_counter}")
|
||||
|
||||
return {
|
||||
"id": f"obj_{self._objective_counter:03d}",
|
||||
"name": obj.get("description", obj_name),
|
||||
"direction": direction,
|
||||
"weight": obj.get("weight", 1.0),
|
||||
"source": {
|
||||
"extractor_id": extractor_id,
|
||||
"output_name": obj_name
|
||||
},
|
||||
"target": obj.get("target"),
|
||||
"units": obj.get("units", ""),
|
||||
"canvas_position": {"x": 1020, "y": 100 + (self._objective_counter - 1) * 100}
|
||||
}
|
||||
|
||||
# =========================================================================
|
||||
# Constraints Migration
|
||||
# =========================================================================
|
||||
|
||||
def _migrate_constraints(self, config: Dict, extractors: List[Dict]) -> List[Dict]:
|
||||
"""Migrate constraints."""
|
||||
constraints = []
|
||||
|
||||
for con in config.get("constraints", []):
|
||||
self._constraint_counter += 1
|
||||
|
||||
# Determine constraint type
|
||||
con_type = con.get("type", "hard")
|
||||
if con_type not in ["hard", "soft"]:
|
||||
# Infer from type field
|
||||
if con_type in ["less_than", "greater_than", "less_equal", "greater_equal"]:
|
||||
con_type = "hard"
|
||||
|
||||
# Determine operator
|
||||
operator = con.get("operator", "<=")
|
||||
old_type = con.get("type", "")
|
||||
if "less" in old_type:
|
||||
operator = "<=" if "equal" in old_type else "<"
|
||||
elif "greater" in old_type:
|
||||
operator = ">=" if "equal" in old_type else ">"
|
||||
|
||||
# Try to parse expression for threshold
|
||||
threshold = con.get("threshold", con.get("value"))
|
||||
if threshold is None and "expression" in con:
|
||||
# Parse from expression like "mass_kg <= 120.0"
|
||||
match = re.search(r'([<>=!]+)\s*([\d.]+)', con["expression"])
|
||||
if match:
|
||||
operator = match.group(1)
|
||||
threshold = float(match.group(2))
|
||||
|
||||
# Find or create extractor for constraint
|
||||
con_name = con.get("name", "constraint")
|
||||
extractor_id = None
|
||||
output_name = con_name
|
||||
|
||||
# Check if name matches existing objective (share extractor)
|
||||
for ext in extractors:
|
||||
for out in ext.get("outputs", []):
|
||||
if con_name.replace("_max", "").replace("_min", "") in out["name"]:
|
||||
extractor_id = ext["id"]
|
||||
output_name = out["name"]
|
||||
break
|
||||
if extractor_id:
|
||||
break
|
||||
|
||||
# If no match, use first extractor or create mass extractor for mass constraints
|
||||
if extractor_id is None:
|
||||
if "mass" in con_name.lower():
|
||||
# Check if mass extractor exists
|
||||
for ext in extractors:
|
||||
if ext.get("type") == "mass":
|
||||
extractor_id = ext["id"]
|
||||
break
|
||||
|
||||
if extractor_id is None:
|
||||
# Create mass extractor
|
||||
ext_id = f"ext_{len(extractors) + 1:03d}"
|
||||
extractors.append({
|
||||
"id": ext_id,
|
||||
"name": "Mass Extractor",
|
||||
"type": "mass",
|
||||
"builtin": True,
|
||||
"outputs": [{"name": "mass_kg", "metric": "total"}],
|
||||
"canvas_position": {"x": 740, "y": 100 + len(extractors) * 150}
|
||||
})
|
||||
extractor_id = ext_id
|
||||
output_name = "mass_kg"
|
||||
elif extractors:
|
||||
extractor_id = extractors[0]["id"]
|
||||
output_name = extractors[0]["outputs"][0]["name"] if extractors[0].get("outputs") else con_name
|
||||
|
||||
constraint = {
|
||||
"id": f"con_{self._constraint_counter:03d}",
|
||||
"name": con.get("description", con_name),
|
||||
"type": con_type if con_type in ["hard", "soft"] else "hard",
|
||||
"operator": operator,
|
||||
"threshold": threshold or 0,
|
||||
"source": {
|
||||
"extractor_id": extractor_id or "ext_001",
|
||||
"output_name": output_name
|
||||
},
|
||||
"penalty_config": {
|
||||
"method": "quadratic",
|
||||
"weight": con.get("penalty_weight", 1000.0)
|
||||
},
|
||||
"canvas_position": {"x": 1020, "y": 400 + (self._constraint_counter - 1) * 100}
|
||||
}
|
||||
|
||||
constraints.append(constraint)
|
||||
|
||||
return constraints
|
||||
|
||||
# =========================================================================
|
||||
# Optimization Migration
|
||||
# =========================================================================
|
||||
|
||||
def _migrate_optimization(self, config: Dict, config_type: str) -> Dict:
    """Migrate optimization settings.

    Builds the v2 ``optimization`` node: a normalized algorithm name with
    algorithm-specific config, a trial/time budget, and — when the legacy
    config carries ``sat_settings`` — an ensemble surrogate section.

    Args:
        config: Legacy config dict.
        config_type: Detected legacy config flavor (not used here currently).

    Returns:
        v2 ``optimization`` dict.
    """
    # Legacy configs stored settings under either key; default to empty.
    if "optimization" in config:
        opt = config["optimization"]
    elif "optimization_settings" in config:
        opt = config["optimization_settings"]
    else:
        opt = {}

    # Normalize algorithm name ("sampler" is the older legacy key).
    algo = opt.get("algorithm", opt.get("sampler", "TPE"))
    # Lower-cased legacy spellings -> canonical schema names.
    algo_map = {
        "tpe": "TPE",
        "tpesampler": "TPE",
        "cma-es": "CMA-ES",
        "cmaes": "CMA-ES",
        "nsga-ii": "NSGA-II",
        "nsgaii": "NSGA-II",
        "nsga2": "NSGA-II",
        "random": "RandomSearch",
        "randomsampler": "RandomSearch",
        "randomsearch": "RandomSearch",
        "sat": "SAT_v3",
        "sat_v3": "SAT_v3",
        "turbo": "SAT_v3",
        "gp": "GP-BO",
        "gp-bo": "GP-BO",
        "gpbo": "GP-BO",
        "bo": "GP-BO",
        "bayesian": "GP-BO"
    }
    # Valid algorithm types for schema
    valid_algorithms = {"TPE", "CMA-ES", "NSGA-II", "RandomSearch", "SAT_v3", "GP-BO"}
    algo = algo_map.get(algo.lower(), algo)
    # Fallback to TPE if still invalid
    if algo not in valid_algorithms:
        algo = "TPE"

    optimization = {
        "algorithm": {
            "type": algo,
            "config": {}
        },
        "budget": {
            "max_trials": opt.get("n_trials", 100)
        },
        "canvas_position": {"x": 1300, "y": 150}
    }

    # Algorithm-specific config, with the legacy defaults preserved.
    if algo == "CMA-ES":
        optimization["algorithm"]["config"]["sigma0"] = opt.get("sigma0", 0.3)
    elif algo == "NSGA-II":
        optimization["algorithm"]["config"]["population_size"] = opt.get("population_size", 50)
    elif algo == "TPE":
        optimization["algorithm"]["config"]["n_startup_trials"] = opt.get("n_startup_trials", 10)

    # Carry the RNG seed through only when the legacy config set one.
    if "seed" in opt:
        optimization["algorithm"]["config"]["seed"] = opt["seed"]

    # Legacy timeout is in seconds; the v2 budget uses hours.
    if opt.get("timeout"):
        optimization["budget"]["max_time_hours"] = opt["timeout"] / 3600

    # SAT/surrogate settings map onto an ensemble surrogate section.
    if "sat_settings" in config:
        sat = config["sat_settings"]
        optimization["surrogate"] = {
            "enabled": True,
            "type": "ensemble",
            "config": {
                "n_models": sat.get("n_ensemble_models", 10),
                "architecture": sat.get("hidden_dims", [256, 128]),
                "train_every_n_trials": sat.get("retrain_frequency", 20),
                "min_training_samples": sat.get("min_samples", 30)
            }
        }

    return optimization
|
||||
|
||||
# =========================================================================
|
||||
# Workflow Migration
|
||||
# =========================================================================
|
||||
|
||||
def _migrate_workflow(self, config: Dict) -> Dict:
|
||||
"""Migrate SAT/turbo workflow settings."""
|
||||
sat = config.get("sat_settings", {})
|
||||
|
||||
exploration_trials = sat.get("min_samples", 30)
|
||||
total_trials = config.get("optimization", {}).get("n_trials", 100)
|
||||
|
||||
return {
|
||||
"stages": [
|
||||
{
|
||||
"id": "stage_exploration",
|
||||
"name": "Design Space Exploration",
|
||||
"algorithm": "RandomSearch",
|
||||
"trials": exploration_trials,
|
||||
"purpose": "Build initial training data for surrogate"
|
||||
},
|
||||
{
|
||||
"id": "stage_optimization",
|
||||
"name": "Surrogate-Assisted Optimization",
|
||||
"algorithm": "SAT_v3",
|
||||
"trials": total_trials - exploration_trials,
|
||||
"purpose": "Neural-accelerated optimization"
|
||||
}
|
||||
],
|
||||
"transitions": [
|
||||
{
|
||||
"from": "stage_exploration",
|
||||
"to": "stage_optimization",
|
||||
"condition": f"trial_count >= {exploration_trials}"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
# =========================================================================
|
||||
# Canvas Edge Generation
|
||||
# =========================================================================
|
||||
|
||||
def _generate_edges(self, spec: Dict) -> List[Dict]:
|
||||
"""Generate canvas edges connecting nodes."""
|
||||
edges = []
|
||||
|
||||
# DVs -> model
|
||||
for dv in spec.get("design_variables", []):
|
||||
edges.append({"source": dv["id"], "target": "model"})
|
||||
|
||||
# model -> solver
|
||||
edges.append({"source": "model", "target": "solver"})
|
||||
|
||||
# solver -> extractors
|
||||
for ext in spec.get("extractors", []):
|
||||
edges.append({"source": "solver", "target": ext["id"]})
|
||||
|
||||
# extractors -> objectives
|
||||
for obj in spec.get("objectives", []):
|
||||
ext_id = obj.get("source", {}).get("extractor_id")
|
||||
if ext_id:
|
||||
edges.append({"source": ext_id, "target": obj["id"]})
|
||||
|
||||
# extractors -> constraints
|
||||
for con in spec.get("constraints", []):
|
||||
ext_id = con.get("source", {}).get("extractor_id")
|
||||
if ext_id:
|
||||
edges.append({"source": ext_id, "target": con["id"]})
|
||||
|
||||
# objectives -> optimization
|
||||
for obj in spec.get("objectives", []):
|
||||
edges.append({"source": obj["id"], "target": "optimization"})
|
||||
|
||||
# constraints -> optimization
|
||||
for con in spec.get("constraints", []):
|
||||
edges.append({"source": con["id"], "target": "optimization"})
|
||||
|
||||
return edges
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Convenience Functions
|
||||
# ============================================================================
|
||||
|
||||
def migrate_config(
    old_config: Dict[str, Any],
    study_name: Optional[str] = None
) -> Dict[str, Any]:
    """
    Migrate old config dict to AtomizerSpec v2.0.

    Thin convenience wrapper around :class:`SpecMigrator`.

    Args:
        old_config: Legacy config dict
        study_name: Override study name

    Returns:
        AtomizerSpec v2.0 dict
    """
    return SpecMigrator().migrate(old_config, study_name)
|
||||
|
||||
|
||||
def migrate_config_file(
    config_path: Union[str, Path],
    output_path: Optional[Union[str, Path]] = None
) -> Dict[str, Any]:
    """
    Migrate a config file to AtomizerSpec v2.0.

    Thin convenience wrapper around :class:`SpecMigrator`.

    Args:
        config_path: Path to old config file
        output_path: Path to save new spec (optional)

    Returns:
        AtomizerSpec v2.0 dict
    """
    return SpecMigrator().migrate_file(config_path, output_path)
|
||||
674
optimization_engine/config/spec_models.py
Normal file
674
optimization_engine/config/spec_models.py
Normal file
@@ -0,0 +1,674 @@
|
||||
"""
|
||||
AtomizerSpec v2.0 Pydantic Models
|
||||
|
||||
These models match the JSON Schema at optimization_engine/schemas/atomizer_spec_v2.json
|
||||
They provide validation and type safety for the unified configuration system.
|
||||
"""
|
||||
|
||||
import re
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Enums
|
||||
# ============================================================================
|
||||
|
||||
class SpecCreatedBy(str, Enum):
    """Who/what created the spec.

    str-valued so members serialize directly as their JSON strings.
    """
    CANVAS = "canvas"
    CLAUDE = "claude"
    API = "api"
    MIGRATION = "migration"
    MANUAL = "manual"
|
||||
|
||||
|
||||
class SolverType(str, Enum):
    """Supported solver types.

    Note both "nastran" and "NX_Nastran" are accepted spellings.
    """
    NASTRAN = "nastran"
    NX_NASTRAN = "NX_Nastran"
    ABAQUS = "abaqus"
|
||||
|
||||
|
||||
class SubcaseType(str, Enum):
    """Subcase analysis types."""
    STATIC = "static"
    MODAL = "modal"
    THERMAL = "thermal"
    BUCKLING = "buckling"
|
||||
|
||||
|
||||
class DesignVariableType(str, Enum):
    """Design variable types (value domain of a variable)."""
    CONTINUOUS = "continuous"
    INTEGER = "integer"
    CATEGORICAL = "categorical"
|
||||
|
||||
|
||||
class ExtractorType(str, Enum):
    """Physics extractor types.

    CUSTOM_FUNCTION additionally requires an Extractor.function definition
    (enforced by Extractor's validator).
    """
    DISPLACEMENT = "displacement"
    FREQUENCY = "frequency"
    STRESS = "stress"
    MASS = "mass"
    MASS_EXPRESSION = "mass_expression"
    ZERNIKE_OPD = "zernike_opd"
    ZERNIKE_CSV = "zernike_csv"
    TEMPERATURE = "temperature"
    CUSTOM_FUNCTION = "custom_function"
|
||||
|
||||
|
||||
class OptimizationDirection(str, Enum):
    """Optimization direction for an objective."""
    MINIMIZE = "minimize"
    MAXIMIZE = "maximize"
|
||||
|
||||
|
||||
class ConstraintType(str, Enum):
    """Constraint types: hard (must hold) vs soft (penalized)."""
    HARD = "hard"
    SOFT = "soft"
|
||||
|
||||
|
||||
class ConstraintOperator(str, Enum):
    """Constraint comparison operators (value vs threshold)."""
    LE = "<="
    GE = ">="
    LT = "<"
    GT = ">"
    EQ = "=="
|
||||
|
||||
|
||||
class PenaltyMethod(str, Enum):
    """Penalty methods for (soft) constraints."""
    LINEAR = "linear"
    QUADRATIC = "quadratic"
    EXPONENTIAL = "exponential"
|
||||
|
||||
|
||||
class AlgorithmType(str, Enum):
    """Optimization algorithm types (canonical schema spellings)."""
    TPE = "TPE"
    CMA_ES = "CMA-ES"
    NSGA_II = "NSGA-II"
    RANDOM_SEARCH = "RandomSearch"
    SAT_V3 = "SAT_v3"
    GP_BO = "GP-BO"
|
||||
|
||||
|
||||
class SurrogateType(str, Enum):
    """Surrogate model types."""
    MLP = "MLP"
    GNN = "GNN"
    ENSEMBLE = "ensemble"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Position Model
|
||||
# ============================================================================
|
||||
|
||||
class CanvasPosition(BaseModel):
    """Canvas position for nodes.

    x/y are canvas coordinates; both default to the origin.
    """
    x: float = 0
    y: float = 0
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Meta Models
|
||||
# ============================================================================
|
||||
|
||||
class SpecMeta(BaseModel):
    """Metadata about the spec.

    Identifies the schema version, authorship/timestamps, and the study
    the spec belongs to. Only ``version`` and ``study_name`` are required.
    """
    version: str = Field(
        ...,
        pattern=r"^2\.\d+$",
        description="Schema version (e.g., '2.0')"
    )
    created: Optional[datetime] = Field(
        default=None,
        description="When the spec was created"
    )
    modified: Optional[datetime] = Field(
        default=None,
        description="When the spec was last modified"
    )
    created_by: Optional[SpecCreatedBy] = Field(
        default=None,
        description="Who/what created the spec"
    )
    modified_by: Optional[str] = Field(
        default=None,
        description="Who/what last modified the spec"
    )
    study_name: str = Field(
        ...,
        min_length=3,
        max_length=100,
        pattern=r"^[a-z0-9_]+$",
        description="Unique study identifier (snake_case)"
    )
    description: Optional[str] = Field(
        default=None,
        max_length=1000,
        description="Human-readable description"
    )
    tags: Optional[List[str]] = Field(
        default=None,
        description="Tags for categorization"
    )
    engineering_context: Optional[str] = Field(
        default=None,
        description="Real-world engineering context"
    )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Model Configuration Models
|
||||
# ============================================================================
|
||||
|
||||
class NxPartConfig(BaseModel):
    """NX geometry part file configuration."""
    path: Optional[str] = Field(default=None, description="Path to .prt file")
    hash: Optional[str] = Field(default=None, description="File hash for change detection")
    idealized_part: Optional[str] = Field(default=None, description="Idealized part filename (_i.prt)")
|
||||
|
||||
|
||||
class FemConfig(BaseModel):
    """FEM mesh file configuration (path plus optional mesh statistics)."""
    path: Optional[str] = Field(default=None, description="Path to .fem file")
    element_count: Optional[int] = Field(default=None, description="Number of elements")
    node_count: Optional[int] = Field(default=None, description="Number of nodes")
|
||||
|
||||
|
||||
class Subcase(BaseModel):
    """Simulation subcase definition (ID is the only required field)."""
    id: int
    name: Optional[str] = None
    type: Optional[SubcaseType] = None
|
||||
|
||||
|
||||
class SimConfig(BaseModel):
    """Simulation file configuration.

    ``path`` and ``solver`` are required; the solution type must look
    like a NASTRAN solution code (e.g. SOL101).
    """
    path: str = Field(..., description="Path to .sim file")
    solver: SolverType = Field(..., description="Solver type")
    solution_type: Optional[str] = Field(
        default=None,
        pattern=r"^SOL\d+$",
        description="Solution type (e.g., SOL101)"
    )
    subcases: Optional[List[Subcase]] = Field(default=None, description="Defined subcases")
|
||||
|
||||
|
||||
class NxSettings(BaseModel):
    """NX runtime settings."""
    nx_install_path: Optional[str] = None
    # Bounded to [60 s, 2 h] per simulation run.
    simulation_timeout_s: Optional[int] = Field(default=None, ge=60, le=7200)
    auto_start_nx: Optional[bool] = None
|
||||
|
||||
|
||||
class ModelConfig(BaseModel):
    """NX model files and configuration (``sim`` is the only required part)."""
    nx_part: Optional[NxPartConfig] = None
    fem: Optional[FemConfig] = None
    sim: SimConfig
    nx_settings: Optional[NxSettings] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Design Variable Models
|
||||
# ============================================================================
|
||||
|
||||
class DesignVariableBounds(BaseModel):
    """Design variable bounds.

    NOTE: field names ``min``/``max`` mirror the JSON schema; they only
    shadow the builtins as attribute names.
    """
    min: float
    max: float

    @model_validator(mode='after')
    def validate_bounds(self) -> 'DesignVariableBounds':
        # Strict inequality: degenerate ranges (min == max) are rejected too.
        if self.min >= self.max:
            raise ValueError(f"min ({self.min}) must be less than max ({self.max})")
        return self
|
||||
|
||||
|
||||
class DesignVariable(BaseModel):
    """A design variable to optimize.

    The ``expression_name`` must match an expression in the NX model; the
    ID follows the canvas node convention ``dv_XXX``.
    """
    id: str = Field(
        ...,
        pattern=r"^dv_\d{3}$",
        description="Unique identifier (pattern: dv_XXX)"
    )
    name: str = Field(..., description="Human-readable name")
    expression_name: str = Field(
        ...,
        pattern=r"^[a-zA-Z_][a-zA-Z0-9_]*$",
        description="NX expression name (must match model)"
    )
    type: DesignVariableType = Field(..., description="Variable type")
    bounds: DesignVariableBounds = Field(..., description="Value bounds")
    baseline: Optional[float] = Field(default=None, description="Current/initial value")
    units: Optional[str] = Field(default=None, description="Physical units (mm, deg, etc.)")
    step: Optional[float] = Field(default=None, description="Step size for integer/discrete")
    enabled: bool = Field(default=True, description="Whether to include in optimization")
    description: Optional[str] = None
    canvas_position: Optional[CanvasPosition] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Extractor Models
|
||||
# ============================================================================
|
||||
|
||||
class ExtractorConfig(BaseModel):
    """Type-specific extractor configuration.

    A single bag of optional fields shared by all extractor types; extras
    are kept (not rejected) so custom extractors can carry their own
    settings.
    """
    inner_radius_mm: Optional[float] = None
    outer_radius_mm: Optional[float] = None
    n_modes: Optional[int] = None
    filter_low_orders: Optional[int] = None
    displacement_unit: Optional[str] = None
    reference_subcase: Optional[int] = None
    expression_name: Optional[str] = None
    mode_number: Optional[int] = None
    element_type: Optional[str] = None
    result_type: Optional[str] = None
    metric: Optional[str] = None

    # Pydantic v2 configuration; replaces the deprecated inner
    # `class Config` with the same "allow extra fields" behavior.
    model_config = ConfigDict(extra="allow")
|
||||
|
||||
|
||||
class CustomFunction(BaseModel):
    """Custom function definition for custom_function extractors."""
    name: Optional[str] = Field(default=None, description="Function name")
    module: Optional[str] = Field(default=None, description="Python module path")
    signature: Optional[str] = Field(default=None, description="Function signature")
    source_code: Optional[str] = Field(default=None, description="Python source code")
|
||||
|
||||
|
||||
class ExtractorOutput(BaseModel):
    """Output definition for an extractor.

    The ``name`` is the handle objectives/constraints reference via
    their ``source.output_name``.
    """
    name: str = Field(..., description="Output name (used by objectives/constraints)")
    metric: Optional[str] = Field(default=None, description="Specific metric (max, total, rms, etc.)")
    subcase: Optional[int] = Field(default=None, description="Subcase ID for this output")
    units: Optional[str] = None
|
||||
|
||||
|
||||
class Extractor(BaseModel):
    """Physics extractor that computes outputs from FEA.

    Requires at least one output; ``custom_function`` extractors must also
    supply a ``function`` definition (validated below).
    """
    id: str = Field(
        ...,
        pattern=r"^ext_\d{3}$",
        description="Unique identifier (pattern: ext_XXX)"
    )
    name: str = Field(..., description="Human-readable name")
    type: ExtractorType = Field(..., description="Extractor type")
    builtin: bool = Field(default=True, description="Whether this is a built-in extractor")
    config: Optional[ExtractorConfig] = Field(default=None, description="Type-specific configuration")
    function: Optional[CustomFunction] = Field(
        default=None,
        description="Custom function definition (for custom_function type)"
    )
    outputs: List[ExtractorOutput] = Field(..., min_length=1, description="Output values")
    canvas_position: Optional[CanvasPosition] = None

    @model_validator(mode='after')
    def validate_custom_function(self) -> 'Extractor':
        # custom_function extractors are meaningless without a function body.
        if self.type == ExtractorType.CUSTOM_FUNCTION and self.function is None:
            raise ValueError("custom_function extractor requires function definition")
        return self
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Objective Models
|
||||
# ============================================================================
|
||||
|
||||
class ObjectiveSource(BaseModel):
    """Source reference for objective value."""
    extractor_id: str = Field(..., description="Reference to extractor")
    output_name: str = Field(..., description="Which output from the extractor")
|
||||
|
||||
|
||||
class Objective(BaseModel):
    """Optimization objective, fed by one extractor output."""
    id: str = Field(
        ...,
        pattern=r"^obj_\d{3}$",
        description="Unique identifier (pattern: obj_XXX)"
    )
    name: str = Field(..., description="Human-readable name")
    direction: OptimizationDirection = Field(..., description="Optimization direction")
    # Non-negative; used when objectives are combined as a weighted sum.
    weight: float = Field(default=1.0, ge=0, description="Weight for weighted sum")
    source: ObjectiveSource = Field(..., description="Where the value comes from")
    target: Optional[float] = Field(default=None, description="Target value (for goal programming)")
    units: Optional[str] = None
    description: Optional[str] = None
    canvas_position: Optional[CanvasPosition] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Constraint Models
|
||||
# ============================================================================
|
||||
|
||||
class ConstraintSource(BaseModel):
    """Source reference for constraint value (extractor output handle)."""
    extractor_id: str
    output_name: str
|
||||
|
||||
|
||||
class PenaltyConfig(BaseModel):
    """Penalty method configuration for constraints."""
    method: Optional[PenaltyMethod] = None
    weight: Optional[float] = None
    margin: Optional[float] = Field(default=None, description="Soft margin before penalty kicks in")
|
||||
|
||||
|
||||
class Constraint(BaseModel):
    """Hard or soft constraint of the form: source value <op> threshold."""
    id: str = Field(
        ...,
        pattern=r"^con_\d{3}$",
        description="Unique identifier (pattern: con_XXX)"
    )
    name: str
    type: ConstraintType = Field(..., description="Constraint type")
    operator: ConstraintOperator = Field(..., description="Comparison operator")
    threshold: float = Field(..., description="Constraint threshold value")
    source: ConstraintSource = Field(..., description="Where the value comes from")
    penalty_config: Optional[PenaltyConfig] = None
    description: Optional[str] = None
    canvas_position: Optional[CanvasPosition] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Optimization Models
|
||||
# ============================================================================
|
||||
|
||||
class AlgorithmConfig(BaseModel):
    """Algorithm-specific settings.

    Extra keys are kept so each algorithm can carry its own parameters
    beyond the common ones declared here.
    """
    population_size: Optional[int] = None
    n_generations: Optional[int] = None
    mutation_prob: Optional[float] = None
    crossover_prob: Optional[float] = None
    seed: Optional[int] = None
    n_startup_trials: Optional[int] = None
    sigma0: Optional[float] = None

    # Pydantic v2 configuration; replaces the deprecated inner
    # `class Config` with the same "allow extra fields" behavior.
    model_config = ConfigDict(extra="allow")
|
||||
|
||||
|
||||
class Algorithm(BaseModel):
    """Optimization algorithm configuration (type plus optional settings)."""
    type: AlgorithmType
    config: Optional[AlgorithmConfig] = None
|
||||
|
||||
|
||||
class OptimizationBudget(BaseModel):
    """Computational budget for optimization."""
    # 1..10000 trials when set.
    max_trials: Optional[int] = Field(default=None, ge=1, le=10000)
    max_time_hours: Optional[float] = None
    convergence_patience: Optional[int] = Field(
        default=None,
        description="Stop if no improvement for N trials"
    )
|
||||
|
||||
|
||||
class SurrogateConfig(BaseModel):
    """Neural surrogate model configuration."""
    n_models: Optional[int] = None
    architecture: Optional[List[int]] = None
    train_every_n_trials: Optional[int] = None
    min_training_samples: Optional[int] = None
    acquisition_candidates: Optional[int] = None
    fea_validations_per_round: Optional[int] = None
|
||||
|
||||
|
||||
class Surrogate(BaseModel):
    """Surrogate model settings (all optional; absent means disabled)."""
    enabled: Optional[bool] = None
    type: Optional[SurrogateType] = None
    config: Optional[SurrogateConfig] = None
|
||||
|
||||
|
||||
class OptimizationConfig(BaseModel):
    """Optimization algorithm configuration (algorithm + budget required)."""
    algorithm: Algorithm
    budget: OptimizationBudget
    surrogate: Optional[Surrogate] = None
    canvas_position: Optional[CanvasPosition] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Workflow Models
|
||||
# ============================================================================
|
||||
|
||||
class WorkflowStage(BaseModel):
    """A stage in a multi-stage optimization workflow."""
    # Unique stage identifier, referenced by WorkflowTransition.from_/to.
    id: str
    name: str
    # Optional per-stage settings; None presumably inherits study-level
    # configuration -- confirm with the workflow runner.
    algorithm: Optional[str] = None
    trials: Optional[int] = None
    # Free-form description of what this stage is for.
    purpose: Optional[str] = None
|
||||
|
||||
|
||||
class WorkflowTransition(BaseModel):
    """Transition between workflow stages."""
    # Aliased because "from" is a reserved keyword in Python; serialized
    # form uses "from".
    from_: str = Field(..., alias="from")
    to: str
    # Optional guard condition (free-form string; grammar defined by the
    # consumer -- confirm before relying on a specific format).
    condition: Optional[str] = None

    class Config:
        # Accept both "from" (alias) and "from_" (field name) on input.
        populate_by_name = True
|
||||
|
||||
|
||||
class Workflow(BaseModel):
    """Multi-stage optimization workflow."""
    stages: Optional[List[WorkflowStage]] = None
    # Directed transitions between the stages above.
    transitions: Optional[List[WorkflowTransition]] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Reporting Models
|
||||
# ============================================================================
|
||||
|
||||
class InsightConfig(BaseModel):
    """Insight-specific configuration."""
    include_html: Optional[bool] = None
    show_pareto_evolution: Optional[bool] = None

    class Config:
        # Insight types may carry arbitrary additional settings.
        extra = "allow"
|
||||
|
||||
|
||||
class Insight(BaseModel):
    """Reporting insight definition."""
    # Insight kind identifier (free-form string).
    type: Optional[str] = None
    # Trial selector this insight applies to (string expression; semantics
    # defined by the reporting engine -- confirm the expected format).
    for_trials: Optional[str] = None
    config: Optional[InsightConfig] = None
|
||||
|
||||
|
||||
class ReportingConfig(BaseModel):
    """Reporting configuration."""
    auto_report: Optional[bool] = None
    # Event names that trigger report generation (vocabulary defined by the
    # reporting consumer -- confirm valid values).
    report_triggers: Optional[List[str]] = None
    insights: Optional[List[Insight]] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Canvas Models
|
||||
# ============================================================================
|
||||
|
||||
class CanvasViewport(BaseModel):
    """Canvas viewport settings (pan offset and zoom level)."""
    # Pan offset in canvas coordinates; defaults to the origin.
    x: float = 0
    y: float = 0
    # 1.0 == 100% zoom.
    zoom: float = 1.0
|
||||
|
||||
|
||||
class CanvasEdge(BaseModel):
    """Connection between canvas nodes."""
    # IDs of the nodes being connected.
    source: str
    target: str
    # Optional handle IDs; camelCase is kept to match the frontend edge
    # payload (NOTE(review): presumably React Flow -- confirm before renaming).
    sourceHandle: Optional[str] = None
    targetHandle: Optional[str] = None
|
||||
|
||||
|
||||
class CanvasGroup(BaseModel):
    """Grouping of canvas nodes."""
    id: str
    name: str
    # IDs of the member nodes.
    node_ids: List[str]
|
||||
|
||||
|
||||
class CanvasConfig(BaseModel):
    """Canvas UI state (persisted for reconstruction)."""
    # Version tag of the layout format (assumed to support migrations --
    # confirm how consumers interpret it).
    layout_version: Optional[str] = None
    viewport: Optional[CanvasViewport] = None
    edges: Optional[List[CanvasEdge]] = None
    groups: Optional[List[CanvasGroup]] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Main AtomizerSpec Model
|
||||
# ============================================================================
|
||||
|
||||
class AtomizerSpec(BaseModel):
    """
    AtomizerSpec v2.0 - The unified configuration schema for Atomizer optimization studies.

    This is the single source of truth used by:
    - Canvas UI (rendering and editing)
    - Backend API (validation and storage)
    - Claude Assistant (reading and modifying)
    - Optimization Engine (execution)
    """
    meta: SpecMeta = Field(..., description="Metadata about the spec")
    model: ModelConfig = Field(..., description="NX model files and configuration")
    design_variables: List[DesignVariable] = Field(
        ...,
        min_length=1,
        max_length=50,
        description="Design variables to optimize"
    )
    extractors: List[Extractor] = Field(
        ...,
        min_length=1,
        description="Physics extractors"
    )
    objectives: List[Objective] = Field(
        ...,
        min_length=1,
        max_length=5,
        description="Optimization objectives"
    )
    constraints: Optional[List[Constraint]] = Field(
        default=None,
        description="Hard and soft constraints"
    )
    optimization: OptimizationConfig = Field(..., description="Algorithm configuration")
    workflow: Optional[Workflow] = Field(default=None, description="Multi-stage workflow")
    reporting: Optional[ReportingConfig] = Field(default=None, description="Reporting config")
    canvas: Optional[CanvasConfig] = Field(default=None, description="Canvas UI state")

    @model_validator(mode='after')
    def validate_references(self) -> 'AtomizerSpec':
        """Validate that all references are valid."""
        # Map each extractor id to the set of output names it produces.
        known_outputs = {ext.id: {out.name for out in ext.outputs} for ext in self.extractors}

        # Every objective must point at an existing extractor output.
        for obj in self.objectives:
            if obj.source.extractor_id not in known_outputs:
                raise ValueError(
                    f"Objective '{obj.name}' references unknown extractor: {obj.source.extractor_id}"
                )
            if obj.source.output_name not in known_outputs[obj.source.extractor_id]:
                raise ValueError(
                    f"Objective '{obj.name}' references unknown output: {obj.source.output_name}"
                )

        # Same check for constraints (constraints may be absent entirely).
        for con in self.constraints or []:
            if con.source.extractor_id not in known_outputs:
                raise ValueError(
                    f"Constraint '{con.name}' references unknown extractor: {con.source.extractor_id}"
                )
            if con.source.output_name not in known_outputs[con.source.extractor_id]:
                raise ValueError(
                    f"Constraint '{con.name}' references unknown output: {con.source.output_name}"
                )

        return self

    def get_enabled_design_variables(self) -> List[DesignVariable]:
        """Return only enabled design variables."""
        return list(filter(lambda dv: dv.enabled, self.design_variables))

    def get_extractor_by_id(self, extractor_id: str) -> Optional[Extractor]:
        """Find an extractor by ID; returns None if no extractor matches."""
        return next((ext for ext in self.extractors if ext.id == extractor_id), None)

    def get_objective_by_id(self, objective_id: str) -> Optional[Objective]:
        """Find an objective by ID; returns None if no objective matches."""
        return next((obj for obj in self.objectives if obj.id == objective_id), None)

    def get_constraint_by_id(self, constraint_id: str) -> Optional[Constraint]:
        """Find a constraint by ID; returns None if absent or no constraints exist."""
        return next((con for con in self.constraints or [] if con.id == constraint_id), None)

    def has_custom_extractors(self) -> bool:
        """Check if spec has any custom function extractors."""
        for ext in self.extractors:
            if ext.type == ExtractorType.CUSTOM_FUNCTION:
                return True
        return False

    def is_multi_objective(self) -> bool:
        """Check if this is a multi-objective optimization."""
        return len(self.objectives) >= 2
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Validation Response Models
|
||||
# ============================================================================
|
||||
|
||||
class ValidationError(BaseModel):
    """A validation error.

    Report-entry type (distinct from pydantic's own ValidationError).
    """
    # Error category: 'schema', 'semantic', or 'reference'.
    type: str
    # Location of the offending field as a list of path segments.
    path: List[str]
    # Human-readable description of the problem.
    message: str
|
||||
|
||||
|
||||
class ValidationWarning(BaseModel):
    """A validation warning (non-fatal; does not make a spec invalid)."""
    # Warning category (free-form string).
    type: str
    # Location of the field the warning refers to, as path segments.
    path: List[str]
    # Human-readable description.
    message: str
|
||||
|
||||
|
||||
class ValidationSummary(BaseModel):
    """Summary of spec contents (element counts)."""
    design_variables: int
    extractors: int
    objectives: int
    constraints: int
    # Number of custom-function extractors among `extractors`.
    custom_functions: int
|
||||
|
||||
|
||||
class ValidationReport(BaseModel):
    """Full validation report."""
    # True when no errors were found; warnings alone do not affect validity.
    valid: bool
    errors: List[ValidationError]
    warnings: List[ValidationWarning]
    summary: ValidationSummary
|
||||
654
optimization_engine/config/spec_validator.py
Normal file
654
optimization_engine/config/spec_validator.py
Normal file
@@ -0,0 +1,654 @@
|
||||
"""
|
||||
AtomizerSpec v2.0 Validator
|
||||
|
||||
Provides comprehensive validation including:
|
||||
- JSON Schema validation
|
||||
- Pydantic model validation
|
||||
- Semantic validation (bounds, references, dependencies)
|
||||
- Extractor-specific validation
|
||||
"""
|
||||
|
||||
import json
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

from pydantic import ValidationError as PydanticValidationError

try:
    import jsonschema
    HAS_JSONSCHEMA = True
except ImportError:
    HAS_JSONSCHEMA = False

from .spec_models import (
    AtomizerSpec,
    ValidationReport,
    ValidationError,
    ValidationWarning,
    ValidationSummary,
    ExtractorType,
    AlgorithmType,
    ConstraintType,
)
|
||||
|
||||
|
||||
class SpecValidationError(Exception):
    """Raised when spec validation fails.

    Carries the structured ValidationError entries alongside the
    human-readable message so callers can inspect individual failures.
    """

    def __init__(self, message: str, errors: Optional[List["ValidationError"]] = None):
        """
        Args:
            message: Human-readable summary of the failure.
            errors: Structured validation errors; defaults to an empty list.
        """
        super().__init__(message)
        # Normalize None to [] so callers can iterate unconditionally.
        self.errors = errors or []
|
||||
|
||||
|
||||
class SpecValidator:
    """
    Validates AtomizerSpec v2.0 configurations.

    Provides three levels of validation:
    1. JSON Schema validation (structural)
    2. Pydantic model validation (type safety)
    3. Semantic validation (business logic)
    """

    # Path to the JSON Schema file shipped alongside the package
    # (../schemas/atomizer_spec_v2.json relative to this module).
    SCHEMA_PATH = Path(__file__).parent.parent / "schemas" / "atomizer_spec_v2.json"

    def __init__(self):
        """Initialize validator; the JSON Schema is loaded lazily on first use."""
        self._schema: Optional[Dict] = None

    @property
    def schema(self) -> Dict:
        """Lazy load the JSON Schema (empty dict when the schema file is absent)."""
        if self._schema is None:
            if self.SCHEMA_PATH.exists():
                # Schema files are JSON; read as UTF-8 regardless of locale.
                with open(self.SCHEMA_PATH, encoding="utf-8") as f:
                    self._schema = json.load(f)
            else:
                self._schema = {}
        return self._schema

    def validate(
        self,
        spec_data: Union[Dict[str, Any], AtomizerSpec],
        strict: bool = True
    ) -> ValidationReport:
        """
        Validate a spec and return a detailed report.

        Args:
            spec_data: Either a dict or AtomizerSpec instance
            strict: If True, raise exception on errors; if False, return report only

        Returns:
            ValidationReport with errors, warnings, and summary

        Raises:
            SpecValidationError: If strict=True and validation fails
        """
        errors: List[ValidationError] = []
        warnings: List[ValidationWarning] = []

        # Convert to dict if needed
        if isinstance(spec_data, AtomizerSpec):
            data = spec_data.model_dump(mode='json')
        else:
            data = spec_data

        # Phase 1: JSON Schema validation
        schema_errors = self._validate_json_schema(data)
        errors.extend(schema_errors)

        # Phase 2: Pydantic model validation (only if schema passes)
        if not schema_errors:
            errors.extend(self._validate_pydantic(data))

        # Phase 3: Semantic validation (only if phases 1-2 passed, so the
        # model is guaranteed to construct cleanly here)
        if not errors:
            spec = AtomizerSpec.model_validate(data)
            semantic_errors, semantic_warnings = self._validate_semantic(spec)
            errors.extend(semantic_errors)
            warnings.extend(semantic_warnings)

        report = ValidationReport(
            valid=len(errors) == 0,
            errors=errors,
            warnings=warnings,
            summary=self._build_summary(data)
        )

        # In strict mode surface the first few problems in the exception text.
        if strict and not report.valid:
            error_messages = "; ".join(e.message for e in report.errors[:3])
            raise SpecValidationError(
                f"Spec validation failed: {error_messages}",
                errors=report.errors
            )

        return report

    def validate_partial(
        self,
        path: str,
        value: Any,
        current_spec: AtomizerSpec
    ) -> Tuple[bool, List[str]]:
        """
        Validate a partial update before applying.

        Args:
            path: JSONPath to the field being updated
            value: New value
            current_spec: Current full spec

        Returns:
            Tuple of (is_valid, list of error messages)

        Note:
            Paths rooted at an unknown section pass with no checks.
        """
        parts = self._parse_path(path)
        if not parts:
            return False, ["Invalid path format"]

        errors: List[str] = []
        root = parts[0]

        # Dispatch on the root section of the path.
        if root == "design_variables":
            errors.extend(self._validate_dv_update(parts, value, current_spec))
        elif root == "extractors":
            errors.extend(self._validate_extractor_update(parts, value, current_spec))
        elif root == "objectives":
            errors.extend(self._validate_objective_update(parts, value, current_spec))
        elif root == "constraints":
            errors.extend(self._validate_constraint_update(parts, value, current_spec))
        elif root == "optimization":
            errors.extend(self._validate_optimization_update(parts, value))
        elif root == "meta":
            errors.extend(self._validate_meta_update(parts, value))

        return len(errors) == 0, errors

    def _validate_json_schema(self, data: Dict) -> List[ValidationError]:
        """Validate against JSON Schema; no-op when jsonschema or the schema is missing."""
        errors: List[ValidationError] = []

        if not HAS_JSONSCHEMA or not self.schema:
            return errors  # Skip if jsonschema not available

        try:
            jsonschema.validate(instance=data, schema=self.schema)
        except jsonschema.ValidationError as e:
            errors.append(ValidationError(
                type="schema",
                path=list(e.absolute_path),
                message=e.message
            ))
        except jsonschema.SchemaError as e:
            # The schema file itself is malformed -- report rather than crash.
            errors.append(ValidationError(
                type="schema",
                path=[],
                message=f"Invalid schema: {e.message}"
            ))

        return errors

    def _validate_pydantic(self, data: Dict) -> List[ValidationError]:
        """Validate using Pydantic models, mapping each pydantic error to a report entry."""
        errors: List[ValidationError] = []

        try:
            AtomizerSpec.model_validate(data)
        except PydanticValidationError as e:
            for err in e.errors():
                errors.append(ValidationError(
                    type="schema",
                    path=[str(p) for p in err.get("loc", [])],
                    message=err.get("msg", "Validation error")
                ))

        return errors

    def _validate_semantic(
        self,
        spec: AtomizerSpec
    ) -> Tuple[List[ValidationError], List[ValidationWarning]]:
        """
        Perform semantic validation.

        Checks business logic and constraints that can't be expressed in schema.
        """
        errors: List[ValidationError] = []
        warnings: List[ValidationWarning] = []

        # Validate design variable bounds
        errors.extend(self._validate_dv_bounds(spec))

        # Validate extractor configurations
        errors.extend(self._validate_extractor_configs(spec))
        warnings.extend(self._warn_extractor_configs(spec))

        # Validate reference integrity (source refs are also checked by the
        # AtomizerSpec model validator; edges are only checked here)
        errors.extend(self._validate_references(spec))

        # Validate optimization settings
        errors.extend(self._validate_optimization_settings(spec))
        warnings.extend(self._warn_optimization_settings(spec))

        # Validate canvas edges
        warnings.extend(self._validate_canvas_edges(spec))

        # Check for duplicate IDs
        errors.extend(self._validate_unique_ids(spec))

        # Validate custom function syntax
        errors.extend(self._validate_custom_functions(spec))

        return errors, warnings

    def _validate_dv_bounds(self, spec: AtomizerSpec) -> List[ValidationError]:
        """Validate design variable bounds (baseline in range, integer range size)."""
        errors: List[ValidationError] = []

        for i, dv in enumerate(spec.design_variables):
            # Check baseline within bounds
            if dv.baseline is not None:
                if dv.baseline < dv.bounds.min or dv.baseline > dv.bounds.max:
                    errors.append(ValidationError(
                        type="semantic",
                        path=["design_variables", str(i), "baseline"],
                        message=f"Baseline {dv.baseline} outside bounds [{dv.bounds.min}, {dv.bounds.max}]"
                    ))

            # Integer variables need at least two representable values
            if dv.type.value == "integer":
                range_size = dv.bounds.max - dv.bounds.min
                if range_size < 1:
                    errors.append(ValidationError(
                        type="semantic",
                        path=["design_variables", str(i), "bounds"],
                        message="Integer variable must have range >= 1"
                    ))

        return errors

    def _validate_extractor_configs(self, spec: AtomizerSpec) -> List[ValidationError]:
        """Validate extractor-specific configurations."""
        errors: List[ValidationError] = []

        for i, ext in enumerate(spec.extractors):
            # Zernike extractors need radius settings in their config
            if ext.type in [ExtractorType.ZERNIKE_OPD, ExtractorType.ZERNIKE_CSV]:
                if not ext.config:
                    errors.append(ValidationError(
                        type="semantic",
                        path=["extractors", str(i), "config"],
                        message="Zernike extractor requires config with radius settings"
                    ))
                elif ext.config:
                    if ext.config.inner_radius_mm is None:
                        errors.append(ValidationError(
                            type="semantic",
                            path=["extractors", str(i), "config", "inner_radius_mm"],
                            message="Zernike extractor requires inner_radius_mm"
                        ))
                    if ext.config.outer_radius_mm is None:
                        errors.append(ValidationError(
                            type="semantic",
                            path=["extractors", str(i), "config", "outer_radius_mm"],
                            message="Zernike extractor requires outer_radius_mm"
                        ))

            # Mass expression extractor needs expression_name
            if ext.type == ExtractorType.MASS_EXPRESSION:
                if not ext.config or not ext.config.expression_name:
                    errors.append(ValidationError(
                        type="semantic",
                        path=["extractors", str(i), "config", "expression_name"],
                        message="Mass expression extractor requires expression_name in config"
                    ))

        return errors

    def _warn_extractor_configs(self, spec: AtomizerSpec) -> List[ValidationWarning]:
        """Generate warnings for extractor configurations."""
        warnings: List[ValidationWarning] = []

        for i, ext in enumerate(spec.extractors):
            # High Zernike mode counts are allowed but flagged for performance
            if ext.type in [ExtractorType.ZERNIKE_OPD, ExtractorType.ZERNIKE_CSV]:
                if ext.config and ext.config.n_modes:
                    if ext.config.n_modes > 66:
                        warnings.append(ValidationWarning(
                            type="performance",
                            path=["extractors", str(i), "config", "n_modes"],
                            message=f"n_modes={ext.config.n_modes} is high; consider <=66 for performance"
                        ))

        return warnings

    def _validate_references(self, spec: AtomizerSpec) -> List[ValidationError]:
        """Validate canvas-edge reference integrity against known node IDs."""
        errors: List[ValidationError] = []

        if not (spec.canvas and spec.canvas.edges):
            return errors

        # Every node a canvas edge may legally connect to: DVs, extractors,
        # objectives, constraints, plus the fixed built-in nodes.
        all_ids = {dv.id for dv in spec.design_variables}
        all_ids.update(ext.id for ext in spec.extractors)
        all_ids.update({"model", "solver", "optimization"})
        all_ids.update(obj.id for obj in spec.objectives)
        if spec.constraints:
            all_ids.update(con.id for con in spec.constraints)

        for i, edge in enumerate(spec.canvas.edges):
            if edge.source not in all_ids:
                errors.append(ValidationError(
                    type="reference",
                    path=["canvas", "edges", str(i), "source"],
                    message=f"Edge source '{edge.source}' not found"
                ))
            if edge.target not in all_ids:
                errors.append(ValidationError(
                    type="reference",
                    path=["canvas", "edges", str(i), "target"],
                    message=f"Edge target '{edge.target}' not found"
                ))

        return errors

    def _validate_optimization_settings(self, spec: AtomizerSpec) -> List[ValidationError]:
        """Validate optimization settings against objective count."""
        errors: List[ValidationError] = []

        algo_type = spec.optimization.algorithm.type

        # NSGA-II requires multiple objectives
        if algo_type == AlgorithmType.NSGA_II and len(spec.objectives) < 2:
            errors.append(ValidationError(
                type="semantic",
                path=["optimization", "algorithm", "type"],
                message="NSGA-II requires at least 2 objectives"
            ))

        return errors

    def _warn_optimization_settings(self, spec: AtomizerSpec) -> List[ValidationWarning]:
        """Generate budget-related recommendations for optimization settings."""
        warnings: List[ValidationWarning] = []

        budget = spec.optimization.budget

        # Warn about small trial budgets
        if budget.max_trials and budget.max_trials < 20:
            warnings.append(ValidationWarning(
                type="recommendation",
                path=["optimization", "budget", "max_trials"],
                message=f"max_trials={budget.max_trials} is low; recommend >= 20 for convergence"
            ))

        # Warn about large design space with small budget (rule of thumb:
        # ~10 trials per enabled design variable once there are more than 5)
        num_dvs = len(spec.get_enabled_design_variables())
        if budget.max_trials and num_dvs > 5 and budget.max_trials < num_dvs * 10:
            warnings.append(ValidationWarning(
                type="recommendation",
                path=["optimization", "budget", "max_trials"],
                message=f"{num_dvs} DVs suggest at least {num_dvs * 10} trials"
            ))

        return warnings

    def _validate_canvas_edges(self, spec: AtomizerSpec) -> List[ValidationWarning]:
        """Warn when no canvas edges are present at all."""
        warnings: List[ValidationWarning] = []

        if not spec.canvas or not spec.canvas.edges:
            warnings.append(ValidationWarning(
                type="completeness",
                path=["canvas", "edges"],
                message="No canvas edges defined; canvas may not render correctly"
            ))

        return warnings

    def _validate_unique_ids(self, spec: AtomizerSpec) -> List[ValidationError]:
        """Validate that all IDs are unique across every ID-bearing section."""
        errors: List[ValidationError] = []
        # Maps each seen ID to a human-readable location like "extractors[2]".
        seen_ids: Dict[str, str] = {}

        sections = [
            ("design_variables", spec.design_variables),
            ("extractors", spec.extractors),
            ("objectives", spec.objectives),
            ("constraints", spec.constraints or []),
        ]
        for section, items in sections:
            for i, item in enumerate(items):
                if item.id in seen_ids:
                    errors.append(ValidationError(
                        type="semantic",
                        path=[section, str(i), "id"],
                        message=f"Duplicate ID '{item.id}' (also in {seen_ids[item.id]})"
                    ))
                seen_ids[item.id] = f"{section}[{i}]"

        return errors

    def _validate_custom_functions(self, spec: AtomizerSpec) -> List[ValidationError]:
        """Validate custom function Python syntax via compile() (code is not executed)."""
        errors: List[ValidationError] = []

        for i, ext in enumerate(spec.extractors):
            if ext.type == ExtractorType.CUSTOM_FUNCTION and ext.function:
                if ext.function.source_code:
                    try:
                        compile(ext.function.source_code, f"<custom:{ext.name}>", "exec")
                    except SyntaxError as e:
                        errors.append(ValidationError(
                            type="semantic",
                            path=["extractors", str(i), "function", "source_code"],
                            message=f"Python syntax error: {e.msg} at line {e.lineno}"
                        ))

        return errors

    def _build_summary(self, data: Dict) -> ValidationSummary:
        """Build the validation summary (element counts) from raw spec data."""
        extractors = data.get("extractors", [])
        # Custom = explicit custom_function type, or anything not marked builtin.
        custom_count = sum(
            1 for e in extractors
            if e.get("type") == "custom_function" or not e.get("builtin", True)
        )

        return ValidationSummary(
            design_variables=len(data.get("design_variables", [])),
            extractors=len(extractors),
            objectives=len(data.get("objectives", [])),
            # constraints may be explicitly null in the data
            constraints=len(data.get("constraints", []) or []),
            custom_functions=custom_count
        )

    def _parse_path(self, path: str) -> List[str]:
        """Parse a JSONPath-style path into parts.

        Handles both dot and bracket notation, e.g.
        "design_variables[0].bounds.max" or "objectives.0.weight".
        """
        return [part for part in re.split(r'\.|\[|\]', path) if part]

    def _index_errors(self, parts: List[str], count: int, label: str) -> List[str]:
        """Shared bounds check for 'section[i]...' update paths.

        Args:
            parts: Parsed path segments; parts[1] (if present) is the index.
            count: Number of elements currently in the section.
            label: Lower-case section label used in error messages.

        Returns:
            List of error strings (empty when the index is valid or absent).
        """
        errors: List[str] = []

        if len(parts) >= 2:
            try:
                idx = int(parts[1])
            except ValueError:
                errors.append(f"Invalid {label} index: {parts[1]}")
            else:
                # Reject negative indices too: a caller applying the path would
                # otherwise silently alias elements from the end of the list.
                if idx < 0 or idx >= count:
                    errors.append(f"{label.capitalize()} index {idx} out of range")

        return errors

    def _validate_dv_update(
        self,
        parts: List[str],
        value: Any,
        spec: AtomizerSpec
    ) -> List[str]:
        """Validate a design variable update."""
        return self._index_errors(parts, len(spec.design_variables), "design variable")

    def _validate_extractor_update(
        self,
        parts: List[str],
        value: Any,
        spec: AtomizerSpec
    ) -> List[str]:
        """Validate an extractor update."""
        return self._index_errors(parts, len(spec.extractors), "extractor")

    def _validate_objective_update(
        self,
        parts: List[str],
        value: Any,
        spec: AtomizerSpec
    ) -> List[str]:
        """Validate an objective update."""
        errors = self._index_errors(parts, len(spec.objectives), "objective")

        # Weight updates must be non-negative numbers
        if len(parts) >= 3 and parts[2] == "weight":
            if not isinstance(value, (int, float)) or value < 0:
                errors.append("Weight must be a non-negative number")

        return errors

    def _validate_constraint_update(
        self,
        parts: List[str],
        value: Any,
        spec: AtomizerSpec
    ) -> List[str]:
        """Validate a constraint update."""
        if not spec.constraints:
            return ["No constraints defined"]

        return self._index_errors(parts, len(spec.constraints), "constraint")

    def _validate_optimization_update(
        self,
        parts: List[str],
        value: Any
    ) -> List[str]:
        """Validate an optimization update (currently only algorithm.type)."""
        errors: List[str] = []

        if len(parts) >= 2:
            if parts[1] == "algorithm" and len(parts) >= 3:
                if parts[2] == "type":
                    valid_types = [t.value for t in AlgorithmType]
                    if value not in valid_types:
                        errors.append(f"Invalid algorithm type. Valid: {valid_types}")

        return errors

    def _validate_meta_update(
        self,
        parts: List[str],
        value: Any
    ) -> List[str]:
        """Validate a meta update (currently only study_name naming rules)."""
        errors: List[str] = []

        if len(parts) >= 2:
            if parts[1] == "study_name":
                if not re.match(r"^[a-z0-9_]+$", str(value)):
                    errors.append("study_name must be snake_case (lowercase, numbers, underscores)")

        return errors
|
||||
|
||||
|
||||
# Module-level convenience function
|
||||
def validate_spec(
    spec_data: Union[Dict[str, Any], AtomizerSpec],
    strict: bool = True
) -> ValidationReport:
    """
    Validate an AtomizerSpec.

    Thin module-level convenience wrapper around ``SpecValidator.validate``.

    Args:
        spec_data: Spec data (dict or AtomizerSpec)
        strict: Raise exception on errors

    Returns:
        ValidationReport

    Raises:
        SpecValidationError: If strict=True and validation fails
    """
    return SpecValidator().validate(spec_data, strict=strict)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user