feat(dashboard): Enhanced chat, spec management, and Claude integration

Backend:
- spec.py: New AtomizerSpec REST API endpoints
- spec_manager.py: SpecManager service for unified config
- interview_engine.py: Study creation interview logic
- claude.py: Enhanced Claude API with context
- optimization.py: Extended optimization endpoints
- context_builder.py, session_manager.py: Improved services

Frontend:
- Chat components: Enhanced message rendering, tool call cards
- Hooks: useClaudeCode, useSpecWebSocket, improved useChat
- Pages: Updated Dashboard, Analysis, Insights, Setup, Home
- Components: ParallelCoordinatesPlot, ParetoPlot improvements
- App.tsx: Route updates for canvas/studio

Infrastructure:
- vite.config.ts: Build configuration updates
- start/stop-dashboard.bat: Script improvements
This commit is contained in:
2026-01-20 13:10:47 -05:00
parent b05412f807
commit ba0b9a1fae
31 changed files with 4836 additions and 349 deletions

View File

@@ -3,5 +3,13 @@ Atomizer Dashboard Services
"""
from .claude_agent import AtomizerClaudeAgent
from .spec_manager import SpecManager, SpecManagerError, SpecNotFoundError, SpecConflictError, get_spec_manager
__all__ = ['AtomizerClaudeAgent']
__all__ = [
'AtomizerClaudeAgent',
'SpecManager',
'SpecManagerError',
'SpecNotFoundError',
'SpecConflictError',
'get_spec_manager',
]

File diff suppressed because it is too large Load Diff

View File

@@ -43,7 +43,11 @@ class ContextBuilder:
# Canvas context takes priority - if user is working on a canvas, include it
if canvas_state:
node_count = len(canvas_state.get("nodes", []))
print(f"[ContextBuilder] Including canvas context with {node_count} nodes")
parts.append(self._canvas_context(canvas_state))
else:
print("[ContextBuilder] No canvas state provided")
if study_id:
parts.append(self._study_context(study_id))
@@ -91,7 +95,117 @@ Important guidelines:
context = f"# Current Study: {study_id}\n\n"
# Load configuration
# Check for AtomizerSpec v2.0 first (preferred)
spec_path = study_dir / "1_setup" / "atomizer_spec.json"
if not spec_path.exists():
spec_path = study_dir / "atomizer_spec.json"
if spec_path.exists():
context += self._spec_context(spec_path)
else:
# Fall back to legacy optimization_config.json
context += self._legacy_config_context(study_dir)
# Check for results
db_path = study_dir / "3_results" / "study.db"
if db_path.exists():
try:
conn = sqlite3.connect(db_path)
count = conn.execute(
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
).fetchone()[0]
best = conn.execute("""
SELECT MIN(tv.value) FROM trial_values tv
JOIN trials t ON tv.trial_id = t.trial_id
WHERE t.state = 'COMPLETE'
""").fetchone()[0]
context += f"\n## Results Status\n\n"
context += f"- **Trials completed**: {count}\n"
if best is not None:
context += f"- **Best objective**: {best:.6f}\n"
conn.close()
except Exception:
pass
return context
def _spec_context(self, spec_path: Path) -> str:
"""Build context from AtomizerSpec v2.0 file"""
context = "**Format**: AtomizerSpec v2.0\n\n"
try:
with open(spec_path) as f:
spec = json.load(f)
context += "## Configuration\n\n"
# Design variables
dvs = spec.get("design_variables", [])
if dvs:
context += "**Design Variables:**\n"
for dv in dvs[:10]:
bounds = dv.get("bounds", {})
bound_str = f"[{bounds.get('min', '?')}, {bounds.get('max', '?')}]"
enabled = "" if dv.get("enabled", True) else ""
context += f"- {dv.get('name', 'unnamed')}: {bound_str} {enabled}\n"
if len(dvs) > 10:
context += f"- ... and {len(dvs) - 10} more\n"
# Extractors
extractors = spec.get("extractors", [])
if extractors:
context += "\n**Extractors:**\n"
for ext in extractors:
ext_type = ext.get("type", "unknown")
outputs = ext.get("outputs", [])
output_names = [o.get("name", "?") for o in outputs[:3]]
builtin = "builtin" if ext.get("builtin", True) else "custom"
context += f"- {ext.get('name', 'unnamed')} ({ext_type}, {builtin}): outputs {output_names}\n"
# Objectives
objs = spec.get("objectives", [])
if objs:
context += "\n**Objectives:**\n"
for obj in objs:
direction = obj.get("direction", "minimize")
weight = obj.get("weight", 1.0)
context += f"- {obj.get('name', 'unnamed')} ({direction}, weight={weight})\n"
# Constraints
constraints = spec.get("constraints", [])
if constraints:
context += "\n**Constraints:**\n"
for c in constraints:
op = c.get("operator", "<=")
thresh = c.get("threshold", "?")
context += f"- {c.get('name', 'unnamed')}: {op} {thresh}\n"
# Optimization settings
opt = spec.get("optimization", {})
algo = opt.get("algorithm", {})
budget = opt.get("budget", {})
method = algo.get("type", "TPE")
max_trials = budget.get("max_trials", "not set")
context += f"\n**Optimization**: {method}, max_trials: {max_trials}\n"
# Surrogate
surrogate = opt.get("surrogate", {})
if surrogate.get("enabled"):
sur_type = surrogate.get("type", "gaussian_process")
context += f"**Surrogate**: {sur_type} enabled\n"
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Spec file exists but could not be parsed: {e}*\n"
return context
def _legacy_config_context(self, study_dir: Path) -> str:
"""Build context from legacy optimization_config.json"""
context = "**Format**: Legacy optimization_config.json\n\n"
config_path = study_dir / "1_setup" / "optimization_config.json"
if not config_path.exists():
config_path = study_dir / "optimization_config.json"
@@ -135,30 +249,8 @@ Important guidelines:
except (json.JSONDecodeError, IOError) as e:
context += f"\n*Config file exists but could not be parsed: {e}*\n"
# Check for results
db_path = study_dir / "3_results" / "study.db"
if db_path.exists():
try:
conn = sqlite3.connect(db_path)
count = conn.execute(
"SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'"
).fetchone()[0]
best = conn.execute("""
SELECT MIN(tv.value) FROM trial_values tv
JOIN trials t ON tv.trial_id = t.trial_id
WHERE t.state = 'COMPLETE'
""").fetchone()[0]
context += f"\n## Results Status\n\n"
context += f"- **Trials completed**: {count}\n"
if best is not None:
context += f"- **Best objective**: {best:.6f}\n"
conn.close()
except Exception:
pass
else:
context += "*No configuration file found.*\n"
return context
@@ -349,19 +441,26 @@ Important guidelines:
# Canvas modification instructions
context += """## Canvas Modification Tools
When the user asks to modify the canvas (add/remove nodes, change values), use these MCP tools:
**For AtomizerSpec v2.0 studies (preferred):**
Use spec tools when working with v2.0 studies (check if study uses `atomizer_spec.json`):
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
- `spec_remove_node` - Remove nodes from the spec
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
**For Legacy Canvas (optimization_config.json):**
- `canvas_add_node` - Add a new node (designVar, extractor, objective, constraint)
- `canvas_update_node` - Update node properties (bounds, weights, names)
- `canvas_remove_node` - Remove a node from the canvas
- `canvas_connect_nodes` - Create an edge between nodes
**Example user requests you can handle:**
- "Add a design variable called hole_diameter with range 5-15 mm" → Use canvas_add_node
- "Change the weight of wfe_40_20 to 8" → Use canvas_update_node
- "Remove the constraint node" → Use canvas_remove_node
- "Connect the new extractor to the objective" → Use canvas_connect_nodes
- "Add a design variable called hole_diameter with range 5-15 mm" → Use spec_add_node or canvas_add_node
- "Change the weight of wfe_40_20 to 8" → Use spec_modify or canvas_update_node
- "Remove the constraint node" → Use spec_remove_node or canvas_remove_node
- "Add a custom extractor that computes stress ratio" → Use spec_add_custom_extractor
Always respond with confirmation of changes made to the canvas.
Always respond with confirmation of changes made to the canvas/spec.
"""
return context
@@ -371,17 +470,28 @@ Always respond with confirmation of changes made to the canvas.
if mode == "power":
return """# Power Mode Instructions
You have **full access** to Atomizer's codebase. You can:
- Edit any file using `edit_file` tool
- Create new files with `create_file` tool
- Create new extractors with `create_extractor` tool
- Run shell commands with `run_shell_command` tool
- Search codebase with `search_codebase` tool
- Commit and push changes
You have **FULL ACCESS** to modify Atomizer studies. **DO NOT ASK FOR PERMISSION** - just do it.
**Use these powers responsibly.** Always explain what you're doing and why.
## Direct Actions (no confirmation needed):
- **Add design variables**: Use `canvas_add_node` or `spec_add_node` with node_type="designVar"
- **Add extractors**: Use `canvas_add_node` with node_type="extractor"
- **Add objectives**: Use `canvas_add_node` with node_type="objective"
- **Add constraints**: Use `canvas_add_node` with node_type="constraint"
- **Update node properties**: Use `canvas_update_node` or `spec_modify`
- **Remove nodes**: Use `canvas_remove_node`
- **Edit atomizer_spec.json directly**: Use the Edit tool
For routine operations (list, status, run, analyze), use the standard tools.
## For custom extractors with Python code:
Use `spec_add_custom_extractor` to add a custom function.
## IMPORTANT:
- You have --dangerously-skip-permissions enabled
- The user has explicitly granted you power mode access
- **ACT IMMEDIATELY** when asked to add/modify/remove things
- Explain what you did AFTER doing it, not before
- Do NOT say "I need permission" - you already have it
Example: If user says "add a volume extractor", immediately use canvas_add_node to add it.
"""
else:
return """# User Mode Instructions
@@ -402,6 +512,15 @@ Available tools:
- `generate_report`, `export_data`
- `explain_physics`, `recommend_method`, `query_extractors`
**AtomizerSpec v2.0 Tools (preferred for new studies):**
- `spec_get` - Get the full AtomizerSpec for a study
- `spec_modify` - Modify spec values using JSONPath (e.g., "design_variables[0].bounds.min")
- `spec_add_node` - Add design variables, extractors, objectives, or constraints
- `spec_remove_node` - Remove nodes from the spec
- `spec_validate` - Validate spec against JSON Schema
- `spec_add_custom_extractor` - Add a Python-based custom extractor function
- `spec_create_from_description` - Create a new study from natural language description
**Canvas Tools (for visual workflow builder):**
- `validate_canvas_intent` - Validate a canvas-generated optimization intent
- `execute_canvas_intent` - Create a study from a canvas intent

View File

@@ -0,0 +1,454 @@
"""
Interview Engine - Guided Study Creation through Conversation
Provides a structured interview flow for creating optimization studies.
Claude uses this to gather information step-by-step, building a complete
atomizer_spec.json through natural conversation.
"""
from typing import Dict, Any, List, Optional, Literal
from dataclasses import dataclass, field
from enum import Enum
from datetime import datetime
import json
class InterviewState(str, Enum):
    """Current phase of the interview."""
    NOT_STARTED = "not_started"
    GATHERING_BASICS = "gathering_basics"            # Name, description, goals
    GATHERING_MODEL = "gathering_model"              # Model file, solver type
    GATHERING_VARIABLES = "gathering_variables"      # Design variables
    GATHERING_EXTRACTORS = "gathering_extractors"    # Physics extractors
    GATHERING_OBJECTIVES = "gathering_objectives"    # Objectives
    GATHERING_CONSTRAINTS = "gathering_constraints"  # Constraints
    GATHERING_SETTINGS = "gathering_settings"        # Algorithm, trials
    REVIEW = "review"                                # Review before creation
    COMPLETED = "completed"


def _with_ids_and_positions(
    items: List[Dict[str, Any]],
    prefix: str,
    x: int,
    y_start: int = 100,
    y_step: int = 80,
) -> List[Dict[str, Any]]:
    """Return copies of *items* with sequential IDs and canvas positions.

    IDs look like "<prefix>_001"; nodes are stacked vertically in a column
    at the given x coordinate. Input dicts are not mutated.
    """
    result = []
    for i, item in enumerate(items):
        item_copy = item.copy()
        item_copy['id'] = f"{prefix}_{i + 1:03d}"
        item_copy['canvas_position'] = {'x': x, 'y': y_start + i * y_step}
        result.append(item_copy)
    return result


@dataclass
class InterviewData:
    """Accumulated data from the interview."""
    # Basics
    study_name: Optional[str] = None
    category: Optional[str] = None
    description: Optional[str] = None
    goals: List[str] = field(default_factory=list)
    # Model
    sim_file: Optional[str] = None
    prt_file: Optional[str] = None
    solver_type: str = "nastran"
    # Collected node definitions (dicts in atomizer_spec node format)
    design_variables: List[Dict[str, Any]] = field(default_factory=list)
    extractors: List[Dict[str, Any]] = field(default_factory=list)
    objectives: List[Dict[str, Any]] = field(default_factory=list)
    constraints: List[Dict[str, Any]] = field(default_factory=list)
    # Settings
    algorithm: str = "TPE"
    max_trials: int = 100

    def to_spec(self) -> Dict[str, Any]:
        """Convert interview data to atomizer_spec.json (v2.0) format.

        Nodes get generated IDs and default canvas positions, laid out in
        columns: design vars at x=50, extractors at x=400, objectives and
        constraints at x=750 (constraints start lower, at y=400).
        """
        # Single timestamp so created_at/modified_at match exactly
        # (previously datetime.now() was called twice and could differ).
        now = datetime.now().isoformat()
        return {
            "meta": {
                "version": "2.0",
                "study_name": self.study_name or "untitled_study",
                "description": self.description or "",
                "created_at": now,
                "created_by": "interview",
                "modified_at": now,
                "modified_by": "interview"
            },
            "model": {
                "sim": {
                    "path": self.sim_file or "",
                    "solver": self.solver_type
                }
            },
            "design_variables": _with_ids_and_positions(self.design_variables, "dv", 50),
            "extractors": _with_ids_and_positions(self.extractors, "ext", 400),
            "objectives": _with_ids_and_positions(self.objectives, "obj", 750),
            "constraints": _with_ids_and_positions(self.constraints, "con", 750, y_start=400),
            "optimization": {
                "algorithm": {
                    "type": self.algorithm
                },
                "budget": {
                    "max_trials": self.max_trials
                }
            },
            "canvas": {
                "edges": [],
                "layout_version": "2.0"
            }
        }


class InterviewEngine:
    """
    Manages the interview flow for study creation.

    Usage:
        1. Create engine: engine = InterviewEngine()
        2. Start interview: engine.start()
        3. Record answers: engine.record_answer("study_name", "bracket_opt")
        4. Check progress: engine.get_progress()
        5. Generate spec: engine.finalize()
    """

    def __init__(self):
        self.state = InterviewState.NOT_STARTED
        self.data = InterviewData()
        self.questions_asked: List[str] = []
        self.errors: List[str] = []

    def start(self) -> Dict[str, Any]:
        """Start the interview process and return the first questions."""
        self.state = InterviewState.GATHERING_BASICS
        return {
            "state": self.state.value,
            "message": "Let's create a new optimization study! I'll guide you through the process.",
            "next_questions": self.get_current_questions()
        }

    def get_current_questions(self) -> List[Dict[str, Any]]:
        """Get the questions for the current interview state."""
        questions = {
            InterviewState.GATHERING_BASICS: [
                {
                    "field": "study_name",
                    "question": "What would you like to name this study?",
                    "hint": "Use snake_case, e.g., 'bracket_mass_optimization'",
                    "required": True
                },
                {
                    "field": "category",
                    "question": "What category should this study be in?",
                    "hint": "e.g., 'Simple_Bracket', 'M1_Mirror', or leave blank for root",
                    "required": False
                },
                {
                    "field": "description",
                    "question": "Briefly describe what you're trying to optimize",
                    "hint": "e.g., 'Minimize bracket mass while maintaining stiffness'",
                    "required": True
                }
            ],
            InterviewState.GATHERING_MODEL: [
                {
                    "field": "sim_file",
                    "question": "What is the path to your simulation (.sim) file?",
                    "hint": "Relative path from the study folder, e.g., '1_setup/Model_sim1.sim'",
                    "required": True
                }
            ],
            InterviewState.GATHERING_VARIABLES: [
                {
                    "field": "design_variable",
                    "question": "What parameters do you want to optimize?",
                    "hint": "Tell me the NX expression names and their bounds",
                    "required": True,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_EXTRACTORS: [
                {
                    "field": "extractor",
                    "question": "What physics quantities do you want to extract from FEA?",
                    "hint": "e.g., mass, max displacement, max stress, frequency, Zernike WFE",
                    "required": True,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_OBJECTIVES: [
                {
                    "field": "objective",
                    "question": "What do you want to optimize?",
                    "hint": "Tell me which extracted quantities to minimize or maximize",
                    "required": True,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_CONSTRAINTS: [
                {
                    "field": "constraint",
                    "question": "Do you have any constraints? (e.g., max stress, min frequency)",
                    "hint": "You can say 'none' if you don't have any",
                    "required": False,
                    "multi": True
                }
            ],
            InterviewState.GATHERING_SETTINGS: [
                {
                    "field": "algorithm",
                    "question": "Which optimization algorithm would you like to use?",
                    "hint": "Options: TPE (default), CMA-ES, NSGA-II, RandomSearch",
                    "required": False
                },
                {
                    "field": "max_trials",
                    "question": "How many trials (FEA evaluations) should we run?",
                    "hint": "Default is 100. More trials = better results but longer runtime",
                    "required": False
                }
            ],
            InterviewState.REVIEW: [
                {
                    "field": "confirm",
                    "question": "Does this configuration look correct? (yes/no)",
                    "required": True
                }
            ]
        }
        return questions.get(self.state, [])

    @staticmethod
    def _append_items(target: List[Dict[str, Any]], value: Any) -> None:
        """Append a single dict or extend with a list of dicts; ignore other types."""
        if isinstance(value, dict):
            target.append(value)
        elif isinstance(value, list):
            target.extend(value)

    def record_answer(self, field: str, value: Any) -> Dict[str, Any]:
        """Record an answer and potentially advance the state.

        Multi-valued fields (design_variable, extractor, objective,
        constraint) accept either a single dict or a list of dicts.
        Unknown values for 'algorithm'/'max_trials' are silently ignored,
        keeping the defaults.
        """
        self.questions_asked.append(field)

        # Handle different field types
        if field == "study_name":
            self.data.study_name = value
        elif field == "category":
            self.data.category = value if value else None
        elif field == "description":
            self.data.description = value
        elif field == "sim_file":
            self.data.sim_file = value
        elif field == "design_variable":
            self._append_items(self.data.design_variables, value)
        elif field == "extractor":
            self._append_items(self.data.extractors, value)
        elif field == "objective":
            self._append_items(self.data.objectives, value)
        elif field == "constraint":
            # BUG FIX: the previous code called value.lower() before any
            # type check, raising AttributeError for dict/list answers —
            # which are the normal case. Strings like "none"/"no"/"skip"
            # are treated as an explicit skip; other strings are ignored.
            if not isinstance(value, str) and value:
                self._append_items(self.data.constraints, value)
        elif field == "algorithm":
            if value in ["TPE", "CMA-ES", "NSGA-II", "RandomSearch"]:
                self.data.algorithm = value
        elif field == "max_trials":
            try:
                self.data.max_trials = int(value)
            except (ValueError, TypeError):
                pass
        elif field == "confirm":
            # Type guard prevents AttributeError on non-string answers
            if isinstance(value, str) and value.lower() in ["yes", "y", "confirm", "ok"]:
                self.state = InterviewState.COMPLETED

        return {
            "state": self.state.value,
            "recorded": {field: value},
            "data_so_far": self.get_summary()
        }

    def advance_state(self) -> Dict[str, Any]:
        """Advance to the next interview state (no-op when already COMPLETED)."""
        state_order = [
            InterviewState.NOT_STARTED,
            InterviewState.GATHERING_BASICS,
            InterviewState.GATHERING_MODEL,
            InterviewState.GATHERING_VARIABLES,
            InterviewState.GATHERING_EXTRACTORS,
            InterviewState.GATHERING_OBJECTIVES,
            InterviewState.GATHERING_CONSTRAINTS,
            InterviewState.GATHERING_SETTINGS,
            InterviewState.REVIEW,
            InterviewState.COMPLETED
        ]
        current_idx = state_order.index(self.state)
        if current_idx < len(state_order) - 1:
            self.state = state_order[current_idx + 1]
        return {
            "state": self.state.value,
            "next_questions": self.get_current_questions()
        }

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of collected data (counts, not full node dicts)."""
        return {
            "study_name": self.data.study_name,
            "category": self.data.category,
            "description": self.data.description,
            "model": self.data.sim_file,
            "design_variables": len(self.data.design_variables),
            "extractors": len(self.data.extractors),
            "objectives": len(self.data.objectives),
            "constraints": len(self.data.constraints),
            "algorithm": self.data.algorithm,
            "max_trials": self.data.max_trials
        }

    def get_progress(self) -> Dict[str, Any]:
        """Get interview progress information (state, percent, summary)."""
        state_progress = {
            InterviewState.NOT_STARTED: 0,
            InterviewState.GATHERING_BASICS: 15,
            InterviewState.GATHERING_MODEL: 25,
            InterviewState.GATHERING_VARIABLES: 40,
            InterviewState.GATHERING_EXTRACTORS: 55,
            InterviewState.GATHERING_OBJECTIVES: 70,
            InterviewState.GATHERING_CONSTRAINTS: 80,
            InterviewState.GATHERING_SETTINGS: 90,
            InterviewState.REVIEW: 95,
            InterviewState.COMPLETED: 100
        }
        return {
            "state": self.state.value,
            "progress_percent": state_progress.get(self.state, 0),
            "summary": self.get_summary(),
            "current_questions": self.get_current_questions()
        }

    def validate(self) -> Dict[str, Any]:
        """Validate the collected data before finalizing.

        Returns a dict with 'valid', 'errors' (blocking) and
        'warnings' (advisory).
        """
        errors = []
        warnings = []

        # Required fields
        if not self.data.study_name:
            errors.append("Study name is required")
        if not self.data.design_variables:
            errors.append("At least one design variable is required")
        if not self.data.extractors:
            errors.append("At least one extractor is required")
        if not self.data.objectives:
            errors.append("At least one objective is required")

        # Warnings (non-blocking)
        if not self.data.sim_file:
            warnings.append("No simulation file specified - you'll need to add one manually")
        if not self.data.constraints:
            warnings.append("No constraints defined - optimization will be unconstrained")

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings
        }

    def finalize(self) -> Dict[str, Any]:
        """Generate the final atomizer_spec.json, or errors if invalid."""
        validation = self.validate()
        if not validation["valid"]:
            return {
                "success": False,
                "errors": validation["errors"]
            }
        spec = self.data.to_spec()
        return {
            "success": True,
            "spec": spec,
            "warnings": validation.get("warnings", [])
        }

    def to_dict(self) -> Dict[str, Any]:
        """Serialize engine state for persistence (inverse of from_dict)."""
        return {
            "state": self.state.value,
            "data": {
                "study_name": self.data.study_name,
                "category": self.data.category,
                "description": self.data.description,
                "goals": self.data.goals,
                "sim_file": self.data.sim_file,
                "prt_file": self.data.prt_file,
                "solver_type": self.data.solver_type,
                "design_variables": self.data.design_variables,
                "extractors": self.data.extractors,
                "objectives": self.data.objectives,
                "constraints": self.data.constraints,
                "algorithm": self.data.algorithm,
                "max_trials": self.data.max_trials
            },
            "questions_asked": self.questions_asked,
            "errors": self.errors
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "InterviewEngine":
        """Restore engine from serialized state produced by to_dict()."""
        engine = cls()
        engine.state = InterviewState(data.get("state", "not_started"))
        d = data.get("data", {})
        engine.data.study_name = d.get("study_name")
        engine.data.category = d.get("category")
        engine.data.description = d.get("description")
        engine.data.goals = d.get("goals", [])
        engine.data.sim_file = d.get("sim_file")
        engine.data.prt_file = d.get("prt_file")
        engine.data.solver_type = d.get("solver_type", "nastran")
        engine.data.design_variables = d.get("design_variables", [])
        engine.data.extractors = d.get("extractors", [])
        engine.data.objectives = d.get("objectives", [])
        engine.data.constraints = d.get("constraints", [])
        engine.data.algorithm = d.get("algorithm", "TPE")
        engine.data.max_trials = d.get("max_trials", 100)
        engine.questions_asked = data.get("questions_asked", [])
        engine.errors = data.get("errors", [])
        return engine

View File

@@ -219,6 +219,18 @@ class SessionManager:
full_response = result["stdout"] or ""
if full_response:
# Check if response contains canvas modifications (from MCP tools)
import logging
logger = logging.getLogger(__name__)
modifications = self._extract_canvas_modifications(full_response)
logger.info(f"[SEND_MSG] Found {len(modifications)} canvas modifications to send")
for mod in modifications:
logger.info(f"[SEND_MSG] Sending canvas_modification: {mod.get('action')} {mod.get('nodeType')}")
yield {"type": "canvas_modification", "modification": mod}
# Always send the text response
yield {"type": "text", "content": full_response}
if result["returncode"] != 0 and result["stderr"]:
@@ -292,6 +304,90 @@ class SessionManager:
**({} if not db_record else {"db_record": db_record}),
}
def _extract_canvas_modifications(self, response: str) -> List[Dict]:
"""
Extract canvas modification objects from Claude's response.
MCP tools like canvas_add_node return JSON with a 'modification' field.
This method finds and extracts those modifications so the frontend can apply them.
"""
import re
import logging
logger = logging.getLogger(__name__)
modifications = []
# Debug: log what we're searching
logger.info(f"[CANVAS_MOD] Searching response ({len(response)} chars) for modifications")
# Check if "modification" even exists in the response
if '"modification"' not in response:
logger.info("[CANVAS_MOD] No 'modification' key found in response")
return modifications
try:
# Method 1: Look for JSON in code fences
code_block_pattern = r'```(?:json)?\s*([\s\S]*?)```'
for match in re.finditer(code_block_pattern, response):
block_content = match.group(1).strip()
try:
obj = json.loads(block_content)
if isinstance(obj, dict) and 'modification' in obj:
logger.info(f"[CANVAS_MOD] Found modification in code fence: {obj['modification']}")
modifications.append(obj['modification'])
except json.JSONDecodeError:
continue
# Method 2: Find JSON objects using proper brace matching
# This handles nested objects correctly
i = 0
while i < len(response):
if response[i] == '{':
# Found a potential JSON start, find matching close
brace_count = 1
j = i + 1
in_string = False
escape_next = False
while j < len(response) and brace_count > 0:
char = response[j]
if escape_next:
escape_next = False
elif char == '\\':
escape_next = True
elif char == '"' and not escape_next:
in_string = not in_string
elif not in_string:
if char == '{':
brace_count += 1
elif char == '}':
brace_count -= 1
j += 1
if brace_count == 0:
potential_json = response[i:j]
try:
obj = json.loads(potential_json)
if isinstance(obj, dict) and 'modification' in obj:
mod = obj['modification']
# Avoid duplicates
if mod not in modifications:
logger.info(f"[CANVAS_MOD] Found inline modification: action={mod.get('action')}, nodeType={mod.get('nodeType')}")
modifications.append(mod)
except json.JSONDecodeError as e:
# Not valid JSON, skip
pass
i = j
else:
i += 1
except Exception as e:
logger.error(f"[CANVAS_MOD] Error extracting modifications: {e}")
logger.info(f"[CANVAS_MOD] Extracted {len(modifications)} modification(s)")
return modifications
def _build_mcp_config(self, mode: Literal["user", "power"]) -> dict:
"""Build MCP configuration for Claude"""
return {

View File

@@ -0,0 +1,747 @@
"""
SpecManager Service
Central service for managing AtomizerSpec v2.0.
All spec modifications flow through this service.
Features:
- Load/save specs with validation
- Atomic writes with conflict detection
- Patch operations with JSONPath support
- Node CRUD operations
- Custom function support
- WebSocket broadcast integration
"""
import hashlib
import json
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
# Add optimization_engine to path if needed
ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent
if str(ATOMIZER_ROOT) not in sys.path:
sys.path.insert(0, str(ATOMIZER_ROOT))
from optimization_engine.config.spec_models import (
AtomizerSpec,
DesignVariable,
Extractor,
Objective,
Constraint,
CanvasPosition,
CanvasEdge,
ExtractorType,
CustomFunction,
ExtractorOutput,
ValidationReport,
)
from optimization_engine.config.spec_validator import (
SpecValidator,
SpecValidationError,
)
class SpecManagerError(Exception):
    """Base error for SpecManager operations."""
    pass


class SpecNotFoundError(SpecManagerError):
    """Raised when spec file doesn't exist."""
    pass


class SpecConflictError(SpecManagerError):
    """Raised when spec has been modified by another client."""

    def __init__(self, message: str, current_hash: str):
        # Carry the current on-disk hash so clients can reload the spec
        # and retry their edit against the latest revision.
        super().__init__(message)
        self.current_hash = current_hash


class WebSocketSubscriber:
    """Protocol for WebSocket subscribers."""

    async def send_json(self, data: Dict[str, Any]) -> None:
        """Send JSON data to subscriber."""
        # Interface stub: concrete subscribers (e.g. a WebSocket connection
        # wrapper) must override this.
        raise NotImplementedError
class SpecManager:
"""
Central service for managing AtomizerSpec.
All modifications go through this service to ensure:
- Validation on every change
- Atomic file writes
- Conflict detection via hashing
- WebSocket broadcast to all clients
"""
SPEC_FILENAME = "atomizer_spec.json"
def __init__(self, study_path: Union[str, Path]):
    """
    Initialize SpecManager for a study.

    Args:
        study_path: Path to the study directory
    """
    self.study_path = Path(study_path)
    # The spec always lives at <study_path>/atomizer_spec.json
    self.spec_path = self.study_path / self.SPEC_FILENAME
    self.validator = SpecValidator()
    # WebSocket clients notified on every successful save()
    self._subscribers: List[WebSocketSubscriber] = []
    # Hash of the most recently loaded/saved spec, used for conflict
    # detection; None until the first load()/save()
    self._last_hash: Optional[str] = None
# =========================================================================
# Core CRUD Operations
# =========================================================================
def load(self, validate: bool = True) -> AtomizerSpec:
    """
    Load and optionally validate the spec.

    Args:
        validate: Whether to validate the spec before parsing

    Returns:
        AtomizerSpec instance

    Raises:
        SpecNotFoundError: If spec file doesn't exist
        SpecValidationError: If validation fails
    """
    if not self.spec_path.exists():
        raise SpecNotFoundError(f"Spec not found: {self.spec_path}")

    with open(self.spec_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    if validate:
        # strict=True raises on the first validation problem
        self.validator.validate(data, strict=True)

    spec = AtomizerSpec.model_validate(data)
    # Cache the on-disk hash for later conflict detection in save()
    self._last_hash = self._compute_hash(data)
    return spec
def load_raw(self) -> Dict[str, Any]:
    """
    Read the spec file and return its JSON content as a plain dict,
    skipping pydantic parsing and validation entirely.

    Returns:
        Raw spec dict

    Raises:
        SpecNotFoundError: If spec file doesn't exist
    """
    if not self.spec_path.exists():
        raise SpecNotFoundError(f"Spec not found: {self.spec_path}")
    raw_text = self.spec_path.read_text(encoding='utf-8')
    return json.loads(raw_text)
def save(
    self,
    spec: Union[AtomizerSpec, Dict[str, Any]],
    modified_by: str = "api",
    expected_hash: Optional[str] = None
) -> str:
    """
    Save spec with validation and broadcast.

    Order of operations: conflict check -> metadata stamp -> validate ->
    atomic write -> cache hash -> broadcast. Nothing is written to disk
    if validation fails.

    Args:
        spec: Spec to save (AtomizerSpec or dict)
        modified_by: Who/what is making the change
        expected_hash: If provided, verify current file hash matches
            (optimistic locking)

    Returns:
        New spec hash

    Raises:
        SpecValidationError: If validation fails
        SpecConflictError: If expected_hash doesn't match current
    """
    # Convert to dict if needed
    if isinstance(spec, AtomizerSpec):
        data = spec.model_dump(mode='json')
    else:
        data = spec

    # Check for conflicts if expected_hash provided: the caller read the
    # spec at expected_hash; reject if someone else saved since then
    if expected_hash and self.spec_path.exists():
        current_hash = self.get_hash()
        if current_hash != expected_hash:
            raise SpecConflictError(
                "Spec was modified by another client",
                current_hash=current_hash
            )

    # Update metadata; the 'Z' suffix yields the compact UTC form
    # NOTE(review): assumes data["meta"] already exists — raises KeyError
    # otherwise; confirm all callers supply a spec with a meta section
    now = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
    data["meta"]["modified"] = now
    data["meta"]["modified_by"] = modified_by

    # Validate (raises SpecValidationError before anything is written)
    self.validator.validate(data, strict=True)

    # Compute new hash
    new_hash = self._compute_hash(data)

    # Atomic write (write to temp, then rename) so concurrent readers
    # never observe a partially-written spec file
    temp_path = self.spec_path.with_suffix('.tmp')
    with open(temp_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    temp_path.replace(self.spec_path)

    # Update cached hash
    self._last_hash = new_hash

    # Broadcast to subscribers
    self._broadcast({
        "type": "spec_updated",
        "hash": new_hash,
        "modified_by": modified_by,
        "timestamp": now
    })

    return new_hash
def exists(self) -> bool:
    """Check if spec file exists."""
    return self.spec_path.exists()

def get_hash(self) -> str:
    """Get current spec hash, or "" when no spec file exists.

    Reuses load_raw() instead of duplicating the file-reading logic,
    so both paths read the spec identically.
    """
    if not self.spec_path.exists():
        return ""
    return self._compute_hash(self.load_raw())
def validate_and_report(self) -> ValidationReport:
    """
    Run full validation and return detailed report.

    Returns:
        ValidationReport with errors, warnings, summary

    Raises:
        SpecNotFoundError: If spec file doesn't exist (raised by load_raw)
    """
    # load_raw() already raises SpecNotFoundError for a missing file with
    # the same message, so the previous duplicate existence check here
    # was removed.
    data = self.load_raw()
    # strict=False collects all problems into the report instead of
    # raising on the first error
    return self.validator.validate(data, strict=False)
# =========================================================================
# Patch Operations
# =========================================================================
def patch(
    self,
    path: str,
    value: Any,
    modified_by: str = "api"
) -> AtomizerSpec:
    """
    Set a single value addressed by a JSONPath-style expression.

    Args:
        path: Path such as "design_variables[0].bounds.max".
        value: Replacement value for that location.
        modified_by: Who/what is making the change.

    Returns:
        The updated AtomizerSpec (loaded without re-validation).

    Raises:
        SpecValidationError: If the partial update fails validation.
    """
    data = self.load_raw()

    # Reject the change up front, before the file is touched.
    current_spec = AtomizerSpec.model_validate(data)
    ok, problems = self.validator.validate_partial(path, value, current_spec)
    if not ok:
        raise SpecValidationError(f"Invalid update: {'; '.join(problems)}")

    self._apply_patch(data, path, value)
    self.save(data, modified_by)
    return self.load(validate=False)
def _apply_patch(self, data: Dict, path: str, value: Any) -> None:
"""
Apply a patch to the data dict.
Supports paths like:
- "meta.description"
- "design_variables[0].bounds.max"
- "objectives[1].weight"
"""
parts = self._parse_path(path)
if not parts:
raise ValueError(f"Invalid path: {path}")
# Navigate to parent
current = data
for part in parts[:-1]:
if isinstance(current, list):
idx = int(part)
current = current[idx]
else:
current = current[part]
# Set final value
final_key = parts[-1]
if isinstance(current, list):
idx = int(final_key)
current[idx] = value
else:
current[final_key] = value
def _parse_path(self, path: str) -> List[str]:
"""Parse JSONPath into parts."""
# Handle both dot notation and bracket notation
parts = []
for part in re.split(r'\.|\[|\]', path):
if part:
parts.append(part)
return parts
# =========================================================================
# Node Operations
# =========================================================================
def add_node(
    self,
    node_type: str,
    node_data: Dict[str, Any],
    modified_by: str = "canvas"
) -> str:
    """
    Append a new node (design var, extractor, objective, constraint) to the spec.

    Args:
        node_type: One of 'designVar', 'extractor', 'objective', 'constraint'.
        node_data: Node payload without an ID; mutated in place to receive
            the generated ID and, if missing, an auto canvas position.
        modified_by: Who/what is making the change.

    Returns:
        The generated node ID.
    """
    data = self.load_raw()

    node_id = self._generate_id(node_type, data)
    node_data["id"] = node_id

    # Only compute a position when the caller didn't supply one.
    if "canvas_position" not in node_data:
        node_data["canvas_position"] = self._auto_position(node_type, data)

    section = self._get_section_for_type(node_type)
    if data.get(section) is None:
        data[section] = []
    data[section].append(node_data)

    self.save(data, modified_by)

    # Let live clients know a node appeared.
    self._broadcast({
        "type": "node_added",
        "node_type": node_type,
        "node_id": node_id,
        "modified_by": modified_by
    })
    return node_id
def update_node(
    self,
    node_id: str,
    updates: Dict[str, Any],
    modified_by: str = "canvas"
) -> None:
    """
    Merge *updates* into the node with the given ID and persist the spec.

    Args:
        node_id: ID of the node to update.
        updates: Fields to merge into the node dict.
        modified_by: Who/what is making the change.

    Raises:
        SpecManagerError: If no node with node_id exists in any section.
    """
    data = self.load_raw()

    sections = ("design_variables", "extractors", "objectives", "constraints")
    target = next(
        (node
         for section in sections
         for node in (data.get(section) or [])
         if node.get("id") == node_id),
        None,
    )
    if target is None:
        raise SpecManagerError(f"Node not found: {node_id}")

    target.update(updates)
    self.save(data, modified_by)
def remove_node(
    self,
    node_id: str,
    modified_by: str = "canvas"
) -> None:
    """
    Delete a node and every canvas edge that references it.

    Args:
        node_id: ID of the node to remove.
        modified_by: Who/what is making the change.

    Raises:
        SpecManagerError: If no node with node_id exists.
    """
    data = self.load_raw()

    removed = False
    for section in ("design_variables", "extractors", "objectives", "constraints"):
        nodes = data.get(section)
        if nodes is None:
            continue
        survivors = [node for node in nodes if node.get("id") != node_id]
        if len(survivors) < len(nodes):
            data[section] = survivors
            removed = True
            break

    if not removed:
        raise SpecManagerError(f"Node not found: {node_id}")

    # Drop dangling edges in either direction.
    canvas = data.get("canvas")
    if canvas and "edges" in canvas:
        canvas["edges"] = [
            edge for edge in canvas["edges"]
            if edge.get("source") != node_id and edge.get("target") != node_id
        ]

    self.save(data, modified_by)

    # Let live clients know the node is gone.
    self._broadcast({
        "type": "node_removed",
        "node_id": node_id,
        "modified_by": modified_by
    })
def update_node_position(
    self,
    node_id: str,
    position: Dict[str, float],
    modified_by: str = "canvas"
) -> None:
    """
    Move a node on the canvas by rewriting its canvas_position field.

    Args:
        node_id: ID of the node to move.
        position: New coordinates, e.g. {"x": 120.0, "y": 80.0}.
        modified_by: Who/what is making the change.
    """
    self.update_node(node_id, updates={"canvas_position": position}, modified_by=modified_by)
def add_edge(
    self,
    source: str,
    target: str,
    modified_by: str = "canvas"
) -> None:
    """
    Connect two nodes with a canvas edge (idempotent).

    Args:
        source: Source node ID.
        target: Target node ID.
        modified_by: Who/what is making the change.
    """
    data = self.load_raw()

    # Lazily create the canvas section and its edge list.
    canvas = data.get("canvas")
    if canvas is None:
        canvas = data["canvas"] = {}
    edges = canvas.get("edges")
    if edges is None:
        edges = canvas["edges"] = []

    # No-op (and no save) when this exact edge already exists.
    if any(e.get("source") == source and e.get("target") == target for e in edges):
        return

    edges.append({
        "source": source,
        "target": target
    })
    self.save(data, modified_by)
def remove_edge(
    self,
    source: str,
    target: str,
    modified_by: str = "canvas"
) -> None:
    """
    Delete the canvas edge from *source* to *target*, if present.

    The spec is saved unconditionally, so metadata is refreshed even when
    no edge matched (same as the original behavior).

    Args:
        source: Source node ID.
        target: Target node ID.
        modified_by: Who/what is making the change.
    """
    data = self.load_raw()

    canvas = data.get("canvas")
    if canvas and "edges" in canvas:
        canvas["edges"] = [
            edge for edge in canvas["edges"]
            if (edge.get("source"), edge.get("target")) != (source, target)
        ]

    self.save(data, modified_by)
# =========================================================================
# Custom Function Support
# =========================================================================
def add_custom_function(
    self,
    name: str,
    code: str,
    outputs: List[str],
    description: Optional[str] = None,
    modified_by: str = "claude"
) -> str:
    """
    Add a custom extractor backed by user-supplied Python source.

    Args:
        name: Function name (also used in the compile() filename and the
            default display name).
        code: Python source code for the extractor.
        outputs: Names of the values the function produces.
        description: Optional display name; defaults to "Custom: <name>".
        modified_by: Who/what is making the change.

    Returns:
        Generated extractor ID.

    Raises:
        SpecValidationError: If the Python source has a syntax error.
    """
    # Fail fast on bad source before touching the spec file.
    try:
        compile(code, f"<custom:{name}>", "exec")
    except SyntaxError as e:
        raise SpecValidationError(
            f"Invalid Python syntax: {e.msg} at line {e.lineno}"
        )

    data = self.load_raw()

    # Generate extractor ID
    ext_id = self._generate_id("extractor", data)

    extractor = {
        "id": ext_id,
        "name": description or f"Custom: {name}",
        "type": "custom_function",
        "builtin": False,
        "function": {
            "name": name,
            "module": "custom_extractors.dynamic",
            "source_code": code
        },
        "outputs": [{"name": o, "metric": "custom"} for o in outputs],
        "canvas_position": self._auto_position("extractor", data)
    }

    # Guard against a missing/null "extractors" section (mirrors add_node)
    # instead of raising KeyError on a spec that has no extractors yet.
    if data.get("extractors") is None:
        data["extractors"] = []
    data["extractors"].append(extractor)

    self.save(data, modified_by)
    return ext_id
def update_custom_function(
    self,
    extractor_id: str,
    code: Optional[str] = None,
    outputs: Optional[List[str]] = None,
    modified_by: str = "claude"
) -> None:
    """
    Replace the source code and/or outputs of an existing custom extractor.

    Args:
        extractor_id: ID of the custom extractor to modify.
        code: New Python source, validated before being stored (optional).
        outputs: New output names (optional).
        modified_by: Who/what is making the change.

    Raises:
        SpecManagerError: If the extractor is missing or not a custom function.
        SpecValidationError: If the replacement code has a syntax error.
    """
    data = self.load_raw()

    extractor = next(
        (ext for ext in data.get("extractors", []) if ext.get("id") == extractor_id),
        None,
    )
    if extractor is None:
        raise SpecManagerError(f"Extractor not found: {extractor_id}")
    if extractor.get("type") != "custom_function":
        raise SpecManagerError(f"Extractor {extractor_id} is not a custom function")

    if code is not None:
        # Reject syntactically broken code before storing it.
        try:
            compile(code, f"<custom:{extractor_id}>", "exec")
        except SyntaxError as e:
            raise SpecValidationError(
                f"Invalid Python syntax: {e.msg} at line {e.lineno}"
            )
        extractor.setdefault("function", {})["source_code"] = code

    if outputs is not None:
        extractor["outputs"] = [{"name": o, "metric": "custom"} for o in outputs]

    self.save(data, modified_by)
# =========================================================================
# WebSocket Subscription
# =========================================================================
def subscribe(self, subscriber: WebSocketSubscriber) -> None:
    """Register *subscriber* for spec-change broadcasts (idempotent)."""
    already_registered = subscriber in self._subscribers
    if not already_registered:
        self._subscribers.append(subscriber)
def unsubscribe(self, subscriber: WebSocketSubscriber) -> None:
    """Drop *subscriber* from the broadcast list; no-op when absent."""
    try:
        # remove() deletes the first occurrence, matching the original
        # membership-check-then-remove behavior.
        self._subscribers.remove(subscriber)
    except ValueError:
        pass
def _broadcast(self, message: Dict[str, Any]) -> None:
    """Fire-and-forget delivery of *message* to every subscriber.

    Best-effort by design: no failure here may propagate to the caller,
    since broadcasts are triggered from save/patch paths that must not
    break because a websocket client went away.
    """
    import asyncio  # local import: only needed when a broadcast actually fires
    for subscriber in self._subscribers:
        try:
            # Handle both sync and async contexts
            try:
                loop = asyncio.get_running_loop()
                # Schedule delivery on the running loop without awaiting it.
                loop.create_task(subscriber.send_json(message))
            except RuntimeError:
                # No running event loop: the message is silently dropped for
                # this subscriber. TODO(review): consider queueing instead.
                pass
        except Exception:
            # Subscriber may have disconnected; drop it silently rather than
            # failing the spec operation that triggered the broadcast.
            pass
# =========================================================================
# Helper Methods
# =========================================================================
def _compute_hash(self, data: Dict) -> str:
"""Compute hash of spec data for conflict detection."""
# Sort keys for consistent hashing
json_str = json.dumps(data, sort_keys=True, ensure_ascii=False)
return hashlib.sha256(json_str.encode()).hexdigest()[:16]
def _generate_id(self, node_type: str, data: Dict) -> str:
"""Generate unique ID for a node type."""
prefix_map = {
"designVar": "dv",
"design_variable": "dv",
"extractor": "ext",
"objective": "obj",
"constraint": "con"
}
prefix = prefix_map.get(node_type, node_type[:3])
# Find existing IDs
section = self._get_section_for_type(node_type)
existing_ids: Set[str] = set()
if section in data and data[section]:
existing_ids = {n.get("id", "") for n in data[section]}
# Generate next available ID
for i in range(1, 1000):
new_id = f"{prefix}_{i:03d}"
if new_id not in existing_ids:
return new_id
raise SpecManagerError(f"Cannot generate ID for {node_type}: too many nodes")
def _get_section_for_type(self, node_type: str) -> str:
"""Map node type to spec section name."""
section_map = {
"designVar": "design_variables",
"design_variable": "design_variables",
"extractor": "extractors",
"objective": "objectives",
"constraint": "constraints"
}
return section_map.get(node_type, node_type + "s")
def _auto_position(self, node_type: str, data: Dict) -> Dict[str, float]:
"""Calculate auto position for a new node."""
# Default x positions by type
x_positions = {
"designVar": 50,
"design_variable": 50,
"extractor": 740,
"objective": 1020,
"constraint": 1020
}
x = x_positions.get(node_type, 400)
# Find max y position for this type
section = self._get_section_for_type(node_type)
max_y = 0
if section in data and data[section]:
for node in data[section]:
pos = node.get("canvas_position", {})
y = pos.get("y", 0)
if y > max_y:
max_y = y
# Place below existing nodes
y = max_y + 100 if max_y > 0 else 100
return {"x": x, "y": y}
# =========================================================================
# Factory Function
# =========================================================================
def get_spec_manager(study_path: Union[str, Path]) -> SpecManager:
    """Construct a SpecManager bound to the study at *study_path*.

    Args:
        study_path: Filesystem location of the study directory.

    Returns:
        A fresh SpecManager instance for that study.
    """
    manager = SpecManager(study_path)
    return manager