feat(dashboard): Enhanced chat, spec management, and Claude integration
Backend: - spec.py: New AtomizerSpec REST API endpoints - spec_manager.py: SpecManager service for unified config - interview_engine.py: Study creation interview logic - claude.py: Enhanced Claude API with context - optimization.py: Extended optimization endpoints - context_builder.py, session_manager.py: Improved services Frontend: - Chat components: Enhanced message rendering, tool call cards - Hooks: useClaudeCode, useSpecWebSocket, improved useChat - Pages: Updated Dashboard, Analysis, Insights, Setup, Home - Components: ParallelCoordinatesPlot, ParetoPlot improvements - App.tsx: Route updates for canvas/studio Infrastructure: - vite.config.ts: Build configuration updates - start/stop-dashboard.bat: Script improvements
This commit is contained in:
@@ -187,7 +187,15 @@ async def session_websocket(websocket: WebSocket, session_id: str):
|
||||
continue
|
||||
|
||||
# Get canvas state from message or use stored state
|
||||
canvas_state = data.get("canvas_state") or current_canvas_state
|
||||
msg_canvas = data.get("canvas_state")
|
||||
canvas_state = msg_canvas if msg_canvas is not None else current_canvas_state
|
||||
|
||||
# Debug logging
|
||||
if canvas_state:
|
||||
node_count = len(canvas_state.get("nodes", []))
|
||||
print(f"[Claude WS] Sending message with canvas state: {node_count} nodes")
|
||||
else:
|
||||
print("[Claude WS] Sending message WITHOUT canvas state")
|
||||
|
||||
async for chunk in manager.send_message(
|
||||
session_id,
|
||||
@@ -401,6 +409,175 @@ async def websocket_chat(websocket: WebSocket):
|
||||
pass
|
||||
|
||||
|
||||
# ========== POWER MODE: Direct API with Write Tools ==========
|
||||
|
||||
@router.websocket("/sessions/{session_id}/ws/power")
|
||||
async def power_mode_websocket(websocket: WebSocket, session_id: str):
|
||||
"""
|
||||
WebSocket for power mode chat using direct Anthropic API with write tools.
|
||||
|
||||
Unlike the regular /ws endpoint which uses Claude CLI + MCP,
|
||||
this uses AtomizerClaudeAgent directly with built-in write tools.
|
||||
This allows immediate modifications without permission prompts.
|
||||
|
||||
Message formats (client -> server):
|
||||
{"type": "message", "content": "user message"}
|
||||
{"type": "set_study", "study_id": "study_name"}
|
||||
{"type": "ping"}
|
||||
|
||||
Message formats (server -> client):
|
||||
{"type": "text", "content": "..."}
|
||||
{"type": "tool_call", "tool": "...", "input": {...}}
|
||||
{"type": "tool_result", "result": "..."}
|
||||
{"type": "done", "tool_calls": [...]}
|
||||
{"type": "error", "message": "..."}
|
||||
{"type": "spec_modified", "changes": [...]}
|
||||
{"type": "pong"}
|
||||
"""
|
||||
await websocket.accept()
|
||||
|
||||
manager = get_session_manager()
|
||||
session = manager.get_session(session_id)
|
||||
|
||||
if not session:
|
||||
await websocket.send_json({"type": "error", "message": "Session not found"})
|
||||
await websocket.close()
|
||||
return
|
||||
|
||||
# Import AtomizerClaudeAgent for direct API access
|
||||
from api.services.claude_agent import AtomizerClaudeAgent
|
||||
|
||||
# Create agent with study context
|
||||
agent = AtomizerClaudeAgent(study_id=session.study_id)
|
||||
conversation_history: List[Dict[str, Any]] = []
|
||||
|
||||
# Load initial spec and set canvas state so Claude sees current canvas
|
||||
initial_spec = agent.load_current_spec()
|
||||
if initial_spec:
|
||||
# Send initial spec to frontend
|
||||
await websocket.send_json({
|
||||
"type": "spec_updated",
|
||||
"spec": initial_spec,
|
||||
"reason": "initial_load"
|
||||
})
|
||||
|
||||
try:
|
||||
while True:
|
||||
data = await websocket.receive_json()
|
||||
|
||||
if data.get("type") == "message":
|
||||
content = data.get("content", "")
|
||||
if not content:
|
||||
continue
|
||||
|
||||
try:
|
||||
# Use streaming API with tool support for real-time response
|
||||
last_tool_calls = []
|
||||
async for event in agent.chat_stream_with_tools(content, conversation_history):
|
||||
event_type = event.get("type")
|
||||
|
||||
if event_type == "text":
|
||||
# Stream text tokens to frontend immediately
|
||||
await websocket.send_json({
|
||||
"type": "text",
|
||||
"content": event.get("content", ""),
|
||||
})
|
||||
|
||||
elif event_type == "tool_call":
|
||||
# Tool is being called
|
||||
tool_info = event.get("tool", {})
|
||||
await websocket.send_json({
|
||||
"type": "tool_call",
|
||||
"tool": tool_info,
|
||||
})
|
||||
|
||||
elif event_type == "tool_result":
|
||||
# Tool finished executing
|
||||
tool_name = event.get("tool", "")
|
||||
await websocket.send_json({
|
||||
"type": "tool_result",
|
||||
"tool": tool_name,
|
||||
"result": event.get("result", ""),
|
||||
})
|
||||
|
||||
# If it was a write tool, send full updated spec
|
||||
if tool_name in ["add_design_variable", "add_extractor",
|
||||
"add_objective", "add_constraint",
|
||||
"update_spec_field", "remove_node",
|
||||
"create_study"]:
|
||||
# Load updated spec and update agent's canvas state
|
||||
updated_spec = agent.load_current_spec()
|
||||
if updated_spec:
|
||||
await websocket.send_json({
|
||||
"type": "spec_updated",
|
||||
"tool": tool_name,
|
||||
"spec": updated_spec, # Full spec for direct canvas update
|
||||
})
|
||||
|
||||
elif event_type == "done":
|
||||
# Streaming complete
|
||||
last_tool_calls = event.get("tool_calls", [])
|
||||
await websocket.send_json({
|
||||
"type": "done",
|
||||
"tool_calls": last_tool_calls,
|
||||
})
|
||||
|
||||
# Update conversation history for next message
|
||||
# Note: For proper history tracking, we'd need to store messages properly
|
||||
# For now, we append the user message and response
|
||||
conversation_history.append({"role": "user", "content": content})
|
||||
conversation_history.append({"role": "assistant", "content": event.get("response", "")})
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
await websocket.send_json({
|
||||
"type": "error",
|
||||
"message": str(e),
|
||||
})
|
||||
|
||||
elif data.get("type") == "canvas_edit":
|
||||
# User made a manual edit to the canvas - update Claude's context
|
||||
spec = data.get("spec")
|
||||
if spec:
|
||||
agent.set_canvas_state(spec)
|
||||
await websocket.send_json({
|
||||
"type": "canvas_edit_received",
|
||||
"acknowledged": True
|
||||
})
|
||||
|
||||
elif data.get("type") == "set_study":
|
||||
study_id = data.get("study_id")
|
||||
if study_id:
|
||||
await manager.set_study_context(session_id, study_id)
|
||||
# Recreate agent with new study context
|
||||
agent = AtomizerClaudeAgent(study_id=study_id)
|
||||
conversation_history = [] # Clear history on study change
|
||||
# Load spec for new study
|
||||
new_spec = agent.load_current_spec()
|
||||
await websocket.send_json({
|
||||
"type": "context_updated",
|
||||
"study_id": study_id,
|
||||
})
|
||||
if new_spec:
|
||||
await websocket.send_json({
|
||||
"type": "spec_updated",
|
||||
"spec": new_spec,
|
||||
"reason": "study_change"
|
||||
})
|
||||
|
||||
elif data.get("type") == "ping":
|
||||
await websocket.send_json({"type": "pong"})
|
||||
|
||||
except WebSocketDisconnect:
|
||||
pass
|
||||
except Exception as e:
|
||||
try:
|
||||
await websocket.send_json({"type": "error", "message": str(e)})
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
@router.get("/suggestions")
|
||||
async def get_chat_suggestions(study_id: Optional[str] = None):
|
||||
"""
|
||||
|
||||
@@ -38,16 +38,30 @@ def resolve_study_path(study_id: str) -> Path:
|
||||
"""Find study folder by scanning all topic directories.
|
||||
|
||||
Supports nested folder structure: studies/Topic/study_name/
|
||||
Study ID is the short name (e.g., 'm1_mirror_adaptive_V14')
|
||||
Study ID can be:
|
||||
- Short name (e.g., 'm1_mirror_adaptive_V14') - scans all topic folders
|
||||
- Full nested path (e.g., 'M1_Mirror/m1_mirror_cost_reduction_lateral')
|
||||
|
||||
Returns the full path to the study directory.
|
||||
Raises HTTPException 404 if not found.
|
||||
"""
|
||||
# Handle nested path format (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
|
||||
if "/" in study_id:
|
||||
# Try with forward slashes
|
||||
nested_path = STUDIES_DIR / study_id
|
||||
if nested_path.exists() and nested_path.is_dir():
|
||||
if _is_valid_study_dir(nested_path):
|
||||
return nested_path
|
||||
# Try with backslashes (Windows path)
|
||||
nested_path = STUDIES_DIR / study_id.replace("/", "\\")
|
||||
if nested_path.exists() and nested_path.is_dir():
|
||||
if _is_valid_study_dir(nested_path):
|
||||
return nested_path
|
||||
|
||||
# First check direct path (backwards compatibility for flat structure)
|
||||
direct_path = STUDIES_DIR / study_id
|
||||
if direct_path.exists() and direct_path.is_dir():
|
||||
# Verify it's actually a study (has 1_setup or config)
|
||||
if (direct_path / "1_setup").exists() or (direct_path / "optimization_config.json").exists():
|
||||
if _is_valid_study_dir(direct_path):
|
||||
return direct_path
|
||||
|
||||
# Scan topic folders for nested structure
|
||||
@@ -55,13 +69,21 @@ def resolve_study_path(study_id: str) -> Path:
|
||||
if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
|
||||
study_dir = topic_dir / study_id
|
||||
if study_dir.exists() and study_dir.is_dir():
|
||||
# Verify it's actually a study
|
||||
if (study_dir / "1_setup").exists() or (study_dir / "optimization_config.json").exists():
|
||||
if _is_valid_study_dir(study_dir):
|
||||
return study_dir
|
||||
|
||||
raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
|
||||
|
||||
|
||||
def _is_valid_study_dir(study_dir: Path) -> bool:
|
||||
"""Check if a directory is a valid study directory."""
|
||||
return (
|
||||
(study_dir / "1_setup").exists() or
|
||||
(study_dir / "optimization_config.json").exists() or
|
||||
(study_dir / "atomizer_spec.json").exists()
|
||||
)
|
||||
|
||||
|
||||
def get_study_topic(study_dir: Path) -> Optional[str]:
|
||||
"""Get the topic folder name for a study, or None if in root."""
|
||||
# Check if parent is a topic folder (not the root studies dir)
|
||||
@@ -1542,16 +1564,17 @@ async def get_study_image(study_id: str, image_path: str):
|
||||
raise HTTPException(status_code=500, detail=f"Failed to serve image: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/studies/{study_id}/config")
|
||||
@router.get("/studies/{study_id:path}/config")
|
||||
async def get_study_config(study_id: str):
|
||||
"""
|
||||
Get the full optimization_config.json for a study
|
||||
Get the study configuration - reads from atomizer_spec.json (v2.0) first,
|
||||
falls back to legacy optimization_config.json if not found.
|
||||
|
||||
Args:
|
||||
study_id: Study identifier
|
||||
|
||||
Returns:
|
||||
JSON with the complete configuration
|
||||
JSON with the complete configuration in a unified format
|
||||
"""
|
||||
try:
|
||||
study_dir = resolve_study_path(study_id)
|
||||
@@ -1559,7 +1582,22 @@ async def get_study_config(study_id: str):
|
||||
if not study_dir.exists():
|
||||
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
|
||||
|
||||
# Look for config in various locations
|
||||
# Priority 1: atomizer_spec.json (v2.0 unified format)
|
||||
spec_file = study_dir / "atomizer_spec.json"
|
||||
if spec_file.exists():
|
||||
with open(spec_file) as f:
|
||||
spec = json.load(f)
|
||||
|
||||
# Transform AtomizerSpec to the expected config format
|
||||
config = _transform_spec_to_config(spec, study_id)
|
||||
return {
|
||||
"config": config,
|
||||
"path": str(spec_file),
|
||||
"study_id": study_id,
|
||||
"source": "atomizer_spec"
|
||||
}
|
||||
|
||||
# Priority 2: Legacy optimization_config.json
|
||||
config_file = study_dir / "1_setup" / "optimization_config.json"
|
||||
if not config_file.exists():
|
||||
config_file = study_dir / "optimization_config.json"
|
||||
@@ -1573,7 +1611,8 @@ async def get_study_config(study_id: str):
|
||||
return {
|
||||
"config": config,
|
||||
"path": str(config_file),
|
||||
"study_id": study_id
|
||||
"study_id": study_id,
|
||||
"source": "legacy_config"
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
@@ -1582,6 +1621,118 @@ async def get_study_config(study_id: str):
|
||||
raise HTTPException(status_code=500, detail=f"Failed to read config: {str(e)}")
|
||||
|
||||
|
||||
def _transform_spec_to_config(spec: dict, study_id: str) -> dict:
|
||||
"""Transform AtomizerSpec v2.0 format to legacy config format for backwards compatibility."""
|
||||
meta = spec.get("meta", {})
|
||||
model = spec.get("model", {})
|
||||
optimization = spec.get("optimization", {})
|
||||
|
||||
# Transform design variables
|
||||
design_variables = []
|
||||
for dv in spec.get("design_variables", []):
|
||||
bounds = dv.get("bounds", {})
|
||||
design_variables.append({
|
||||
"name": dv.get("name"),
|
||||
"expression_name": dv.get("expression_name"),
|
||||
"type": "float" if dv.get("type") == "continuous" else dv.get("type", "float"),
|
||||
"min": bounds.get("min"),
|
||||
"max": bounds.get("max"),
|
||||
"low": bounds.get("min"), # Alias for compatibility
|
||||
"high": bounds.get("max"), # Alias for compatibility
|
||||
"baseline": dv.get("baseline"),
|
||||
"unit": dv.get("units"),
|
||||
"units": dv.get("units"),
|
||||
"enabled": dv.get("enabled", True)
|
||||
})
|
||||
|
||||
# Transform objectives
|
||||
objectives = []
|
||||
for obj in spec.get("objectives", []):
|
||||
source = obj.get("source", {})
|
||||
objectives.append({
|
||||
"name": obj.get("name"),
|
||||
"direction": obj.get("direction", "minimize"),
|
||||
"weight": obj.get("weight", 1.0),
|
||||
"target": obj.get("target"),
|
||||
"unit": obj.get("units"),
|
||||
"units": obj.get("units"),
|
||||
"extractor_id": source.get("extractor_id"),
|
||||
"output_key": source.get("output_key")
|
||||
})
|
||||
|
||||
# Transform constraints
|
||||
constraints = []
|
||||
for con in spec.get("constraints", []):
|
||||
constraints.append({
|
||||
"name": con.get("name"),
|
||||
"type": _operator_to_type(con.get("operator", "<=")),
|
||||
"operator": con.get("operator"),
|
||||
"max_value": con.get("threshold") if con.get("operator") in ["<=", "<"] else None,
|
||||
"min_value": con.get("threshold") if con.get("operator") in [">=", ">"] else None,
|
||||
"bound": con.get("threshold"),
|
||||
"unit": con.get("units"),
|
||||
"units": con.get("units")
|
||||
})
|
||||
|
||||
# Transform extractors
|
||||
extractors = []
|
||||
for ext in spec.get("extractors", []):
|
||||
extractors.append({
|
||||
"name": ext.get("name"),
|
||||
"type": ext.get("type"),
|
||||
"builtin": ext.get("builtin", True),
|
||||
"config": ext.get("config", {}),
|
||||
"outputs": ext.get("outputs", [])
|
||||
})
|
||||
|
||||
# Get algorithm info
|
||||
algorithm = optimization.get("algorithm", {})
|
||||
budget = optimization.get("budget", {})
|
||||
|
||||
# Build the config in legacy format
|
||||
config = {
|
||||
"study_name": meta.get("study_name", study_id),
|
||||
"description": meta.get("description", ""),
|
||||
"version": meta.get("version", "2.0"),
|
||||
"design_variables": design_variables,
|
||||
"objectives": objectives,
|
||||
"constraints": constraints,
|
||||
"extractors": extractors,
|
||||
"optimization": {
|
||||
"algorithm": algorithm.get("type", "TPE"),
|
||||
"n_trials": budget.get("max_trials", 100),
|
||||
"max_time_hours": budget.get("max_time_hours"),
|
||||
"convergence_patience": budget.get("convergence_patience")
|
||||
},
|
||||
"optimization_settings": {
|
||||
"sampler": algorithm.get("type", "TPE"),
|
||||
"n_trials": budget.get("max_trials", 100)
|
||||
},
|
||||
"algorithm": {
|
||||
"name": "Optuna",
|
||||
"sampler": algorithm.get("type", "TPE"),
|
||||
"n_trials": budget.get("max_trials", 100)
|
||||
},
|
||||
"model": model,
|
||||
"sim_file": model.get("sim", {}).get("path") if isinstance(model.get("sim"), dict) else None
|
||||
}
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def _operator_to_type(operator: str) -> str:
|
||||
"""Convert constraint operator to legacy type string."""
|
||||
mapping = {
|
||||
"<=": "le",
|
||||
"<": "le",
|
||||
">=": "ge",
|
||||
">": "ge",
|
||||
"==": "eq",
|
||||
"=": "eq"
|
||||
}
|
||||
return mapping.get(operator, "le")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Process Control Endpoints
|
||||
# ============================================================================
|
||||
@@ -2851,7 +3002,162 @@ async def get_study_runs(study_id: str):
|
||||
|
||||
|
||||
class UpdateConfigRequest(BaseModel):
|
||||
config: dict
|
||||
config: Optional[dict] = None
|
||||
intent: Optional[dict] = None
|
||||
|
||||
|
||||
def intent_to_config(intent: dict, existing_config: Optional[dict] = None) -> dict:
|
||||
"""
|
||||
Convert canvas intent format to optimization_config.json format.
|
||||
|
||||
Preserves existing config fields that aren't in the intent.
|
||||
"""
|
||||
# Start with existing config or empty
|
||||
config = existing_config.copy() if existing_config else {}
|
||||
|
||||
# Metadata
|
||||
if intent.get('model', {}).get('path'):
|
||||
model_path = Path(intent['model']['path']).name
|
||||
if 'simulation' not in config:
|
||||
config['simulation'] = {}
|
||||
config['simulation']['model_file'] = model_path
|
||||
# Try to infer other files from model name
|
||||
base_name = model_path.replace('.prt', '')
|
||||
if not config['simulation'].get('fem_file'):
|
||||
config['simulation']['fem_file'] = f"{base_name}_fem1.fem"
|
||||
if not config['simulation'].get('sim_file'):
|
||||
config['simulation']['sim_file'] = f"{base_name}_sim1.sim"
|
||||
|
||||
# Solver
|
||||
if intent.get('solver', {}).get('type'):
|
||||
solver_type = intent['solver']['type']
|
||||
if 'simulation' not in config:
|
||||
config['simulation'] = {}
|
||||
config['simulation']['solver'] = 'nastran'
|
||||
# Map SOL types to analysis_types
|
||||
sol_to_analysis = {
|
||||
'SOL101': ['static'],
|
||||
'SOL103': ['modal'],
|
||||
'SOL105': ['buckling'],
|
||||
'SOL106': ['nonlinear'],
|
||||
'SOL111': ['modal', 'frequency_response'],
|
||||
'SOL112': ['modal', 'transient'],
|
||||
}
|
||||
config['simulation']['analysis_types'] = sol_to_analysis.get(solver_type, ['static'])
|
||||
|
||||
# Design Variables
|
||||
if intent.get('design_variables'):
|
||||
config['design_variables'] = []
|
||||
for dv in intent['design_variables']:
|
||||
config['design_variables'].append({
|
||||
'parameter': dv.get('name', dv.get('expression_name', '')),
|
||||
'bounds': [dv.get('min', 0), dv.get('max', 100)],
|
||||
'description': dv.get('description', f"Design variable: {dv.get('name', '')}"),
|
||||
})
|
||||
|
||||
# Extractors → used for objectives/constraints extraction
|
||||
extractor_map = {}
|
||||
if intent.get('extractors'):
|
||||
for ext in intent['extractors']:
|
||||
ext_id = ext.get('id', '')
|
||||
ext_name = ext.get('name', '')
|
||||
extractor_map[ext_name] = ext
|
||||
|
||||
# Objectives
|
||||
if intent.get('objectives'):
|
||||
config['objectives'] = []
|
||||
for obj in intent['objectives']:
|
||||
obj_config = {
|
||||
'name': obj.get('name', 'objective'),
|
||||
'goal': obj.get('direction', 'minimize'),
|
||||
'weight': obj.get('weight', 1.0),
|
||||
'description': obj.get('description', f"Objective: {obj.get('name', '')}"),
|
||||
}
|
||||
# Add extraction config if extractor referenced
|
||||
extractor_name = obj.get('extractor')
|
||||
if extractor_name and extractor_name in extractor_map:
|
||||
ext = extractor_map[extractor_name]
|
||||
ext_config = ext.get('config', {})
|
||||
obj_config['extraction'] = {
|
||||
'action': _extractor_id_to_action(ext.get('id', '')),
|
||||
'domain': 'result_extraction',
|
||||
'params': ext_config,
|
||||
}
|
||||
config['objectives'].append(obj_config)
|
||||
|
||||
# Constraints
|
||||
if intent.get('constraints'):
|
||||
config['constraints'] = []
|
||||
for con in intent['constraints']:
|
||||
op = con.get('operator', '<=')
|
||||
con_type = 'less_than' if '<' in op else 'greater_than' if '>' in op else 'equal_to'
|
||||
con_config = {
|
||||
'name': con.get('name', 'constraint'),
|
||||
'type': con_type,
|
||||
'threshold': con.get('value', 0),
|
||||
'description': con.get('description', f"Constraint: {con.get('name', '')}"),
|
||||
}
|
||||
# Add extraction config if extractor referenced
|
||||
extractor_name = con.get('extractor')
|
||||
if extractor_name and extractor_name in extractor_map:
|
||||
ext = extractor_map[extractor_name]
|
||||
ext_config = ext.get('config', {})
|
||||
con_config['extraction'] = {
|
||||
'action': _extractor_id_to_action(ext.get('id', '')),
|
||||
'domain': 'result_extraction',
|
||||
'params': ext_config,
|
||||
}
|
||||
config['constraints'].append(con_config)
|
||||
|
||||
# Optimization settings
|
||||
if intent.get('optimization'):
|
||||
opt = intent['optimization']
|
||||
if 'optimization_settings' not in config:
|
||||
config['optimization_settings'] = {}
|
||||
if opt.get('max_trials'):
|
||||
config['optimization_settings']['n_trials'] = opt['max_trials']
|
||||
if opt.get('method'):
|
||||
# Map method names to Optuna sampler names
|
||||
method_map = {
|
||||
'TPE': 'TPESampler',
|
||||
'CMA-ES': 'CmaEsSampler',
|
||||
'NSGA-II': 'NSGAIISampler',
|
||||
'RandomSearch': 'RandomSampler',
|
||||
'GP-BO': 'GPSampler',
|
||||
}
|
||||
config['optimization_settings']['sampler'] = method_map.get(opt['method'], opt['method'])
|
||||
|
||||
# Surrogate
|
||||
if intent.get('surrogate', {}).get('enabled'):
|
||||
config['surrogate'] = {
|
||||
'type': intent['surrogate'].get('type', 'MLP'),
|
||||
'min_trials': intent['surrogate'].get('min_trials', 20),
|
||||
}
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def _extractor_id_to_action(ext_id: str) -> str:
|
||||
"""Map extractor IDs (E1, E2, etc.) to extraction action names."""
|
||||
action_map = {
|
||||
'E1': 'extract_displacement',
|
||||
'E2': 'extract_frequency',
|
||||
'E3': 'extract_stress',
|
||||
'E4': 'extract_mass',
|
||||
'E5': 'extract_mass',
|
||||
'E8': 'extract_zernike',
|
||||
'E9': 'extract_zernike',
|
||||
'E10': 'extract_zernike',
|
||||
'displacement': 'extract_displacement',
|
||||
'frequency': 'extract_frequency',
|
||||
'stress': 'extract_stress',
|
||||
'mass': 'extract_mass',
|
||||
'mass_bdf': 'extract_mass',
|
||||
'mass_cad': 'extract_mass',
|
||||
'zernike': 'extract_zernike',
|
||||
'zernike_opd': 'extract_zernike',
|
||||
}
|
||||
return action_map.get(ext_id, 'extract_displacement')
|
||||
|
||||
|
||||
@router.put("/studies/{study_id}/config")
|
||||
@@ -2859,9 +3165,13 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
|
||||
"""
|
||||
Update the optimization_config.json for a study
|
||||
|
||||
Accepts either:
|
||||
- {"config": {...}} - Direct config object (overwrites)
|
||||
- {"intent": {...}} - Canvas intent (converted and merged with existing)
|
||||
|
||||
Args:
|
||||
study_id: Study identifier
|
||||
request: New configuration data
|
||||
request: New configuration data (config or intent)
|
||||
|
||||
Returns:
|
||||
JSON with success status
|
||||
@@ -2891,9 +3201,24 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
|
||||
backup_file = config_file.with_suffix('.json.backup')
|
||||
shutil.copy(config_file, backup_file)
|
||||
|
||||
# Determine which format was provided
|
||||
if request.config is not None:
|
||||
# Direct config update
|
||||
new_config = request.config
|
||||
elif request.intent is not None:
|
||||
# Convert intent to config, merging with existing
|
||||
with open(config_file, 'r') as f:
|
||||
existing_config = json.load(f)
|
||||
new_config = intent_to_config(request.intent, existing_config)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Request must include either 'config' or 'intent' field"
|
||||
)
|
||||
|
||||
# Write new config
|
||||
with open(config_file, 'w') as f:
|
||||
json.dump(request.config, f, indent=2)
|
||||
json.dump(new_config, f, indent=2)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
|
||||
646
atomizer-dashboard/backend/api/routes/spec.py
Normal file
646
atomizer-dashboard/backend/api/routes/spec.py
Normal file
@@ -0,0 +1,646 @@
|
||||
"""
|
||||
AtomizerSpec v2.0 API Endpoints
|
||||
|
||||
REST API for managing AtomizerSpec configurations.
|
||||
All spec modifications flow through these endpoints.
|
||||
|
||||
Endpoints:
|
||||
- GET /studies/{study_id}/spec - Get full spec
|
||||
- PUT /studies/{study_id}/spec - Replace entire spec
|
||||
- PATCH /studies/{study_id}/spec - Partial update
|
||||
- POST /studies/{study_id}/spec/validate - Validate spec
|
||||
- POST /studies/{study_id}/spec/nodes - Add node
|
||||
- PATCH /studies/{study_id}/spec/nodes/{node_id} - Update node
|
||||
- DELETE /studies/{study_id}/spec/nodes/{node_id} - Delete node
|
||||
- POST /studies/{study_id}/spec/custom-functions - Add custom extractor
|
||||
- WebSocket /studies/{study_id}/spec/sync - Real-time sync
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect, Query
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel, Field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
import json
|
||||
import sys
|
||||
import asyncio
|
||||
|
||||
# Add project root to path
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
|
||||
|
||||
from api.services.spec_manager import (
|
||||
SpecManager,
|
||||
SpecManagerError,
|
||||
SpecNotFoundError,
|
||||
SpecConflictError,
|
||||
get_spec_manager,
|
||||
)
|
||||
from optimization_engine.config.spec_models import (
|
||||
AtomizerSpec,
|
||||
ValidationReport,
|
||||
)
|
||||
from optimization_engine.config.spec_validator import SpecValidationError
|
||||
|
||||
router = APIRouter(prefix="/studies/{study_id:path}/spec", tags=["spec"])
|
||||
|
||||
# Base studies directory
|
||||
STUDIES_DIR = Path(__file__).parent.parent.parent.parent.parent / "studies"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Request/Response Models
|
||||
# ============================================================================
|
||||
|
||||
class SpecPatchRequest(BaseModel):
|
||||
"""Request for patching a spec field."""
|
||||
path: str = Field(..., description="JSONPath to the field (e.g., 'objectives[0].weight')")
|
||||
value: Any = Field(..., description="New value")
|
||||
modified_by: str = Field(default="api", description="Who is making the change")
|
||||
|
||||
|
||||
class NodeAddRequest(BaseModel):
|
||||
"""Request for adding a node."""
|
||||
type: str = Field(..., description="Node type: designVar, extractor, objective, constraint")
|
||||
data: Dict[str, Any] = Field(..., description="Node data")
|
||||
modified_by: str = Field(default="canvas", description="Who is making the change")
|
||||
|
||||
|
||||
class NodeUpdateRequest(BaseModel):
|
||||
"""Request for updating a node."""
|
||||
updates: Dict[str, Any] = Field(..., description="Fields to update")
|
||||
modified_by: str = Field(default="canvas", description="Who is making the change")
|
||||
|
||||
|
||||
class CustomFunctionRequest(BaseModel):
|
||||
"""Request for adding a custom extractor function."""
|
||||
name: str = Field(..., description="Function name")
|
||||
code: str = Field(..., description="Python source code")
|
||||
outputs: List[str] = Field(..., description="Output names")
|
||||
description: Optional[str] = Field(default=None, description="Human-readable description")
|
||||
modified_by: str = Field(default="claude", description="Who is making the change")
|
||||
|
||||
|
||||
class ExtractorValidationRequest(BaseModel):
|
||||
"""Request for validating custom extractor code."""
|
||||
function_name: str = Field(default="extract", description="Expected function name")
|
||||
source: str = Field(..., description="Python source code to validate")
|
||||
|
||||
|
||||
class SpecUpdateResponse(BaseModel):
|
||||
"""Response for spec modification operations."""
|
||||
success: bool
|
||||
hash: str
|
||||
modified: str
|
||||
modified_by: str
|
||||
|
||||
|
||||
class NodeAddResponse(BaseModel):
|
||||
"""Response for node add operation."""
|
||||
success: bool
|
||||
node_id: str
|
||||
message: str
|
||||
|
||||
|
||||
class ValidationResponse(BaseModel):
|
||||
"""Response for validation endpoint."""
|
||||
valid: bool
|
||||
errors: List[Dict[str, Any]]
|
||||
warnings: List[Dict[str, Any]]
|
||||
summary: Dict[str, int]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
def resolve_study_path(study_id: str) -> Path:
|
||||
"""Find study folder by scanning all topic directories.
|
||||
|
||||
Supports both formats:
|
||||
- "study_name" - Will scan topic folders to find it
|
||||
- "Topic/study_name" - Direct nested path (e.g., "M1_Mirror/m1_mirror_v1")
|
||||
"""
|
||||
# Handle nested paths (e.g., "M1_Mirror/m1_mirror_cost_reduction_lateral")
|
||||
if "/" in study_id:
|
||||
nested_path = STUDIES_DIR / study_id.replace("/", "\\") # Handle Windows paths
|
||||
if nested_path.exists() and nested_path.is_dir():
|
||||
return nested_path
|
||||
# Also try with forward slashes (Path handles both)
|
||||
nested_path = STUDIES_DIR / study_id
|
||||
if nested_path.exists() and nested_path.is_dir():
|
||||
return nested_path
|
||||
|
||||
# Direct path (flat structure)
|
||||
direct_path = STUDIES_DIR / study_id
|
||||
if direct_path.exists() and direct_path.is_dir():
|
||||
return direct_path
|
||||
|
||||
# Scan topic folders (nested structure)
|
||||
for topic_dir in STUDIES_DIR.iterdir():
|
||||
if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
|
||||
study_dir = topic_dir / study_id
|
||||
if study_dir.exists() and study_dir.is_dir():
|
||||
return study_dir
|
||||
|
||||
raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
|
||||
|
||||
|
||||
def get_manager(study_id: str) -> SpecManager:
|
||||
"""Get SpecManager for a study."""
|
||||
study_path = resolve_study_path(study_id)
|
||||
return get_spec_manager(study_path)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# REST Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.get("", response_model=None)
|
||||
async def get_spec(study_id: str):
|
||||
"""
|
||||
Get the full AtomizerSpec for a study.
|
||||
|
||||
Returns the complete spec JSON with all design variables, extractors,
|
||||
objectives, constraints, and canvas state.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"No AtomizerSpec found for study '{study_id}'. Use migration or create new spec."
|
||||
)
|
||||
|
||||
try:
|
||||
spec = manager.load()
|
||||
return spec.model_dump(mode='json')
|
||||
except SpecValidationError as e:
|
||||
# Return spec even if invalid, but include validation info
|
||||
raw = manager.load_raw()
|
||||
return JSONResponse(
|
||||
status_code=200,
|
||||
content={
|
||||
**raw,
|
||||
"_validation_error": str(e)
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.get("/raw")
|
||||
async def get_spec_raw(study_id: str):
|
||||
"""
|
||||
Get the raw spec JSON without validation.
|
||||
|
||||
Useful for debugging or when spec is invalid.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
return manager.load_raw()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.get("/hash")
|
||||
async def get_spec_hash(study_id: str):
|
||||
"""Get the current spec hash for conflict detection."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
return {"hash": manager.get_hash()}
|
||||
|
||||
|
||||
@router.put("", response_model=SpecUpdateResponse)
|
||||
async def replace_spec(
|
||||
study_id: str,
|
||||
spec: Dict[str, Any],
|
||||
modified_by: str = Query(default="api"),
|
||||
expected_hash: Optional[str] = Query(default=None)
|
||||
):
|
||||
"""
|
||||
Replace the entire spec.
|
||||
|
||||
Validates the new spec before saving. Optionally check for conflicts
|
||||
using expected_hash parameter.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
try:
|
||||
new_hash = manager.save(spec, modified_by=modified_by, expected_hash=expected_hash)
|
||||
reloaded = manager.load()
|
||||
return SpecUpdateResponse(
|
||||
success=True,
|
||||
hash=new_hash,
|
||||
modified=reloaded.meta.modified or "",
|
||||
modified_by=modified_by
|
||||
)
|
||||
except SpecConflictError as e:
|
||||
raise HTTPException(
|
||||
status_code=409,
|
||||
detail={
|
||||
"message": str(e),
|
||||
"current_hash": e.current_hash
|
||||
}
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.patch("", response_model=SpecUpdateResponse)
|
||||
async def patch_spec(study_id: str, request: SpecPatchRequest):
|
||||
"""
|
||||
Partial update to spec using JSONPath.
|
||||
|
||||
Example paths:
|
||||
- "objectives[0].weight" - Update objective weight
|
||||
- "design_variables[1].bounds.max" - Update DV bound
|
||||
- "meta.description" - Update description
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
spec = manager.patch(request.path, request.value, modified_by=request.modified_by)
|
||||
return SpecUpdateResponse(
|
||||
success=True,
|
||||
hash=manager.get_hash(),
|
||||
modified=spec.meta.modified or "",
|
||||
modified_by=request.modified_by
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.post("/validate", response_model=ValidationResponse)
|
||||
async def validate_spec(study_id: str):
|
||||
"""
|
||||
Validate the spec and return detailed report.
|
||||
|
||||
Returns errors, warnings, and summary of the spec contents.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
report = manager.validate_and_report()
|
||||
return ValidationResponse(
|
||||
valid=report.valid,
|
||||
errors=[e.model_dump() for e in report.errors],
|
||||
warnings=[w.model_dump() for w in report.warnings],
|
||||
summary=report.summary.model_dump()
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Node CRUD Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/nodes", response_model=NodeAddResponse)
|
||||
async def add_node(study_id: str, request: NodeAddRequest):
|
||||
"""
|
||||
Add a new node to the spec.
|
||||
|
||||
Supported types: designVar, extractor, objective, constraint
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
valid_types = ["designVar", "extractor", "objective", "constraint"]
|
||||
if request.type not in valid_types:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid node type '{request.type}'. Valid: {valid_types}"
|
||||
)
|
||||
|
||||
try:
|
||||
node_id = manager.add_node(request.type, request.data, modified_by=request.modified_by)
|
||||
return NodeAddResponse(
|
||||
success=True,
|
||||
node_id=node_id,
|
||||
message=f"Added {request.type} node: {node_id}"
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.patch("/nodes/{node_id}")
|
||||
async def update_node(study_id: str, node_id: str, request: NodeUpdateRequest):
|
||||
"""Update an existing node's properties."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.update_node(node_id, request.updates, modified_by=request.modified_by)
|
||||
return {"success": True, "message": f"Updated node {node_id}"}
|
||||
except SpecManagerError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.delete("/nodes/{node_id}")
|
||||
async def delete_node(
|
||||
study_id: str,
|
||||
node_id: str,
|
||||
modified_by: str = Query(default="canvas")
|
||||
):
|
||||
"""
|
||||
Delete a node and all edges referencing it.
|
||||
|
||||
Use with caution - this will also remove any objectives or constraints
|
||||
that reference a deleted extractor.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.remove_node(node_id, modified_by=modified_by)
|
||||
return {"success": True, "message": f"Removed node {node_id}"}
|
||||
except SpecManagerError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Custom Function Endpoint
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/custom-functions", response_model=NodeAddResponse)
|
||||
async def add_custom_function(study_id: str, request: CustomFunctionRequest):
|
||||
"""
|
||||
Add a custom Python function as an extractor.
|
||||
|
||||
The function will be available in the optimization workflow.
|
||||
Claude can use this to add new physics extraction logic.
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
extractor_id = manager.add_custom_function(
|
||||
name=request.name,
|
||||
code=request.code,
|
||||
outputs=request.outputs,
|
||||
description=request.description,
|
||||
modified_by=request.modified_by
|
||||
)
|
||||
return NodeAddResponse(
|
||||
success=True,
|
||||
node_id=extractor_id,
|
||||
message=f"Added custom extractor: {request.name}"
|
||||
)
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# Separate router for non-study-specific endpoints: routes registered here
# take no study_id and are exposed under the "/spec" prefix directly.
validate_router = APIRouter(prefix="/spec", tags=["spec"])
|
||||
|
||||
|
||||
@validate_router.post("/validate-extractor")
async def validate_custom_extractor(request: ExtractorValidationRequest):
    """
    Validate custom extractor Python code.

    Checks syntax, security patterns, and function signature.
    Does not require a study - can be used before adding to spec.

    Returns:
        dict with "valid" (bool) and "errors" (list of strings).

    Raises:
        HTTPException: 500 if the extractor loader module cannot be imported.
    """
    # NOTE: the outer try intentionally wraps both the import and the call,
    # so a lazy ImportError raised inside the validator is also caught.
    try:
        from optimization_engine.extractors.custom_extractor_loader import (
            validate_extractor_code,
            ExtractorSecurityError,
        )

        try:
            is_valid, errors = validate_extractor_code(request.source, request.function_name)
            return {
                "valid": is_valid,
                "errors": errors
            }
        except ExtractorSecurityError as e:
            # Security violations are reported as a failed validation,
            # not an HTTP error.
            return {
                "valid": False,
                "errors": [str(e)]
            }

    except ImportError as e:
        raise HTTPException(
            status_code=500,
            detail=f"Custom extractor loader not available: {e}"
        ) from e
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Edge Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/edges")
|
||||
async def add_edge(
|
||||
study_id: str,
|
||||
source: str = Query(..., description="Source node ID"),
|
||||
target: str = Query(..., description="Target node ID"),
|
||||
modified_by: str = Query(default="canvas")
|
||||
):
|
||||
"""Add a canvas edge between two nodes."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.add_edge(source, target, modified_by=modified_by)
|
||||
return {"success": True, "message": f"Added edge {source} -> {target}"}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.delete("/edges")
|
||||
async def delete_edge(
|
||||
study_id: str,
|
||||
source: str = Query(..., description="Source node ID"),
|
||||
target: str = Query(..., description="Target node ID"),
|
||||
modified_by: str = Query(default="canvas")
|
||||
):
|
||||
"""Remove a canvas edge."""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if not manager.exists():
|
||||
raise HTTPException(status_code=404, detail=f"No spec found for study '{study_id}'")
|
||||
|
||||
try:
|
||||
manager.remove_edge(source, target, modified_by=modified_by)
|
||||
return {"success": True, "message": f"Removed edge {source} -> {target}"}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# WebSocket Sync Endpoint
|
||||
# ============================================================================
|
||||
|
||||
class WebSocketSubscriber:
    """WebSocket subscriber adapter.

    Wraps a FastAPI WebSocket so it can be registered with
    ``manager.subscribe(...)`` (an object exposing async ``send_json``).
    """

    def __init__(self, websocket: WebSocket):
        # Connection that spec-change notifications are forwarded to.
        self.websocket = websocket

    async def send_json(self, data: Dict[str, Any]) -> None:
        """Forward a spec event payload to the connected client."""
        await self.websocket.send_json(data)
|
||||
|
||||
|
||||
@router.websocket("/sync")
|
||||
async def websocket_sync(websocket: WebSocket, study_id: str):
|
||||
"""
|
||||
WebSocket endpoint for real-time spec sync.
|
||||
|
||||
Clients receive notifications when spec changes:
|
||||
- spec_updated: Spec was modified
|
||||
- node_added: New node added
|
||||
- node_removed: Node removed
|
||||
- validation_error: Validation failed
|
||||
"""
|
||||
await websocket.accept()
|
||||
|
||||
manager = get_manager(study_id)
|
||||
subscriber = WebSocketSubscriber(websocket)
|
||||
|
||||
# Subscribe to updates
|
||||
manager.subscribe(subscriber)
|
||||
|
||||
try:
|
||||
# Send initial connection ack
|
||||
await websocket.send_json({
|
||||
"type": "connection_ack",
|
||||
"study_id": study_id,
|
||||
"hash": manager.get_hash() if manager.exists() else None,
|
||||
"message": "Connected to spec sync"
|
||||
})
|
||||
|
||||
# Keep connection alive and handle client messages
|
||||
while True:
|
||||
try:
|
||||
data = await asyncio.wait_for(
|
||||
websocket.receive_json(),
|
||||
timeout=30.0 # Heartbeat interval
|
||||
)
|
||||
|
||||
# Handle client messages
|
||||
msg_type = data.get("type")
|
||||
|
||||
if msg_type == "ping":
|
||||
await websocket.send_json({"type": "pong"})
|
||||
|
||||
elif msg_type == "patch_node":
|
||||
# Client requests node update
|
||||
try:
|
||||
manager.update_node(
|
||||
data["node_id"],
|
||||
data.get("data", {}),
|
||||
modified_by=data.get("modified_by", "canvas")
|
||||
)
|
||||
except Exception as e:
|
||||
await websocket.send_json({
|
||||
"type": "error",
|
||||
"message": str(e)
|
||||
})
|
||||
|
||||
elif msg_type == "update_position":
|
||||
# Client updates node position
|
||||
try:
|
||||
manager.update_node_position(
|
||||
data["node_id"],
|
||||
data["position"],
|
||||
modified_by=data.get("modified_by", "canvas")
|
||||
)
|
||||
except Exception as e:
|
||||
await websocket.send_json({
|
||||
"type": "error",
|
||||
"message": str(e)
|
||||
})
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
# Send heartbeat
|
||||
await websocket.send_json({"type": "heartbeat"})
|
||||
|
||||
except WebSocketDisconnect:
|
||||
pass
|
||||
finally:
|
||||
manager.unsubscribe(subscriber)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Create/Initialize Spec
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/create")
|
||||
async def create_spec(
|
||||
study_id: str,
|
||||
spec: Dict[str, Any],
|
||||
modified_by: str = Query(default="api")
|
||||
):
|
||||
"""
|
||||
Create a new spec for a study.
|
||||
|
||||
Use this when migrating from old config or creating a new study.
|
||||
Will fail if spec already exists (use PUT to replace).
|
||||
"""
|
||||
manager = get_manager(study_id)
|
||||
|
||||
if manager.exists():
|
||||
raise HTTPException(
|
||||
status_code=409,
|
||||
detail=f"Spec already exists for '{study_id}'. Use PUT to replace."
|
||||
)
|
||||
|
||||
try:
|
||||
# Ensure meta fields are set
|
||||
if "meta" not in spec:
|
||||
spec["meta"] = {}
|
||||
spec["meta"]["created_by"] = modified_by
|
||||
|
||||
new_hash = manager.save(spec, modified_by=modified_by)
|
||||
return {
|
||||
"success": True,
|
||||
"hash": new_hash,
|
||||
"message": f"Created spec for {study_id}"
|
||||
}
|
||||
except SpecValidationError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
Reference in New Issue
Block a user