feat: Add Protocol 13 adaptive optimization, Plotly charts, and dashboard improvements

## Protocol 13: Adaptive Multi-Objective Optimization
- Iterative FEA + Neural Network surrogate workflow
- Initial FEA sampling, NN training, NN-accelerated search
- FEA validation of top NN predictions, retraining loop
- adaptive_state.json tracks iteration history and best values
- M1 mirror study (V11) with 103 FEA evaluations and 3000 NN-surrogate trials

## Dashboard Visualization Enhancements
- Added Plotly.js interactive charts (parallel coords, Pareto, convergence)
- Lazy loading with React.lazy() for performance
- Code splitting: use plotly.js-basic-dist (~1MB bundle vs ~3.5MB for full plotly.js)
- Chart library toggle (Recharts default, Plotly on-demand)
- ExpandableChart component for full-screen modal views
- ConsoleOutput component for real-time log viewing

## Documentation
- Protocol 13 detailed documentation
- Dashboard visualization guide
- Plotly components README
- Updated run-optimization skill with Mode 5 (adaptive)

## Bug Fixes
- Fixed TypeScript errors in dashboard components
- Fixed Card component to accept ReactNode title
- Removed unused imports across components

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Antoine
2025-12-04 07:41:54 -05:00
parent e74f1ccf36
commit 8cbdbcad78
270 changed files with 15471 additions and 517 deletions

View File

@@ -83,15 +83,27 @@ class DesignConditionedConv(MessagePassing):
Args:
x: Node features [num_nodes, in_channels]
edge_index: Edge connectivity [2, num_edges]
design_features: Design parameters [design_dim] (broadcast to all nodes)
design_features: Design parameters [hidden] or [num_nodes, hidden]
edge_attr: Edge features [num_edges, edge_dim] (optional)
Returns:
Updated node features [num_nodes, out_channels]
"""
# Broadcast design features to match number of nodes
num_nodes = x.size(0)
design_broadcast = design_features.unsqueeze(0).expand(num_nodes, -1)
# Handle different input shapes for design_features
if design_features.dim() == 1:
# Single design vector [hidden] -> broadcast to all nodes
design_broadcast = design_features.unsqueeze(0).expand(num_nodes, -1)
elif design_features.dim() == 2 and design_features.size(0) == num_nodes:
# Already per-node [num_nodes, hidden]
design_broadcast = design_features
elif design_features.dim() == 2 and design_features.size(0) == 1:
# Single design [1, hidden] -> broadcast
design_broadcast = design_features.expand(num_nodes, -1)
else:
# Fallback: take mean across batch dimension if needed
design_broadcast = design_features.mean(dim=0).unsqueeze(0).expand(num_nodes, -1)
return self.propagate(
edge_index,
@@ -319,54 +331,53 @@ class ParametricFieldPredictor(nn.Module):
- displacement: (optional) Displacement field [num_nodes, 6]
"""
x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
batch = data.batch if hasattr(data, 'batch') else torch.zeros(x.size(0), dtype=torch.long, device=x.device)
num_nodes = x.size(0)
# Handle design params shape
# Handle design params shape - ensure 2D [batch_size, design_dim]
if design_params.dim() == 1:
design_params = design_params.unsqueeze(0)
# Encode design parameters
design_encoded = self.design_encoder(design_params) # [batch, hidden]
batch_size = design_params.size(0)
# For single graph, broadcast design to all nodes
if design_encoded.size(0) == 1:
design_for_nodes = design_encoded.squeeze(0) # [hidden]
else:
# For batched graphs, get design for each node based on batch assignment
design_for_nodes = design_encoded[batch] # [num_nodes, hidden]
# Encode design parameters: [batch_size, design_dim] -> [batch_size, hidden]
design_encoded = self.design_encoder(design_params)
# Encode nodes
x = self.node_encoder(x) # [num_nodes, hidden]
# Encode nodes (shared across all designs)
x_encoded = self.node_encoder(x) # [num_nodes, hidden]
# Encode edges
# Encode edges (shared across all designs)
if edge_attr is not None:
edge_features = self.edge_encoder(edge_attr) # [num_edges, hidden//2]
else:
edge_features = None
# Message passing with design conditioning
node_embeddings = x
for conv, norm, dropout in zip(self.conv_layers, self.layer_norms, self.dropouts):
# Use appropriate design features based on batching
if design_params.size(0) == 1:
design_input = design_for_nodes
else:
# For batched case, we need to handle per-node design features
design_input = design_for_nodes[0] # Simplified - use first
# Process each design in the batch
all_graph_features = []
x_new = conv(x, edge_index, design_input, edge_features)
x = x + dropout(x_new) # Residual connection
x = norm(x)
for i in range(batch_size):
# Get design for this sample
design_i = design_encoded[i] # [hidden]
# Global pooling
x_mean = global_mean_pool(x, batch) # [batch, hidden]
x_max = global_max_pool(x, batch) # [batch, hidden]
# Reset node features for this sample
x = x_encoded.clone()
# Concatenate pooled features with design encoding
if design_encoded.size(0) == 1 and x_mean.size(0) > 1:
design_encoded = design_encoded.expand(x_mean.size(0), -1)
# Message passing with design conditioning
for conv, norm, dropout in zip(self.conv_layers, self.layer_norms, self.dropouts):
x_new = conv(x, edge_index, design_i, edge_features)
x = x + dropout(x_new) # Residual connection
x = norm(x)
graph_features = torch.cat([x_mean, x_max, design_encoded], dim=-1) # [batch, 3*hidden]
# Global pooling for this sample
batch_idx = torch.zeros(num_nodes, dtype=torch.long, device=x.device)
x_mean = global_mean_pool(x, batch_idx) # [1, hidden]
x_max = global_max_pool(x, batch_idx) # [1, hidden]
# Concatenate pooled + design features
graph_feat = torch.cat([x_mean, x_max, design_encoded[i:i+1]], dim=-1) # [1, 3*hidden]
all_graph_features.append(graph_feat)
# Stack all samples
graph_features = torch.cat(all_graph_features, dim=0) # [batch_size, 3*hidden]
# Predict objectives
mass = self.mass_head(graph_features).squeeze(-1)
@@ -381,9 +392,9 @@ class ParametricFieldPredictor(nn.Module):
'max_stress': max_stress
}
# Optionally return displacement field
# Optionally return displacement field (uses last processed x)
if return_fields:
displacement_field = self.field_decoder(node_embeddings) # [num_nodes, 6]
displacement_field = self.field_decoder(x) # [num_nodes, 6]
results['displacement'] = displacement_field
return results