# Provenance (from commit message): adds validation framework, Claude Code
# skills, Atomizer Dashboard, docs reorganization (00-09), neural surrogate
# modules and training infrastructure, and multi-objective optimization support.
"""
|
|
Neural network surrogate integration for Atomizer.
|
|
|
|
This module provides the integration layer between Atomizer optimization framework
|
|
and AtomizerField neural network models for fast FEA predictions.
|
|
|
|
Key Features:
|
|
- Load and manage AtomizerField trained models
|
|
- Convert design variables to neural field format
|
|
- Provide millisecond FEA predictions
|
|
- Automatic fallback to FEA when confidence is low
|
|
- Performance tracking and statistics
|
|
|
|
Usage:
|
|
from optimization_engine.neural_surrogate import NeuralSurrogate, create_surrogate_for_study
|
|
|
|
# Create surrogate for UAV arm study
|
|
surrogate = create_surrogate_for_study(
|
|
model_path="atomizer-field/runs/uav_arm_model/checkpoint_best.pt",
|
|
training_data_dir="atomizer_field_training_data/uav_arm_train"
|
|
)
|
|
|
|
# Predict for new design
|
|
results = surrogate.predict(design_params)
|
|
print(f"Max displacement: {results['max_displacement']:.6f} mm")
|
|
"""
|
|
|
|
import sys
|
|
import time
|
|
import json
|
|
import logging
|
|
import h5py
|
|
from pathlib import Path
|
|
from typing import Dict, Any, Optional, Tuple, List
|
|
import numpy as np
|
|
|
|
logger = logging.getLogger(__name__)

# Make the bundled atomizer-field package importable alongside this module.
_atomizer_field_path = Path(__file__).parent.parent / 'atomizer-field'
if str(_atomizer_field_path) not in sys.path:
    sys.path.insert(0, str(_atomizer_field_path))

# Optional heavy dependencies: torch / torch-geometric. The module degrades
# gracefully when they are missing (surrogates simply become unavailable).
try:
    import torch
    from torch_geometric.data import Data
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False
    logger.warning("PyTorch not installed. Neural surrogate features will be limited.")

# Availability flags for the two AtomizerField model families.
ATOMIZER_FIELD_AVAILABLE = False
PARAMETRIC_MODEL_AVAILABLE = False
if TORCH_AVAILABLE:
    try:
        from neural_models.field_predictor import AtomizerFieldModel, create_model
        ATOMIZER_FIELD_AVAILABLE = True
    except ImportError as e:
        logger.warning(f"AtomizerField modules not found: {e}")

    try:
        from neural_models.parametric_predictor import ParametricFieldPredictor, create_parametric_model
        PARAMETRIC_MODEL_AVAILABLE = True
    except ImportError as e:
        logger.warning(f"Parametric predictor modules not found: {e}")
|
|
class NeuralSurrogate:
    """
    Neural surrogate for fast FEA predictions using a trained AtomizerField model.

    Loads a trained AtomizerField checkpoint plus the training data it was
    fitted on (for normalization statistics and the reference mesh), then
    predicts nodal displacement fields in milliseconds. Derived quantities
    (max displacement magnitude, approximate von Mises stress) are computed
    from the predicted fields.
    """

    def __init__(
        self,
        model_path: Path,
        training_data_dir: Path,
        device: str = 'auto'
    ):
        """
        Initialize neural surrogate.

        Args:
            model_path: Path to trained model checkpoint (.pt file)
            training_data_dir: Path to training data (for normalization stats and mesh)
            device: Computing device ('cuda', 'cpu', or 'auto')

        Raises:
            ImportError: If PyTorch or the AtomizerField modules are unavailable.
            ValueError: If no training cases exist in training_data_dir.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch required. Install: pip install torch torch-geometric")

        if not ATOMIZER_FIELD_AVAILABLE:
            raise ImportError("AtomizerField modules not found")

        self.model_path = Path(model_path)
        self.training_data_dir = Path(training_data_dir)

        # Resolve compute device ('auto' prefers CUDA when available).
        if device == 'auto':
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)

        logger.info(f"Neural Surrogate initializing on {self.device}")

        # Load model weights, normalization stats, and the reference mesh
        # (the mesh topology is constant for parametric optimization).
        self._load_model()
        self._load_normalization_stats()
        self._load_reference_mesh()

        # Performance tracking counters (exposed via get_statistics()).
        self.stats = {
            'predictions': 0,
            'total_time_ms': 0.0,
            'fea_validations': 0
        }

        logger.info(f"Neural Surrogate ready: {self.num_nodes} nodes, model loaded")

    def _load_model(self):
        """Load the trained AtomizerField model from the checkpoint file."""
        logger.info(f"Loading model from {self.model_path}")

        checkpoint = torch.load(self.model_path, map_location=self.device)

        # Rebuild the architecture exactly as it was trained.
        model_config = checkpoint['config']['model']
        self.model = AtomizerFieldModel(**model_config)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model = self.model.to(self.device)
        self.model.eval()

        self.model_config = checkpoint['config']
        self.best_val_loss = checkpoint.get('best_val_loss', None)

        n_params = sum(p.numel() for p in self.model.parameters())
        # BUG FIX: formatting None with ':.4f' raises TypeError when the
        # checkpoint has no recorded validation loss.
        val_loss_str = f"{self.best_val_loss:.4f}" if self.best_val_loss is not None else "n/a"
        logger.info(f"Model loaded: {n_params:,} parameters, val_loss={val_loss_str}")

    def _load_normalization_stats(self):
        """Load coordinate/displacement normalization statistics from training data."""
        case_dirs = sorted(self.training_data_dir.glob("trial_*"))

        # Start from identity normalization so these attributes always exist.
        # BUG FIX: previously, when trial dirs existed but none contained the
        # HDF5 file, the attributes were never set and predict() crashed with
        # AttributeError.
        self.coord_mean = np.zeros(3)
        self.coord_std = np.ones(3)
        self.disp_mean = np.zeros(6)
        self.disp_std = np.ones(6)

        if not case_dirs:
            logger.warning("No training cases found - using identity normalization")
            return

        # Accumulate coordinates and displacements over all available cases.
        all_coords = []
        all_disp = []
        for case_dir in case_dirs:
            h5_file = case_dir / "neural_field_data.h5"
            if h5_file.exists():
                with h5py.File(h5_file, 'r') as f:
                    all_coords.append(f['mesh/node_coordinates'][:])
                    all_disp.append(f['results/displacement'][:])

        if all_coords:
            all_coords = np.concatenate(all_coords, axis=0)
            all_disp = np.concatenate(all_disp, axis=0)

            # Small epsilon avoids division by zero for constant channels.
            self.coord_mean = all_coords.mean(axis=0)
            self.coord_std = all_coords.std(axis=0) + 1e-8
            self.disp_mean = all_disp.mean(axis=0)
            self.disp_std = all_disp.std(axis=0) + 1e-8

        logger.info(f"Normalization stats from {len(case_dirs)} cases")

    def _load_reference_mesh(self):
        """Load reference mesh structure (from the first trial) for building graphs."""
        case_dirs = sorted(self.training_data_dir.glob("trial_*"))

        if not case_dirs:
            raise ValueError(f"No training cases in {self.training_data_dir}")

        first_case = case_dirs[0]
        json_file = first_case / "neural_field_data.json"
        h5_file = first_case / "neural_field_data.h5"

        # Metadata (materials, elements, BCs, loads).
        with open(json_file, 'r') as f:
            self.reference_metadata = json.load(f)

        # Node coordinates.
        with h5py.File(h5_file, 'r') as f:
            self.reference_coords = f['mesh/node_coordinates'][:]
        self.num_nodes = self.reference_coords.shape[0]

        # Edge index is constant for parametric optimization (fixed topology).
        self._build_graph_structure()

    def _build_graph_structure(self):
        """Build graph edge index and edge attributes from mesh connectivity."""
        metadata = self.reference_metadata
        num_nodes = self.num_nodes
        edge_list = []

        # Material properties become edge attributes (scaled to O(1) ranges).
        mat_props = [0.0] * 5
        if 'materials' in metadata:
            for mat in metadata['materials']:
                if mat['type'] == 'MAT1':
                    mat_props = [
                        mat.get('E', 0.0) / 1e6,
                        mat.get('nu', 0.0),
                        mat.get('rho', 0.0) * 1e6,
                        mat.get('G', 0.0) / 1e6 if mat.get('G') else 0.0,
                        mat.get('alpha', 0.0) * 1e6 if mat.get('alpha') else 0.0
                    ]
                    break

        # Fully connect the nodes of each element (both directions), with
        # 1-based node IDs converted to 0-based indices.
        if 'mesh' in metadata and 'elements' in metadata['mesh']:
            for elem_type in ['solid', 'shell', 'beam']:
                if elem_type in metadata['mesh']['elements']:
                    for elem in metadata['mesh']['elements'][elem_type]:
                        elem_nodes = elem['nodes']
                        for i in range(len(elem_nodes)):
                            for j in range(i + 1, len(elem_nodes)):
                                node_i = elem_nodes[i] - 1
                                node_j = elem_nodes[j] - 1
                                if node_i < num_nodes and node_j < num_nodes:
                                    edge_list.append([node_i, node_j])
                                    edge_list.append([node_j, node_i])

        if edge_list:
            self.edge_index = torch.tensor(edge_list, dtype=torch.long).t().to(self.device)
            num_edges = self.edge_index.shape[1]
            self.edge_attr = torch.tensor([mat_props] * num_edges, dtype=torch.float).to(self.device)
        else:
            self.edge_index = torch.zeros((2, 0), dtype=torch.long).to(self.device)
            self.edge_attr = torch.zeros((0, 5), dtype=torch.float).to(self.device)

        # BC mask and load features are constant for this study.
        self._build_bc_and_loads()

    def _build_bc_and_loads(self):
        """Build the boundary-condition mask and nodal load features."""
        metadata = self.reference_metadata
        num_nodes = self.num_nodes

        # BC mask: 1.0 where a DOF (1-6 in SPC notation) is constrained.
        self.bc_mask = torch.zeros(num_nodes, 6)
        if 'boundary_conditions' in metadata and 'spc' in metadata['boundary_conditions']:
            for spc in metadata['boundary_conditions']['spc']:
                node_id = spc['node']
                if node_id <= num_nodes:
                    dofs = spc['dofs']
                    # DOFs are encoded as a digit string, e.g. "123456".
                    for dof_char in str(dofs):
                        if dof_char.isdigit():
                            dof_idx = int(dof_char) - 1
                            if 0 <= dof_idx < 6:
                                self.bc_mask[node_id - 1, dof_idx] = 1.0

        # Load features: force vector per node (magnitude * unit direction).
        self.load_features = torch.zeros(num_nodes, 3)
        if 'loads' in metadata and 'point_forces' in metadata['loads']:
            for force in metadata['loads']['point_forces']:
                node_id = force['node']
                if node_id <= num_nodes:
                    magnitude = force['magnitude']
                    direction = force['direction']
                    force_vector = [magnitude * d for d in direction]
                    self.load_features[node_id - 1] = torch.tensor(force_vector)

        self.bc_mask = self.bc_mask.to(self.device)
        self.load_features = self.load_features.to(self.device)

    def _build_node_features(self) -> 'torch.Tensor':
        """Build the per-node feature tensor for model input."""
        # Standardize coordinates with the training-set statistics.
        coords = torch.from_numpy(self.reference_coords).float()
        coords_norm = (coords - torch.from_numpy(self.coord_mean).float()) / \
                      torch.from_numpy(self.coord_std).float()
        coords_norm = coords_norm.to(self.device)

        # Concatenate: [coords(3) + bc_mask(6) + loads(3)] = 12 features
        return torch.cat([coords_norm, self.bc_mask, self.load_features], dim=-1)

    def predict(
        self,
        design_params: Dict[str, float],
        return_fields: bool = False
    ) -> Dict[str, Any]:
        """
        Predict FEA results using the neural network.

        Args:
            design_params: Design parameter values (not used for prediction,
                but kept for API compatibility - the mesh is constant)
            return_fields: If True, also return complete displacement/stress fields

        Returns:
            dict with:
                - max_displacement: Maximum displacement magnitude (mm)
                - max_stress: Estimated maximum von Mises stress (approximate)
                - inference_time_ms: Prediction time
                - displacement_field / von_mises_field (if return_fields=True)
        """
        start_time = time.time()

        # Assemble the graph (all tensors already live on self.device).
        node_features = self._build_node_features()
        graph_data = Data(
            x=node_features,
            edge_index=self.edge_index,
            edge_attr=self.edge_attr
        )

        with torch.no_grad():
            predictions = self.model(graph_data, return_stress=True)

        # Denormalize displacement back to physical units.
        displacement = predictions['displacement'].cpu().numpy()
        displacement = displacement * self.disp_std + self.disp_mean

        # Magnitude of the translational DOFs only (first 3 components).
        disp_magnitude = np.linalg.norm(displacement[:, :3], axis=1)
        max_displacement = float(np.max(disp_magnitude))

        # Stress is approximate - the model was trained on displacement only.
        max_stress = float(torch.max(predictions['von_mises']).item())

        inference_time = (time.time() - start_time) * 1000

        results = {
            'max_displacement': max_displacement,
            'max_stress': max_stress,
            'inference_time_ms': inference_time
        }

        if return_fields:
            results['displacement_field'] = displacement
            results['von_mises_field'] = predictions['von_mises'].cpu().numpy()

        self.stats['predictions'] += 1
        self.stats['total_time_ms'] += inference_time

        return results

    def get_statistics(self) -> Dict[str, Any]:
        """Return aggregate prediction statistics for reporting."""
        avg_time = self.stats['total_time_ms'] / self.stats['predictions'] \
            if self.stats['predictions'] > 0 else 0

        return {
            'total_predictions': self.stats['predictions'],
            'total_time_ms': self.stats['total_time_ms'],
            'average_time_ms': avg_time,
            'model_path': str(self.model_path),
            'best_val_loss': self.best_val_loss,
            'device': str(self.device)
        }

    def needs_fea_validation(self, trial_number: int) -> bool:
        """
        Determine if FEA validation is recommended for this trial.

        Args:
            trial_number: Current trial number

        Returns:
            True if FEA validation is recommended (first 5 trials always
            validate; thereafter every 20th trial).
        """
        if trial_number < 5:
            return True  # First few always validate
        if trial_number % 20 == 0:
            return True  # Periodic validation
        return False
|
|
|
|
|
|
class ParametricSurrogate:
    """
    Parametric neural surrogate that predicts ALL objectives from design parameters.

    Unlike NeuralSurrogate which only predicts displacement fields,
    ParametricSurrogate directly predicts:
    - mass
    - frequency
    - max_displacement
    - max_stress

    This is the "future-proof" solution using a design-conditioned GNN.
    """

    def __init__(
        self,
        model_path: Path,
        training_data_dir: Path,
        device: str = 'auto'
    ):
        """
        Initialize parametric surrogate.

        Args:
            model_path: Path to trained parametric model checkpoint (.pt file)
            training_data_dir: Path to training data (for reference mesh)
            device: Computing device ('cuda', 'cpu', or 'auto')

        Raises:
            ImportError: If PyTorch or the parametric predictor modules are missing.
            ValueError: If no training cases exist in training_data_dir.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch required. Install: pip install torch torch-geometric")

        if not PARAMETRIC_MODEL_AVAILABLE:
            raise ImportError("Parametric predictor modules not found")

        self.model_path = Path(model_path)
        self.training_data_dir = Path(training_data_dir)

        # Resolve compute device ('auto' prefers CUDA when available).
        if device == 'auto':
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)

        logger.info(f"Parametric Surrogate initializing on {self.device}")

        # Model + normalization come from the checkpoint; the mesh comes from
        # the training data (topology is constant across the study).
        self._load_model()
        self._load_reference_mesh()

        # Performance tracking counters (exposed via get_statistics()).
        self.stats = {
            'predictions': 0,
            'total_time_ms': 0.0
        }

        logger.info(f"Parametric Surrogate ready: {self.num_nodes} nodes, "
                    f"predicts mass/freq/disp/stress")

    def _load_model(self):
        """Load trained parametric model and normalization stats from checkpoint."""
        logger.info(f"Loading parametric model from {self.model_path}")

        checkpoint = torch.load(self.model_path, map_location=self.device)

        # Rebuild the architecture exactly as it was trained.
        model_config = checkpoint['config']
        self.model = create_parametric_model(model_config)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model = self.model.to(self.device)
        self.model.eval()

        self.model_config = model_config
        self.best_val_loss = checkpoint.get('best_val_loss', None)

        # Normalization statistics were stored alongside the weights.
        norm = checkpoint.get('normalization', {})
        self.design_var_names = checkpoint.get('design_var_names', [])
        self.n_design_vars = len(self.design_var_names)

        self.design_mean = torch.tensor(norm.get('design_mean', [0.0] * self.n_design_vars),
                                        dtype=torch.float32, device=self.device)
        self.design_std = torch.tensor(norm.get('design_std', [1.0] * self.n_design_vars),
                                       dtype=torch.float32, device=self.device)

        self.coord_mean = np.array(norm.get('coord_mean', [0.0, 0.0, 0.0]))
        self.coord_std = np.array(norm.get('coord_std', [1.0, 1.0, 1.0]))
        self.disp_mean = np.array(norm.get('disp_mean', [0.0] * 6))
        self.disp_std = np.array(norm.get('disp_std', [1.0] * 6))

        # Scalar normalization stats (for denormalization). Fallback defaults
        # are study-specific guesses - presumably for the UAV arm; confirm
        # against the training pipeline before relying on them.
        self.mass_mean = norm.get('mass_mean', 3500.0)
        self.mass_std = norm.get('mass_std', 700.0)
        self.freq_mean = norm.get('freq_mean', 18.0)
        self.freq_std = norm.get('freq_std', 2.0)
        self.max_disp_mean = norm.get('max_disp_mean', 0.025)
        self.max_disp_std = norm.get('max_disp_std', 0.005)
        self.max_stress_mean = norm.get('max_stress_mean', 200.0)
        self.max_stress_std = norm.get('max_stress_std', 50.0)

        n_params = sum(p.numel() for p in self.model.parameters())
        # BUG FIX: formatting None with ':.4f' raises TypeError when the
        # checkpoint has no recorded validation loss.
        val_loss_str = f"{self.best_val_loss:.4f}" if self.best_val_loss is not None else "n/a"
        logger.info(f"Parametric model loaded: {n_params:,} params, "
                    f"val_loss={val_loss_str}")
        logger.info(f"Design vars: {self.design_var_names}")

    def _load_reference_mesh(self):
        """Load reference mesh structure (from the first trial) for building graphs."""
        case_dirs = sorted(self.training_data_dir.glob("trial_*"))

        if not case_dirs:
            raise ValueError(f"No training cases in {self.training_data_dir}")

        first_case = case_dirs[0]
        json_file = first_case / "neural_field_data.json"
        h5_file = first_case / "neural_field_data.h5"

        # Metadata (materials, elements, BCs, loads).
        with open(json_file, 'r') as f:
            self.reference_metadata = json.load(f)

        # Node coordinates.
        with h5py.File(h5_file, 'r') as f:
            self.reference_coords = f['mesh/node_coordinates'][:]
        self.num_nodes = self.reference_coords.shape[0]

        self._build_graph_structure()

    def _build_graph_structure(self):
        """Build graph edge index and edge attributes from mesh connectivity."""
        metadata = self.reference_metadata
        num_nodes = self.num_nodes
        edge_list = []

        # Material properties become edge attributes (scaled to O(1) ranges).
        mat_props = [0.0] * 5
        if 'materials' in metadata:
            for mat in metadata['materials']:
                if mat['type'] == 'MAT1':
                    mat_props = [
                        mat.get('E', 0.0) / 1e6,
                        mat.get('nu', 0.0),
                        mat.get('rho', 0.0) * 1e6,
                        mat.get('G', 0.0) / 1e6 if mat.get('G') else 0.0,
                        mat.get('alpha', 0.0) * 1e6 if mat.get('alpha') else 0.0
                    ]
                    break

        # Fully connect the nodes of each element (both directions), with
        # 1-based node IDs converted to 0-based indices.
        if 'mesh' in metadata and 'elements' in metadata['mesh']:
            for elem_type in ['solid', 'shell', 'beam']:
                if elem_type in metadata['mesh']['elements']:
                    for elem in metadata['mesh']['elements'][elem_type]:
                        elem_nodes = elem['nodes']
                        for i in range(len(elem_nodes)):
                            for j in range(i + 1, len(elem_nodes)):
                                node_i = elem_nodes[i] - 1
                                node_j = elem_nodes[j] - 1
                                if node_i < num_nodes and node_j < num_nodes:
                                    edge_list.append([node_i, node_j])
                                    edge_list.append([node_j, node_i])

        if edge_list:
            self.edge_index = torch.tensor(edge_list, dtype=torch.long).t().to(self.device)
            num_edges = self.edge_index.shape[1]
            self.edge_attr = torch.tensor([mat_props] * num_edges, dtype=torch.float).to(self.device)
        else:
            self.edge_index = torch.zeros((2, 0), dtype=torch.long).to(self.device)
            self.edge_attr = torch.zeros((0, 5), dtype=torch.float).to(self.device)

        self._build_bc_and_loads()

    def _build_bc_and_loads(self):
        """Build the boundary-condition mask and nodal load features."""
        metadata = self.reference_metadata
        num_nodes = self.num_nodes

        # BC mask: 1.0 where a DOF (1-6 in SPC notation) is constrained.
        self.bc_mask = torch.zeros(num_nodes, 6)
        if 'boundary_conditions' in metadata and 'spc' in metadata['boundary_conditions']:
            for spc in metadata['boundary_conditions']['spc']:
                node_id = spc['node']
                if node_id <= num_nodes:
                    dofs = spc['dofs']
                    # DOFs are encoded as a digit string, e.g. "123456".
                    for dof_char in str(dofs):
                        if dof_char.isdigit():
                            dof_idx = int(dof_char) - 1
                            if 0 <= dof_idx < 6:
                                self.bc_mask[node_id - 1, dof_idx] = 1.0

        # Load features: force vector per node (magnitude * unit direction).
        self.load_features = torch.zeros(num_nodes, 3)
        if 'loads' in metadata and 'point_forces' in metadata['loads']:
            for force in metadata['loads']['point_forces']:
                node_id = force['node']
                if node_id <= num_nodes:
                    magnitude = force['magnitude']
                    direction = force['direction']
                    force_vector = [magnitude * d for d in direction]
                    self.load_features[node_id - 1] = torch.tensor(force_vector)

        self.bc_mask = self.bc_mask.to(self.device)
        self.load_features = self.load_features.to(self.device)

    def _build_node_features(self) -> 'torch.Tensor':
        """Build the per-node feature tensor for model input."""
        # Standardize coordinates with the training-set statistics.
        coords = torch.from_numpy(self.reference_coords).float()
        coords_norm = (coords - torch.from_numpy(self.coord_mean).float()) / \
                      torch.from_numpy(self.coord_std).float()
        coords_norm = coords_norm.to(self.device)

        # Concatenate: [coords(3) + bc_mask(6) + loads(3)] = 12 features
        return torch.cat([coords_norm, self.bc_mask, self.load_features], dim=-1)

    def predict(
        self,
        design_params: Dict[str, float],
        return_fields: bool = False
    ) -> Dict[str, Any]:
        """
        Predict all FEA objectives using the parametric neural network.

        Args:
            design_params: Design parameter values (e.g. beam_half_core_thickness).
                Missing names default to 0.0.
            return_fields: If True, also return the complete displacement field

        Returns:
            dict with:
                - mass: Predicted mass (g)
                - frequency: Predicted fundamental frequency (Hz)
                - max_displacement: Maximum displacement magnitude (mm)
                - max_stress: Maximum von Mises stress (MPa)
                - inference_time_ms: Prediction time
                - displacement_field (if return_fields=True and model provides it)
        """
        start_time = time.time()

        # Assemble the design-parameter vector in the training order.
        param_values = [design_params.get(name, 0.0) for name in self.design_var_names]
        design_tensor = torch.tensor(param_values, dtype=torch.float32, device=self.device)

        # Normalize design params with the training statistics.
        design_tensor_norm = (design_tensor - self.design_mean) / self.design_std

        node_features = self._build_node_features()
        graph_data = Data(
            x=node_features,
            edge_index=self.edge_index,
            edge_attr=self.edge_attr
        )
        graph_data = graph_data.to(self.device)

        with torch.no_grad():
            predictions = self.model(graph_data, design_tensor_norm, return_fields=return_fields)

        # Scalars are taken as-is from the model output. NOTE(review): if the
        # model was trained on normalized scalar targets, they must be
        # denormalized here (e.g. mass * self.mass_std + self.mass_mean) -
        # confirm against the training pipeline.
        mass = predictions['mass'].item()
        frequency = predictions['frequency'].item()
        max_displacement = predictions['max_displacement'].item()
        max_stress = predictions['max_stress'].item()

        inference_time = (time.time() - start_time) * 1000

        results = {
            'mass': mass,
            'frequency': frequency,
            'max_displacement': max_displacement,
            'max_stress': max_stress,
            'inference_time_ms': inference_time
        }

        if return_fields and 'displacement' in predictions:
            # Denormalize displacement field back to physical units.
            displacement = predictions['displacement'].cpu().numpy()
            displacement = displacement * self.disp_std + self.disp_mean
            results['displacement_field'] = displacement

        self.stats['predictions'] += 1
        self.stats['total_time_ms'] += inference_time

        return results

    def get_statistics(self) -> Dict[str, Any]:
        """Return aggregate prediction statistics for reporting."""
        avg_time = self.stats['total_time_ms'] / self.stats['predictions'] \
            if self.stats['predictions'] > 0 else 0

        return {
            'total_predictions': self.stats['predictions'],
            'total_time_ms': self.stats['total_time_ms'],
            'average_time_ms': avg_time,
            'model_path': str(self.model_path),
            'best_val_loss': self.best_val_loss,
            'device': str(self.device),
            'design_var_names': self.design_var_names,
            'n_design_vars': self.n_design_vars
        }
|
|
|
|
|
|
class HybridOptimizer:
    """
    Intelligent optimizer that combines FEA and neural surrogates.

    Phases:
        1. Exploration: Use FEA to explore design space
        2. Training: Train neural network on FEA data
        3. Exploitation: Use NN for fast optimization
        4. Validation: Periodically validate with FEA
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize hybrid optimizer.

        Args:
            config: Configuration dictionary. Recognized keys:
                min_fea_samples, validation_frequency, retrain_frequency,
                confidence_threshold, training_data_dir, pretrained_model_path.
        """
        self.config = config
        self.phase = 'exploration'
        self.fea_samples = []
        self.nn_surrogate = None
        self.trial_count = 0

        # Phase transition parameters
        self.min_fea_samples = config.get('min_fea_samples', 20)
        self.validation_frequency = config.get('validation_frequency', 10)
        self.retrain_frequency = config.get('retrain_frequency', 50)
        self.confidence_threshold = config.get('confidence_threshold', 0.95)

        # Training data export directory (created eagerly).
        self.training_data_dir = Path(config.get('training_data_dir', 'hybrid_training_data'))
        self.training_data_dir.mkdir(parents=True, exist_ok=True)

        logger.info("Hybrid optimizer initialized")

    def should_use_nn(self, trial_number: int) -> Tuple[bool, str]:
        """
        Decide whether to use the NN surrogate for this trial.

        Args:
            trial_number: Current trial number

        Returns:
            Tuple of (use_nn, reason)
        """
        self.trial_count = trial_number

        if self.phase == 'exploration':
            if trial_number < self.min_fea_samples:
                return False, f"Exploration phase ({trial_number}/{self.min_fea_samples})"

            # Enough FEA samples gathered: try to bring a surrogate online.
            self.phase = 'training'
            self._train_surrogate()
            # BUG FIX: previously this unconditionally switched to
            # exploitation and returned True, even when _train_surrogate
            # failed (nn_surrogate is None and phase was reset to exploration).
            if self.nn_surrogate is None:
                return False, "Surrogate unavailable - continuing with FEA"
            self.phase = 'exploitation'
            return True, "Switched to neural surrogate"

        elif self.phase == 'exploitation':
            # Periodic ground-truth validation with FEA.
            if trial_number % self.validation_frequency == 0:
                return False, f"Periodic FEA validation (every {self.validation_frequency} trials)"

            # Periodic retraining with the accumulated data.
            if trial_number % self.retrain_frequency == 0:
                self._retrain_surrogate()

            return True, "Using neural surrogate"

        return False, f"Unknown phase: {self.phase}"

    def _train_surrogate(self):
        """Train (or load) a surrogate model from accumulated FEA data."""
        logger.info(f"Training surrogate on {len(self.fea_samples)} FEA samples")

        # In practice, this would:
        # 1. Parse all FEA data using neural_field_parser
        # 2. Train AtomizerField model
        # 3. Load trained model
        # For now, try to load a pre-trained model if available.
        model_path = self.config.get('pretrained_model_path')
        if model_path and Path(model_path).exists():
            # BUG FIX: NeuralSurrogate takes (model_path, training_data_dir,
            # device); the previous call passed an unsupported
            # confidence_threshold kwarg and omitted the required
            # training_data_dir, which raised TypeError.
            self.nn_surrogate = NeuralSurrogate(
                model_path=Path(model_path),
                training_data_dir=self.training_data_dir
            )
            logger.info(f"Loaded pre-trained model from {model_path}")
        else:
            logger.warning("No pre-trained model available, continuing with FEA")
            self.phase = 'exploration'

    def _retrain_surrogate(self):
        """Retrain the surrogate with additional data (placeholder)."""
        logger.info(f"Retraining surrogate with {len(self.fea_samples)} total samples")
        # Trigger retraining pipeline
        # This would integrate with AtomizerField training

    def add_fea_sample(self, design: Dict[str, float], results: Dict[str, float]):
        """
        Add an FEA result to the accumulated training data.

        Args:
            design: Design variables
            results: FEA results
        """
        self.fea_samples.append({
            'trial': self.trial_count,
            'design': design,
            'results': results,
            'timestamp': time.time()
        })

    def get_phase_info(self) -> Dict[str, Any]:
        """Get current phase information for reporting."""
        return {
            'phase': self.phase,
            'trial_count': self.trial_count,
            'fea_samples': len(self.fea_samples),
            'has_surrogate': self.nn_surrogate is not None,
            'min_fea_samples': self.min_fea_samples,
            'validation_frequency': self.validation_frequency
        }
|
|
|
|
|
|
def create_parametric_surrogate_for_study(
    model_path: str = None,
    training_data_dir: str = None,
    project_root: Path = None
) -> Optional[ParametricSurrogate]:
    """
    Factory function to create parametric neural surrogate for UAV arm study.

    This is the recommended surrogate type - predicts all objectives (mass, freq, etc.)

    Args:
        model_path: Path to parametric model checkpoint (auto-detect if None)
        training_data_dir: Path to training data (auto-detect if None)
        project_root: Project root directory for auto-detection

    Returns:
        ParametricSurrogate instance or None if not available
    """
    if not TORCH_AVAILABLE or not PARAMETRIC_MODEL_AVAILABLE:
        logger.warning("Parametric surrogate not available: PyTorch or ParametricPredictor missing")
        return None

    # Auto-detect project root
    if project_root is None:
        project_root = Path(__file__).parent.parent

    # Auto-detect parametric model path (prefer the v2 checkpoint).
    if model_path is None:
        default_model = project_root / "atomizer-field" / "runs" / "parametric_uav_arm_v2" / "checkpoint_best.pt"
        if not default_model.exists():
            # Fall back to the older training run location.
            default_model = project_root / "atomizer-field" / "runs" / "parametric_uav_arm" / "checkpoint_best.pt"
        if default_model.exists():
            model_path = str(default_model)
        else:
            # FIX: the warning previously carried no path information
            # (placeholder-less f-string), making the failure hard to debug.
            logger.warning(f"No trained parametric model found at {default_model}")
            return None
    else:
        model_path = str(model_path)

    # Auto-detect training data
    if training_data_dir is None:
        default_data = project_root / "atomizer_field_training_data" / "uav_arm_train"
        if default_data.exists():
            training_data_dir = str(default_data)
        else:
            logger.warning(f"No training data found at {default_data}")
            return None
    else:
        training_data_dir = str(training_data_dir)

    try:
        return ParametricSurrogate(
            model_path=Path(model_path),
            training_data_dir=Path(training_data_dir)
        )
    except Exception as e:
        logger.error(f"Failed to create parametric surrogate: {e}")
        import traceback
        traceback.print_exc()
        return None
|
|
|
|
|
|
def create_surrogate_for_study(
    model_path: str = None,
    training_data_dir: str = None,
    project_root: Path = None
) -> Optional[NeuralSurrogate]:
    """
    Factory function to create neural surrogate for UAV arm study.

    Args:
        model_path: Path to model checkpoint (auto-detect if None)
        training_data_dir: Path to training data (auto-detect if None)
        project_root: Project root directory for auto-detection

    Returns:
        NeuralSurrogate instance or None if not available
    """
    if not (TORCH_AVAILABLE and ATOMIZER_FIELD_AVAILABLE):
        logger.warning("Neural surrogate not available: PyTorch or AtomizerField missing")
        return None

    # Default project root: two levels above this module.
    root = project_root if project_root is not None else Path(__file__).parent.parent

    # Resolve the model checkpoint, auto-detecting when not given.
    if model_path is not None:
        model_path = str(model_path)
    else:
        default_model = root / "atomizer-field" / "runs" / "uav_arm_model" / "checkpoint_best.pt"
        if not default_model.exists():
            logger.warning(f"No trained model found at {default_model}")
            return None
        model_path = str(default_model)

    # Resolve the training data directory, auto-detecting when not given.
    if training_data_dir is not None:
        training_data_dir = str(training_data_dir)
    else:
        default_data = root / "atomizer_field_training_data" / "uav_arm_train"
        if not default_data.exists():
            logger.warning(f"No training data found at {default_data}")
            return None
        training_data_dir = str(default_data)

    try:
        return NeuralSurrogate(
            model_path=Path(model_path),
            training_data_dir=Path(training_data_dir)
        )
    except Exception as e:
        logger.error(f"Failed to create neural surrogate: {e}")
        return None
|
|
|
|
|
|
def create_surrogate_from_config(config: Dict[str, Any]) -> Optional[NeuralSurrogate]:
    """
    Factory function to create neural surrogate from workflow configuration.

    Args:
        config: Workflow configuration dictionary

    Returns:
        NeuralSurrogate instance if enabled, None otherwise
    """
    surrogate_config = config.get('neural_surrogate', {})

    if not surrogate_config.get('enabled', False):
        logger.info("Neural surrogate is disabled")
        return None

    model_path = surrogate_config.get('model_path')
    training_data_dir = surrogate_config.get('training_data_dir')

    # Both paths are mandatory when the surrogate is enabled.
    for value, label in ((model_path, 'model_path'), (training_data_dir, 'training_data_dir')):
        if not value:
            logger.error(f"Neural surrogate enabled but {label} not specified")
            return None

    try:
        surrogate = NeuralSurrogate(
            model_path=Path(model_path),
            training_data_dir=Path(training_data_dir),
            device=surrogate_config.get('device', 'auto')
        )
    except Exception as e:
        logger.error(f"Failed to create neural surrogate: {e}")
        return None

    logger.info("Neural surrogate created successfully")
    return surrogate
|
|
|
|
|
|
def create_hybrid_optimizer_from_config(config: Dict[str, Any]) -> Optional[HybridOptimizer]:
    """
    Factory function to create hybrid optimizer from configuration.

    Args:
        config: Workflow configuration dictionary

    Returns:
        HybridOptimizer instance if enabled, None otherwise
    """
    hybrid_config = config.get('hybrid_optimization', {})

    if not hybrid_config.get('enabled', False):
        logger.info("Hybrid optimization is disabled")
        return None

    try:
        optimizer = HybridOptimizer(hybrid_config)
    except Exception as e:
        logger.error(f"Failed to create hybrid optimizer: {e}")
        return None

    logger.info("Hybrid optimizer created successfully")
    return optimizer