Major changes: - Dashboard: WebSocket-based chat with session management - Dashboard: New chat components (ChatPane, ChatInput, ModeToggle) - Dashboard: Enhanced UI with parallel coordinates chart - MCP Server: New atomizer-tools server for Claude integration - Extractors: Enhanced Zernike OPD extractor - Reports: Improved report generator New studies (configs and scripts only): - M1 Mirror: Cost reduction campaign studies - Simple Beam, Simple Bracket, UAV Arm studies Note: Large iteration data (2_iterations/, best_design_archive/) excluded via .gitignore - kept on local Gitea only. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1096 lines
39 KiB
Python
1096 lines
39 KiB
Python
"""
|
|
bracket_pareto_3obj - Hybrid Neural Network Optimization Script
|
|
================================================================
|
|
|
|
This script implements the hybrid NN/FEA optimization workflow:
|
|
|
|
Phase 1: Export - Extract training data from existing FEA trials
|
|
Phase 2: Train - Train MLP surrogate model on FEA results
|
|
Phase 3: NN-Optimize - Run fast NN-only optimization (1000s of trials)
|
|
Phase 4: Validate - Validate best NN predictions with actual FEA
|
|
|
|
Workflow:
|
|
---------
|
|
1. python run_nn_optimization.py --export # Export training data
|
|
2. python run_nn_optimization.py --train # Train surrogate model
|
|
3. python run_nn_optimization.py --nn-optimize # Run NN optimization
|
|
4. python run_nn_optimization.py --validate # Validate with FEA
|
|
|
|
Or run all phases:
|
|
python run_nn_optimization.py --all
|
|
|
|
Generated for bracket_pareto_3obj study
|
|
"""
|
|
|
|
from pathlib import Path
|
|
import sys
|
|
import json
|
|
import argparse
|
|
from datetime import datetime
|
|
from typing import Dict, Any, Optional, List, Tuple
|
|
import numpy as np
|
|
|
|
# Add parent directory to path
|
|
project_root = Path(__file__).resolve().parents[2]
|
|
sys.path.insert(0, str(project_root))
|
|
|
|
import optuna
|
|
from optuna.samplers import NSGAIISampler, TPESampler
|
|
|
|
# Core imports
|
|
from optimization_engine.nx.solver import NXSolver
|
|
from optimization_engine.utils.logger import get_logger
|
|
|
|
# Extractor imports
|
|
from optimization_engine.extractors.bdf_mass_extractor import extract_mass_from_bdf
|
|
from optimization_engine.extractors.extract_displacement import extract_displacement
|
|
from optimization_engine.extractors.extract_von_mises_stress import extract_solid_stress
|
|
|
|
# Neural surrogate imports
|
|
try:
|
|
import torch
|
|
import torch.nn as nn
|
|
import torch.nn.functional as F
|
|
from torch.utils.data import Dataset, DataLoader, random_split
|
|
TORCH_AVAILABLE = True
|
|
except ImportError:
|
|
TORCH_AVAILABLE = False
|
|
print("WARNING: PyTorch not available. NN features disabled.")
|
|
|
|
|
|
# ============================================================================
|
|
# MLP Surrogate Model
|
|
# ============================================================================
|
|
|
|
class MLPSurrogate(nn.Module):
    """Simple MLP mapping design parameters -> objective predictions.

    Architecture: [Linear -> LayerNorm -> ReLU -> Dropout(0.1)] per hidden
    layer, followed by a final Linear projection to the outputs.
    """

    def __init__(self, n_inputs: int = 2, n_outputs: int = 3,
                 hidden_dims: Optional[List[int]] = None):
        """
        Args:
            n_inputs: Number of design variables.
            n_outputs: Number of predicted objectives.
            hidden_dims: Hidden layer widths; defaults to [64, 128, 64].
                (BUGFIX: the original used a mutable list literal as the
                default, which is shared across all calls; a None sentinel
                avoids that pitfall while keeping the same default widths.)
        """
        super().__init__()

        if hidden_dims is None:
            hidden_dims = [64, 128, 64]

        layers: List[nn.Module] = []
        prev_dim = n_inputs

        for hidden_dim in hidden_dims:
            layers.extend([
                nn.Linear(prev_dim, hidden_dim),
                nn.LayerNorm(hidden_dim),
                nn.ReLU(),
                nn.Dropout(0.1)
            ])
            prev_dim = hidden_dim

        layers.append(nn.Linear(prev_dim, n_outputs))
        self.network = nn.Sequential(*layers)

        # He/Kaiming init suits the ReLU stack; zero the biases.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Map a batch of normalized inputs to normalized objective predictions."""
        return self.network(x)
|
|
|
|
|
|
class BracketSurrogate:
    """Surrogate model for bracket Pareto optimization.

    Wraps an MLPSurrogate plus the input/output normalization statistics
    computed at training time, and provides train / predict / save / load.
    """

    def __init__(self, model_path: Optional[Path] = None, device: str = 'auto'):
        """
        Args:
            model_path: Optional checkpoint to load immediately.
            device: 'auto' (CUDA if available, else CPU) or an explicit
                torch device string such as 'cpu' or 'cuda:0'.

        Raises:
            ImportError: If PyTorch is not installed.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch required")

        # BUGFIX: honor an explicit device request. The original expression
        # only ever selected CUDA when device == 'auto', so passing 'cuda'
        # explicitly silently fell back to CPU.
        if device == 'auto':
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)

        self.model = None          # MLPSurrogate once trained or loaded
        self.normalization = None  # dict of mean/std arrays for inputs and outputs
        self.design_var_names = ['support_angle', 'tip_thickness']
        self.objective_names = ['mass', 'stress', 'stiffness']

        if model_path and Path(model_path).exists():
            self.load(model_path)

    def train_from_database(self, db_path: Path, study_name: str,
                            epochs: int = 300, save_path: Optional[Path] = None):
        """Train the surrogate from completed trials in an Optuna database.

        Args:
            db_path: Path to the sqlite study database.
            study_name: Name of the study inside the database.
            epochs: Number of training epochs.
            save_path: If given, save the best checkpoint here.

        Returns:
            self, for chaining.

        Raises:
            ValueError: If fewer than 10 completed trials are available.
        """
        print(f"\n{'='*60}")
        print("Training Bracket Surrogate Model")
        print(f"{'='*60}")
        print(f"Device: {self.device}")
        print(f"Database: {db_path}")

        # Load data from database
        storage = optuna.storages.RDBStorage(f"sqlite:///{db_path}")
        study = optuna.load_study(study_name=study_name, storage=storage)

        completed = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
        print(f"Found {len(completed)} completed trials")

        if len(completed) < 10:
            raise ValueError(f"Need at least 10 trials, got {len(completed)}")

        # Extract training pairs (design params -> objectives)
        design_params = []
        objectives = []

        for trial in completed:
            # Skip trials with infinite values (failed / penalized runs)
            if any(v == float('inf') for v in trial.values):
                continue

            # Design parameters. The defaults are only a safety net —
            # completed trials normally carry both parameters.
            params = [
                trial.params.get('support_angle', 45.0),
                trial.params.get('tip_thickness', 45.0)
            ]

            # Objectives: mass, stress, stiffness (all 3 values from multi-objective)
            objs = list(trial.values)

            design_params.append(params)
            objectives.append(objs)

        design_params = np.array(design_params, dtype=np.float32)
        objectives = np.array(objectives, dtype=np.float32)

        print(f"Valid samples: {len(design_params)}")
        print(f"Design var ranges:")
        print(f" support_angle: {design_params[:, 0].min():.1f} - {design_params[:, 0].max():.1f}")
        print(f" tip_thickness: {design_params[:, 1].min():.1f} - {design_params[:, 1].max():.1f}")
        print(f"Objective ranges:")
        for i, name in enumerate(self.objective_names):
            print(f" {name}: {objectives[:, i].min():.4f} - {objectives[:, i].max():.4f}")

        # Compute normalization stats (std floored to avoid divide-by-zero)
        design_mean = design_params.mean(axis=0)
        design_std = design_params.std(axis=0) + 1e-8
        objective_mean = objectives.mean(axis=0)
        objective_std = objectives.std(axis=0) + 1e-8

        self.normalization = {
            'design_mean': design_mean,
            'design_std': design_std,
            'objective_mean': objective_mean,
            'objective_std': objective_std
        }

        # Normalize
        X = (design_params - design_mean) / design_std
        Y = (objectives - objective_mean) / objective_std

        # Create dataset
        X_tensor = torch.tensor(X, dtype=torch.float32)
        Y_tensor = torch.tensor(Y, dtype=torch.float32)

        dataset = torch.utils.data.TensorDataset(X_tensor, Y_tensor)

        # 80/20 train/val split with at least one validation sample
        n_val = max(1, int(len(dataset) * 0.2))
        n_train = len(dataset) - n_val
        train_ds, val_ds = random_split(dataset, [n_train, n_val])

        train_loader = DataLoader(train_ds, batch_size=16, shuffle=True)
        val_loader = DataLoader(val_ds, batch_size=16)

        print(f"\nTraining: {n_train} samples, Validation: {n_val} samples")

        # Create model (deeper than the MLPSurrogate default)
        self.model = MLPSurrogate(
            n_inputs=len(self.design_var_names),
            n_outputs=len(self.objective_names),
            hidden_dims=[64, 128, 128, 64]
        ).to(self.device)

        n_params = sum(p.numel() for p in self.model.parameters())
        print(f"Model parameters: {n_params:,}")

        # Training setup: AdamW + cosine LR decay over the full run
        optimizer = torch.optim.AdamW(self.model.parameters(), lr=0.001, weight_decay=1e-5)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)

        best_val_loss = float('inf')
        best_state = None

        print(f"\nTraining for {epochs} epochs...")

        for epoch in range(epochs):
            # Train
            self.model.train()
            train_loss = 0.0
            for x, y in train_loader:
                x, y = x.to(self.device), y.to(self.device)
                optimizer.zero_grad()
                pred = self.model(x)
                loss = F.mse_loss(pred, y)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
            train_loss /= len(train_loader)

            # Validate
            self.model.eval()
            val_loss = 0.0
            with torch.no_grad():
                for x, y in val_loader:
                    x, y = x.to(self.device), y.to(self.device)
                    pred = self.model(x)
                    val_loss += F.mse_loss(pred, y).item()
            val_loss /= len(val_loader)

            scheduler.step()

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                # BUGFIX: state_dict().copy() is a *shallow* copy whose
                # tensors alias the live parameters, so later epochs silently
                # overwrote the "best" snapshot. Clone every tensor instead.
                best_state = {k: v.detach().clone()
                              for k, v in self.model.state_dict().items()}

            if (epoch + 1) % 50 == 0 or epoch == 0:
                print(f" Epoch {epoch+1:3d}: train={train_loss:.6f}, val={val_loss:.6f}")

        # Restore the best checkpoint (guard: epochs could be 0)
        if best_state is not None:
            self.model.load_state_dict(best_state)
        print(f"\nBest validation loss: {best_val_loss:.6f}")

        # Evaluate denormalized accuracy on the validation set
        self.model.eval()
        all_preds = []
        all_targets = []

        with torch.no_grad():
            for x, y in val_loader:
                x = x.to(self.device)
                pred = self.model(x).cpu().numpy()
                all_preds.append(pred)
                all_targets.append(y.numpy())

        all_preds = np.concatenate(all_preds)
        all_targets = np.concatenate(all_targets)

        # Denormalize before computing error metrics
        preds_denorm = all_preds * objective_std + objective_mean
        targets_denorm = all_targets * objective_std + objective_mean

        print(f"\nValidation accuracy:")
        for i, name in enumerate(self.objective_names):
            mae = np.abs(preds_denorm[:, i] - targets_denorm[:, i]).mean()
            mape = (np.abs(preds_denorm[:, i] - targets_denorm[:, i]) /
                    (np.abs(targets_denorm[:, i]) + 1e-8)).mean() * 100
            print(f" {name}: MAE={mae:.4f}, MAPE={mape:.1f}%")

        # Save if requested
        if save_path:
            self.save(save_path)

        print(f"\n{'='*60}")
        print("Training complete!")
        print(f"{'='*60}\n")

        return self

    def predict(self, design_params: Dict[str, float]) -> Dict[str, float]:
        """Predict objectives from design parameters.

        Args:
            design_params: Dict with 'support_angle' and 'tip_thickness'.

        Returns:
            Dict with denormalized 'mass', 'stress', 'stiffness' predictions.

        Raises:
            ValueError: If no model has been trained or loaded.
        """
        if self.model is None:
            raise ValueError("Model not trained")

        # Build the input vector in the order the network was trained with
        x = np.array([
            design_params.get('support_angle', 45.0),
            design_params.get('tip_thickness', 45.0)
        ], dtype=np.float32)

        # Normalize with the training statistics
        x_norm = (x - self.normalization['design_mean']) / self.normalization['design_std']
        x_tensor = torch.tensor(x_norm, dtype=torch.float32, device=self.device).unsqueeze(0)

        # Predict
        self.model.eval()
        with torch.no_grad():
            y_norm = self.model(x_tensor).cpu().numpy()[0]

        # Denormalize
        y = y_norm * self.normalization['objective_std'] + self.normalization['objective_mean']

        return {
            'mass': float(y[0]),
            'stress': float(y[1]),
            'stiffness': float(y[2])
        }

    def save(self, path: Path):
        """Save model weights plus normalization stats to a checkpoint file."""
        torch.save({
            'model_state_dict': self.model.state_dict(),
            # Stored as plain lists so the checkpoint stays readable/portable
            'normalization': {
                'design_mean': self.normalization['design_mean'].tolist(),
                'design_std': self.normalization['design_std'].tolist(),
                'objective_mean': self.normalization['objective_mean'].tolist(),
                'objective_std': self.normalization['objective_std'].tolist()
            },
            'design_var_names': self.design_var_names,
            'objective_names': self.objective_names
        }, path)
        print(f"Model saved to {path}")

    def load(self, path: Path):
        """Load model weights and normalization stats from a checkpoint file.

        NOTE(review): the architecture rebuilt here must match the one used
        in train_from_database ([64, 128, 128, 64]) — keep the two in sync.
        """
        checkpoint = torch.load(path, map_location=self.device)

        self.model = MLPSurrogate(
            n_inputs=2, n_outputs=3, hidden_dims=[64, 128, 128, 64]
        ).to(self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.eval()

        norm = checkpoint['normalization']
        self.normalization = {
            'design_mean': np.array(norm['design_mean']),
            'design_std': np.array(norm['design_std']),
            'objective_mean': np.array(norm['objective_mean']),
            'objective_std': np.array(norm['objective_std'])
        }

        self.design_var_names = checkpoint.get('design_var_names', ['support_angle', 'tip_thickness'])
        self.objective_names = checkpoint.get('objective_names', ['mass', 'stress', 'stiffness'])

        print(f"Model loaded from {path}")
|
|
|
|
|
|
# ============================================================================
|
|
# Phase Functions
|
|
# ============================================================================
|
|
|
|
def phase_export(results_dir: Path, study_name: str, logger):
    """Phase 1: Export training data summary from database.

    Writes two JSON artifacts next to the study database:
    training_data.json (all completed trials with finite objectives) and
    pareto_front.json (the current Pareto-optimal set).

    Args:
        results_dir: Directory containing study.db; JSON files land here too.
        study_name: Name of the Optuna study to export.
        logger: Unused here; kept for a signature consistent with the other phases.

    Returns:
        True on success, False when the study database is missing.
    """
    banner = '=' * 60
    print(f"\n{banner}")
    print("PHASE 1: Export Training Data Summary")
    print(f"{banner}")

    db_path = results_dir / "study.db"
    if not db_path.exists():
        print(f"ERROR: Database not found at {db_path}")
        return False

    study = optuna.load_study(
        study_name=study_name,
        storage=optuna.storages.RDBStorage(f"sqlite:///{db_path}"),
    )

    completed = [t for t in study.trials
                 if t.state == optuna.trial.TrialState.COMPLETE]
    print(f"Completed trials: {len(completed)}")

    # Keep only trials whose objective values are all finite.
    valid_trials = [
        {
            'number': t.number,
            'params': t.params,
            'values': t.values,
            'user_attrs': t.user_attrs,
        }
        for t in completed
        if not any(v == float('inf') for v in t.values)
    ]

    export_data = {
        'study_name': study_name,
        'n_trials': len(completed),
        'timestamp': datetime.now().isoformat(),
        'trials': valid_trials,
    }

    export_path = results_dir / "training_data.json"
    with open(export_path, 'w') as f:
        json.dump(export_data, f, indent=2)

    print(f"Exported {len(export_data['trials'])} valid trials to {export_path}")

    # Also persist the current Pareto front for downstream phases.
    pareto_trials = study.best_trials
    pareto_data = {
        'n_pareto': len(pareto_trials),
        'trials': [
            {'number': t.number, 'params': t.params, 'values': t.values}
            for t in pareto_trials
        ],
    }

    pareto_path = results_dir / "pareto_front.json"
    with open(pareto_path, 'w') as f:
        json.dump(pareto_data, f, indent=2)

    print(f"Pareto front: {len(pareto_trials)} solutions saved to {pareto_path}")

    return True
|
|
|
|
|
|
def phase_train(results_dir: Path, study_name: str, logger, epochs: int = 300):
    """Phase 2: Train surrogate model.

    Trains a BracketSurrogate from the main study database and writes the
    best checkpoint to surrogate_best.pt in results_dir.

    Args:
        results_dir: Directory containing study.db; checkpoint is saved here.
        study_name: Name of the Optuna study to train from.
        logger: Unused here; kept for a signature consistent with the other phases.
        epochs: Number of training epochs.

    Returns:
        The trained BracketSurrogate, or None when PyTorch is unavailable.
    """
    banner = '=' * 60
    print(f"\n{banner}")
    print("PHASE 2: Train Surrogate Model")
    print(f"{banner}")

    if not TORCH_AVAILABLE:
        print("ERROR: PyTorch not available")
        return None

    surrogate = BracketSurrogate(device='auto')
    surrogate.train_from_database(
        db_path=results_dir / "study.db",
        study_name=study_name,
        epochs=epochs,
        save_path=results_dir / "surrogate_best.pt",
    )
    return surrogate
|
|
|
|
|
|
def phase_nn_optimize(results_dir: Path, study_name: str, surrogate: BracketSurrogate,
                      n_trials: int = 1000, logger=None):
    """Phase 3: Run NN-accelerated optimization.

    NOTE: NN results are stored in a SEPARATE database (nn_study.db) to avoid
    overloading the dashboard. Only FEA-validated results go into the main study.db.

    Args:
        results_dir: Directory holding the study databases and JSON artifacts.
        study_name: Base study name; the NN study is named "<study_name>_nn".
        surrogate: Trained surrogate used to evaluate every candidate.
        n_trials: Number of NN-only trials to run.
        logger: Unused here; kept for a signature consistent with the other phases.

    Returns:
        Tuple of (nn_study, pareto_trials): the Optuna study object and its
        Pareto-optimal trials.
    """

    print(f"\n{'='*60}")
    print("PHASE 3: Neural Network Optimization")
    print(f"{'='*60}")
    print(f"Running {n_trials} trials using NN surrogate (~milliseconds each)")
    print(f"NOTE: NN results stored in separate nn_study.db (not shown in dashboard)")

    # Create NN-only study in SEPARATE database
    # This prevents the dashboard from being overloaded with 1000s of NN trials
    nn_db_path = results_dir / "nn_study.db"
    storage = f"sqlite:///{nn_db_path}"

    # Use NSGA-II for the multi-objective search over the surrogate.
    # All three directions are 'minimize': stiffness is stored as a negative
    # value elsewhere in this script, so minimizing it maximizes stiffness.
    nn_study = optuna.create_study(
        study_name=f"{study_name}_nn",
        storage=storage,
        sampler=NSGAIISampler(population_size=50, seed=42),
        directions=['minimize', 'minimize', 'minimize'],  # mass, stress, -stiffness
        load_if_exists=True
    )

    # Objective evaluated purely by the surrogate — no FEA involved.
    def nn_objective(trial):
        # Bounds match the FEA study's design space (degrees / mm; TODO confirm units)
        support_angle = trial.suggest_float('support_angle', 20.0, 70.0)
        tip_thickness = trial.suggest_float('tip_thickness', 30.0, 60.0)

        # NN prediction (sub-millisecond)
        pred = surrogate.predict({
            'support_angle': support_angle,
            'tip_thickness': tip_thickness
        })

        # Store predictions as user attrs for later analysis
        trial.set_user_attr('nn_mass', pred['mass'])
        trial.set_user_attr('nn_stress', pred['stress'])
        trial.set_user_attr('nn_stiffness', pred['stiffness'])

        return pred['mass'], pred['stress'], pred['stiffness']

    # Run NN optimization, timing the whole batch
    import time
    start = time.time()

    nn_study.optimize(nn_objective, n_trials=n_trials, show_progress_bar=True)

    elapsed = time.time() - start
    print(f"\nNN optimization completed:")
    print(f" Trials: {n_trials}")
    print(f" Time: {elapsed:.1f}s ({elapsed/n_trials*1000:.2f}ms per trial)")

    # Get Pareto front
    pareto_trials = nn_study.best_trials
    print(f" Pareto solutions: {len(pareto_trials)}")

    # Save NN Pareto front (consumed by phase_validate)
    nn_pareto = {
        'type': 'nn_optimization',
        'n_trials': n_trials,
        'time_seconds': elapsed,
        'n_pareto': len(pareto_trials),
        'trials': [{
            'number': t.number,
            'params': t.params,
            'values': t.values
        } for t in pareto_trials]
    }

    nn_pareto_path = results_dir / "nn_pareto_front.json"
    with open(nn_pareto_path, 'w') as f:
        json.dump(nn_pareto, f, indent=2)

    print(f" NN Pareto saved to {nn_pareto_path}")

    # Save optimization state: the top candidates queued for FEA validation
    state = {
        'phase': 'nn_optimization',
        'timestamp': datetime.now().isoformat(),
        'n_trials': n_trials,
        'n_pareto': len(pareto_trials),
        'best_candidates': [{
            'params': t.params,
            'nn_objectives': t.values
        } for t in pareto_trials[:20]]  # Top 20 candidates for validation
    }

    state_path = results_dir / "nn_optimization_state.json"
    with open(state_path, 'w') as f:
        json.dump(state, f, indent=2)

    return nn_study, pareto_trials
|
|
|
|
|
|
def phase_validate(results_dir: Path, model_dir: Path, config: dict,
                   surrogate: BracketSurrogate, n_validate: int = 10, logger=None,
                   add_to_main_study: bool = True, study_name: str = "bracket_pareto_3obj"):
    """Phase 4: Validate best NN predictions with FEA.

    Validated results are added to the main study.db so they:
    1. Appear in the dashboard alongside the original FEA runs
    2. Are included in future surrogate retraining

    Args:
        results_dir: Directory containing study.db and nn_pareto_front.json.
        model_dir: Directory containing the NX simulation files.
        config: Study configuration; reads config['simulation'].
        surrogate: Trained surrogate (kept for signature symmetry; NN
            predictions are read from nn_pareto_front.json, not recomputed).
        n_validate: Number of top NN candidates to validate with FEA.
        logger: Unused here; kept for a consistent phase signature.
        add_to_main_study: If True, record validated results in study.db.
        study_name: Name of the main Optuna study.

    Returns:
        List of validated result dicts, or None when the NN Pareto front
        has not been generated yet.
    """

    print(f"\n{'='*60}")
    print("PHASE 4: FEA Validation of NN Predictions")
    print(f"{'='*60}")
    if add_to_main_study:
        print(f"NOTE: Validated results will be added to main study.db (visible in dashboard)")

    # Load NN Pareto front produced by phase_nn_optimize
    nn_pareto_path = results_dir / "nn_pareto_front.json"
    if not nn_pareto_path.exists():
        print("ERROR: NN Pareto front not found. Run --nn-optimize first.")
        return None

    with open(nn_pareto_path) as f:
        nn_pareto = json.load(f)

    candidates = nn_pareto['trials'][:n_validate]
    print(f"Validating {len(candidates)} best NN candidates with FEA")

    # Initialize NX solver
    nx_solver = NXSolver(nastran_version="2506")
    sim_file = model_dir / config['simulation']['sim_file']

    validated_results = []

    for i, candidate in enumerate(candidates):
        params = candidate['params']
        nn_values = candidate['values']

        print(f"\n Validating candidate {i+1}/{len(candidates)}:")
        print(f" support_angle={params['support_angle']:.1f}, tip_thickness={params['tip_thickness']:.1f}")
        print(f" NN prediction: mass={nn_values[0]:.4f}, stress={nn_values[1]:.2f}, stiffness={nn_values[2]:.2f}")

        # Run FEA
        result = nx_solver.run_simulation(
            sim_file=sim_file,
            working_dir=model_dir,
            expression_updates=params,
            solution_name=config['simulation'].get('solution_name'),
            cleanup=True
        )

        if not result['success']:
            print(f" FEA FAILED: {result.get('error', 'Unknown')}")
            continue

        # Extract objectives from the solver outputs
        op2_file = result['op2_file']
        dat_file = model_dir / config['simulation']['dat_file']

        fea_mass = extract_mass_from_bdf(str(dat_file))
        stress_result = extract_solid_stress(op2_file, subcase=1, element_type='chexa')
        # /1000 scaling matches the convention used elsewhere in this script
        fea_stress = stress_result.get('max_von_mises', float('inf')) / 1000.0
        disp_result = extract_displacement(op2_file, subcase=1)
        max_disp = disp_result['max_displacement']
        # Negative so that 'minimize' maximizes stiffness; floor avoids /0
        fea_stiffness = -1000.0 / max(abs(max_disp), 1e-6)

        print(f" FEA result: mass={fea_mass:.4f}, stress={fea_stress:.2f}, stiffness={fea_stiffness:.2f}")

        # Relative NN prediction errors, in percent
        mass_err = abs(fea_mass - nn_values[0]) / fea_mass * 100
        stress_err = abs(fea_stress - nn_values[1]) / fea_stress * 100
        stiff_err = abs(fea_stiffness - nn_values[2]) / abs(fea_stiffness) * 100

        print(f" NN Error: mass={mass_err:.1f}%, stress={stress_err:.1f}%, stiffness={stiff_err:.1f}%")

        validated_results.append({
            'params': params,
            'nn_objectives': nn_values,
            'fea_objectives': [fea_mass, fea_stress, fea_stiffness],
            'errors_percent': [mass_err, stress_err, stiff_err]
        })

    # Add validated results to main study database
    if add_to_main_study and validated_results:
        print(f"\nAdding {len(validated_results)} validated results to main study.db...")
        main_db_path = results_dir / "study.db"
        main_storage = f"sqlite:///{main_db_path}"

        try:
            # Load existing study
            main_study = optuna.load_study(
                study_name=study_name,
                storage=main_storage,
                sampler=NSGAIISampler(population_size=20, seed=42)
            )

            # Add each validated result as a new trial
            for result in validated_results:
                # Create a new trial with the FEA results
                trial = main_study.ask()

                # Pin each parameter via a degenerate low == high range
                for param_name, param_value in result['params'].items():
                    trial.suggest_float(param_name, param_value, param_value)

                # BUGFIX: user attributes must be set BEFORE tell() —
                # Optuna rejects attribute writes on finished trials, so
                # the original (attrs after tell) raised into the except
                # below and dropped the provenance attributes.
                trial.set_user_attr('source', 'nn_validated')
                trial.set_user_attr('nn_prediction', result['nn_objectives'])
                trial.set_user_attr('nn_error_percent', result['errors_percent'])

                # Tell the study the FEA objective values
                fea_objs = result['fea_objectives']
                main_study.tell(trial, fea_objs)

            print(f" Added {len(validated_results)} trials to main study (now {len(main_study.trials)} total)")

        except Exception as e:
            print(f" WARNING: Could not add to main study: {e}")

    # Summary
    print(f"\n{'='*60}")
    print("Validation Summary")
    print(f"{'='*60}")

    if validated_results:
        avg_errors = np.array([r['errors_percent'] for r in validated_results]).mean(axis=0)
        print(f"Average NN prediction error:")
        print(f" Mass: {avg_errors[0]:.1f}%")
        print(f" Stress: {avg_errors[1]:.1f}%")
        print(f" Stiffness: {avg_errors[2]:.1f}%")

        # Save validation results
        validation_report = {
            'timestamp': datetime.now().isoformat(),
            'n_validated': len(validated_results),
            'average_errors_percent': {
                'mass': float(avg_errors[0]),
                'stress': float(avg_errors[1]),
                'stiffness': float(avg_errors[2])
            },
            'results': validated_results
        }

        report_path = results_dir / "validation_report.json"
        with open(report_path, 'w') as f:
            json.dump(validation_report, f, indent=2)

        print(f"\nValidation report saved to {report_path}")

    return validated_results
|
|
|
|
|
|
def phase_hybrid_loop(results_dir: Path, model_dir: Path, config: dict,
                      study_name: str, n_iterations: int = 3,
                      nn_trials_per_iter: int = 500, validate_per_iter: int = 5,
                      epochs: int = 300, logger=None):
    """
    Run adaptive hybrid loop: Train → NN-Optimize → Validate → Retrain → Repeat

    This continuously improves the surrogate by:
    1. Running NN optimization to find promising candidates
    2. Validating top candidates with FEA
    3. Adding FEA results to training data
    4. Retraining surrogate with expanded dataset
    5. Repeat until convergence or max iterations

    Args:
        results_dir: Directory with study.db and phase artifacts.
        model_dir: Directory with the NX simulation model.
        config: Parsed study configuration (reads config['simulation']).
        study_name: Name of the main Optuna study.
        n_iterations: Number of hybrid loop iterations
        nn_trials_per_iter: NN trials per iteration
        validate_per_iter: FEA validations per iteration
        epochs: Training epochs per (re)train.
        logger: Passed through to the phase functions.

    Returns:
        The last trained surrogate.
    """

    print(f"\n{'#'*60}")
    print("# HYBRID ADAPTIVE LOOP")
    print(f"{'#'*60}")
    print(f"Iterations: {n_iterations}")
    print(f"NN trials per iteration: {nn_trials_per_iter}")
    print(f"FEA validations per iteration: {validate_per_iter}")
    print(f"Total FEA budget: {n_iterations * validate_per_iter} additional runs")

    # NOTE(review): model_path is unused — phase_train derives its own save path.
    model_path = results_dir / "surrogate_best.pt"

    for iteration in range(1, n_iterations + 1):
        print(f"\n{'='*60}")
        print(f"ITERATION {iteration}/{n_iterations}")
        print(f"{'='*60}")

        # Step 1: Train/Retrain surrogate from current database
        print(f"\n[{iteration}.1] Training surrogate from current FEA data...")
        # NOTE(review): phase_train returns None when PyTorch is missing,
        # which would make the following phases fail — confirm torch is a
        # hard requirement for this entry point.
        surrogate = phase_train(results_dir, study_name, logger, epochs=epochs)

        # Step 2: Run NN optimization (results go to the separate nn_study.db)
        print(f"\n[{iteration}.2] Running NN optimization ({nn_trials_per_iter} trials)...")
        phase_nn_optimize(results_dir, study_name, surrogate,
                          n_trials=nn_trials_per_iter, logger=logger)

        # Step 3: Validate top candidates with FEA; validated trials are
        # appended to study.db, so the next retrain sees the new data.
        print(f"\n[{iteration}.3] Validating top {validate_per_iter} candidates with FEA...")
        validated = phase_validate(
            results_dir, model_dir, config, surrogate,
            n_validate=validate_per_iter, logger=logger,
            add_to_main_study=True, study_name=study_name
        )

        # Check convergence (if errors are low enough)
        if validated:
            avg_errors = np.array([r['errors_percent'] for r in validated]).mean(axis=0)
            max_error = max(avg_errors)

            print(f"\n Iteration {iteration} summary:")
            print(f" Average errors: mass={avg_errors[0]:.1f}%, stress={avg_errors[1]:.1f}%, stiffness={avg_errors[2]:.1f}%")
            print(f" Max error: {max_error:.1f}%")

            # Stop early once the surrogate predicts within 5% on average
            if max_error < 5.0:
                print(f"\n ✓ Convergence reached! Max error < 5%")
                break

    # Final summary
    print(f"\n{'#'*60}")
    print("# HYBRID LOOP COMPLETE")
    print(f"{'#'*60}")

    # Load final study stats
    main_db_path = results_dir / "study.db"
    main_storage = f"sqlite:///{main_db_path}"
    main_study = optuna.load_study(study_name=study_name, storage=main_storage)

    print(f"Total FEA trials in study: {len(main_study.trials)}")
    print(f"Pareto front size: {len(main_study.best_trials)}")

    # NOTE(review): if n_iterations < 1 the loop never runs and 'surrogate'
    # is unbound here (NameError) — confirm callers always pass >= 1.
    return surrogate
|
|
|
|
|
|
def phase_turbo_loop(results_dir: Path, model_dir: Path, config: dict,
                     study_name: str, total_nn_trials: int = 10000,
                     nn_batch_size: int = 100, retrain_every: int = 10,
                     epochs: int = 150, logger=None):
    """
    TURBO MODE: Aggressive adaptive optimization.

    Strategy:
    - Run NN in small batches (100 trials)
    - Validate ONLY the single best candidate with FEA
    - Add to training data immediately
    - Retrain surrogate every N FEA validations
    - Repeat until total NN budget exhausted

    This is more efficient than batch validation because:
    - We always explore the most promising direction
    - FEA validates where we actually care
    - Surrogate improves continuously

    Example: 10K NN trials with batch=100 → 100 FEA validations

    Args:
        results_dir: Directory with study.db and output artifacts.
        model_dir: Directory with the NX simulation model.
        config: Study configuration; reads config['simulation'].
        study_name: Name of the main Optuna study.
        total_nn_trials: Total NN evaluation budget.
        nn_batch_size: NN samples evaluated between FEA validations.
        retrain_every: Retrain the surrogate after this many successful FEA runs.
        epochs: Training epochs per (re)train.
        logger: Passed through to phase_train.

    Returns:
        The most recently trained surrogate.
    """

    print(f"\n{'#'*60}")
    print("# TURBO MODE: Aggressive Adaptive Optimization")
    print(f"{'#'*60}")
    print(f"Total NN budget: {total_nn_trials:,} trials")
    print(f"NN batch size: {nn_batch_size}")
    print(f"FEA validations: ~{total_nn_trials // nn_batch_size}")
    print(f"Retrain every: {retrain_every} FEA runs")

    # NOTE(review): model_path is unused — phase_train saves its own checkpoint.
    model_path = results_dir / "surrogate_best.pt"
    nx_solver = NXSolver(nastran_version="2506")
    sim_file = model_dir / config['simulation']['sim_file']

    # Initial training
    print(f"\n[INIT] Training initial surrogate...")
    surrogate = phase_train(results_dir, study_name, logger, epochs=epochs)

    # Tracking
    fea_count = 0       # successful FEA validations so far
    nn_count = 0        # NN evaluations consumed from the budget
    best_solutions = []
    iteration = 0

    import time
    start_time = time.time()

    while nn_count < total_nn_trials:
        iteration += 1
        batch_trials = min(nn_batch_size, total_nn_trials - nn_count)

        print(f"\n{'─'*50}")
        print(f"Iteration {iteration}: NN trials {nn_count+1}-{nn_count+batch_trials}")

        # Run NN batch (in-memory, no database): pure random search over the
        # design space, keeping the single best scalarized candidate.
        best_candidate = None
        best_score = float('inf')

        for _ in range(batch_trials):
            # Random sample in design space
            support_angle = np.random.uniform(20.0, 70.0)
            tip_thickness = np.random.uniform(30.0, 60.0)

            params = {'support_angle': support_angle, 'tip_thickness': tip_thickness}
            pred = surrogate.predict(params)

            # Score: weighted combination (lower is better)
            # Adjust weights based on what matters most
            score = pred['mass'] + 0.01 * pred['stress'] + 0.1 * abs(pred['stiffness'])

            if score < best_score:
                best_score = score
                best_candidate = {
                    'params': params,
                    'nn_pred': pred
                }

        nn_count += batch_trials

        # Validate best candidate with FEA
        params = best_candidate['params']
        nn_pred = best_candidate['nn_pred']

        print(f" Best NN: angle={params['support_angle']:.1f}, thick={params['tip_thickness']:.1f}")
        print(f" NN → mass={nn_pred['mass']:.4f}, stress={nn_pred['stress']:.1f}, stiff={nn_pred['stiffness']:.1f}")

        # Run FEA
        result = nx_solver.run_simulation(
            sim_file=sim_file,
            working_dir=model_dir,
            expression_updates=params,
            solution_name=config['simulation'].get('solution_name'),
            cleanup=True
        )

        if not result['success']:
            # Budget is still consumed; only the FEA sample is lost.
            print(f" FEA FAILED - skipping")
            continue

        # Extract FEA results
        op2_file = result['op2_file']
        dat_file = model_dir / config['simulation']['dat_file']

        fea_mass = extract_mass_from_bdf(str(dat_file))
        stress_result = extract_solid_stress(op2_file, subcase=1, element_type='chexa')
        # /1000 scaling matches the convention used elsewhere in this script
        fea_stress = stress_result.get('max_von_mises', float('inf')) / 1000.0
        disp_result = extract_displacement(op2_file, subcase=1)
        max_disp = disp_result['max_displacement']
        # Negative so 'minimize' maximizes stiffness; floor avoids divide-by-zero
        fea_stiffness = -1000.0 / max(abs(max_disp), 1e-6)

        print(f" FEA → mass={fea_mass:.4f}, stress={fea_stress:.1f}, stiff={fea_stiffness:.1f}")

        # Compute prediction error
        mass_err = abs(fea_mass - nn_pred['mass']) / fea_mass * 100
        stress_err = abs(fea_stress - nn_pred['stress']) / fea_stress * 100
        print(f" Error: mass={mass_err:.1f}%, stress={stress_err:.1f}%")

        fea_count += 1

        # Add to main study database
        main_db_path = results_dir / "study.db"
        main_storage = f"sqlite:///{main_db_path}"

        try:
            main_study = optuna.load_study(
                study_name=study_name,
                storage=main_storage,
                sampler=NSGAIISampler(population_size=20, seed=42)
            )

            # Degenerate low == high ranges pin the validated values
            trial = main_study.ask()
            trial.suggest_float('support_angle', params['support_angle'], params['support_angle'])
            trial.suggest_float('tip_thickness', params['tip_thickness'], params['tip_thickness'])
            main_study.tell(trial, [fea_mass, fea_stress, fea_stiffness])

            # NOTE(review): Optuna typically rejects set_user_attr on a trial
            # that has already been tell()-ed (finished), so these two calls
            # likely raise into the except below and the provenance attrs are
            # lost. Consider setting attrs before tell(); confirm against the
            # Optuna version in use.
            trial.set_user_attr('source', 'turbo_mode')
            trial.set_user_attr('iteration', iteration)

        except Exception as e:
            print(f" Warning: couldn't add to study: {e}")

        # Track best solutions
        best_solutions.append({
            'iteration': iteration,
            'params': params,
            'fea': [fea_mass, fea_stress, fea_stiffness],
            'nn_error': [mass_err, stress_err]
        })

        # Retrain periodically
        if fea_count % retrain_every == 0:
            # NOTE(review): relies on main_study bound in the try above — if
            # load_study failed this print raises NameError.
            print(f"\n [RETRAIN] Retraining surrogate with {len(main_study.trials)} samples...")
            surrogate = phase_train(results_dir, study_name, logger, epochs=epochs)

        # Progress
        elapsed = time.time() - start_time
        rate = nn_count / elapsed if elapsed > 0 else 0
        remaining = (total_nn_trials - nn_count) / rate if rate > 0 else 0
        print(f" Progress: {nn_count:,}/{total_nn_trials:,} NN | {fea_count} FEA | {elapsed/60:.1f}min elapsed | ~{remaining/60:.1f}min remaining")

    # Final summary
    print(f"\n{'#'*60}")
    print("# TURBO MODE COMPLETE")
    print(f"{'#'*60}")
    print(f"NN trials: {nn_count:,}")
    print(f"FEA validations: {fea_count}")
    print(f"Time: {(time.time() - start_time)/60:.1f} minutes")

    # Load final study
    # NOTE(review): main_storage is only bound inside the loop after a
    # successful FEA run; if none succeeded this raises NameError.
    main_study = optuna.load_study(study_name=study_name, storage=main_storage)
    print(f"Total trials in study: {len(main_study.trials)}")
    print(f"Pareto front: {len(main_study.best_trials)} solutions")

    # Save turbo results
    turbo_report = {
        'mode': 'turbo',
        'total_nn_trials': nn_count,
        'fea_validations': fea_count,
        'time_minutes': (time.time() - start_time) / 60,
        'best_solutions': best_solutions[-20:]  # Last 20
    }

    report_path = results_dir / "turbo_report.json"
    with open(report_path, 'w') as f:
        json.dump(turbo_report, f, indent=2)

    print(f"\nReport saved to {report_path}")

    return surrogate
|
|
|
|
|
|
def load_config(config_file: Path) -> dict:
    """Load the study configuration from a JSON file.

    Args:
        config_file: Path to the JSON configuration file.

    Returns:
        The parsed configuration as a dictionary.

    Raises:
        FileNotFoundError: If ``config_file`` does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Explicit UTF-8 avoids the platform-dependent default encoding
    # (e.g. cp1252 on Windows), which can fail on non-ASCII config values.
    with open(config_file, 'r', encoding='utf-8') as f:
        return json.load(f)
|
|
|
|
|
|
def _load_surrogate(model_path: Path):
    """Load a trained surrogate from disk.

    Returns the loaded ``BracketSurrogate``, or ``None`` (after printing an
    error message) when no trained model file exists yet.
    """
    if model_path.exists():
        return BracketSurrogate(model_path=model_path)
    print("ERROR: No trained surrogate. Run --train first.")
    return None


def main():
    """Main workflow: parse CLI flags and dispatch the requested phases.

    Phases may be combined on one invocation; ``--all`` runs the four basic
    phases once, while ``--hybrid-loop`` and ``--turbo`` run their own
    self-contained train/validate cycles.

    Returns:
        0 on success, 1 on usage or missing-model error (shell exit code).
    """
    parser = argparse.ArgumentParser(description='bracket_pareto_3obj - Hybrid NN Optimization')

    # Phase selection
    parser.add_argument('--export', action='store_true', help='Phase 1: Export training data')
    parser.add_argument('--train', action='store_true', help='Phase 2: Train surrogate')
    parser.add_argument('--nn-optimize', action='store_true', help='Phase 3: NN optimization')
    parser.add_argument('--validate', action='store_true', help='Phase 4: FEA validation')
    parser.add_argument('--all', action='store_true', help='Run all phases once')
    parser.add_argument('--hybrid-loop', action='store_true',
                        help='Run adaptive hybrid loop: Train→NN→Validate→Retrain (repeats)')
    parser.add_argument('--turbo', action='store_true',
                        help='TURBO: Run 100 NN, validate best, retrain, repeat for 10K total')

    # Parameters
    parser.add_argument('--epochs', type=int, default=300, help='Training epochs')
    parser.add_argument('--nn-trials', type=int, default=1000, help='NN optimization trials (or total for turbo)')
    parser.add_argument('--validate-count', type=int, default=10, help='Number of candidates to validate')
    parser.add_argument('--iterations', type=int, default=3, help='Hybrid loop iterations')
    parser.add_argument('--batch-size', type=int, default=100, help='NN batch size for turbo mode')
    parser.add_argument('--retrain-every', type=int, default=10, help='Retrain surrogate every N FEA runs (turbo)')

    args = parser.parse_args()

    # At least one phase flag is required; bail out with a usage hint.
    if not any([args.export, args.train, args.nn_optimize, args.validate, args.all, args.hybrid_loop, args.turbo]):
        print("No phase specified. Use --export, --train, --nn-optimize, --validate, --all, --hybrid-loop, or --turbo")
        return 1

    # Setup paths (all relative to this script's study directory)
    study_dir = Path(__file__).parent
    config_path = study_dir / "1_setup" / "optimization_config.json"
    model_dir = study_dir / "1_setup" / "model"
    results_dir = study_dir / "2_results"
    results_dir.mkdir(exist_ok=True)

    study_name = "bracket_pareto_3obj"
    model_path = results_dir / "surrogate_best.pt"

    # Initialize logging and configuration
    logger = get_logger(study_name, study_dir=results_dir)
    config = load_config(config_path)

    print(f"\n{'#'*60}")
    print(f"# Bracket Pareto 3-Objective - Hybrid NN Optimization")
    print(f"{'#'*60}")
    print(f"Study: {study_name}")
    print(f"Results: {results_dir}")

    # Execute phases. `surrogate` is carried forward so a --train in the same
    # invocation feeds --nn-optimize / --validate without a disk round-trip.
    surrogate = None

    if args.all or args.export:
        phase_export(results_dir, study_name, logger)

    if args.all or args.train:
        surrogate = phase_train(results_dir, study_name, logger, epochs=args.epochs)

    if args.all or args.nn_optimize:
        if surrogate is None:
            surrogate = _load_surrogate(model_path)
            if surrogate is None:
                return 1
        phase_nn_optimize(results_dir, study_name, surrogate, n_trials=args.nn_trials, logger=logger)

    if args.all or args.validate:
        if surrogate is None:
            surrogate = _load_surrogate(model_path)
            if surrogate is None:
                return 1
        phase_validate(results_dir, model_dir, config, surrogate,
                       n_validate=args.validate_count, logger=logger,
                       study_name=study_name)

    # Hybrid loop mode - adaptive refinement
    if args.hybrid_loop:
        phase_hybrid_loop(
            results_dir=results_dir,
            model_dir=model_dir,
            config=config,
            study_name=study_name,
            n_iterations=args.iterations,
            nn_trials_per_iter=args.nn_trials,
            validate_per_iter=args.validate_count,
            epochs=args.epochs,
            logger=logger
        )

    # Turbo mode - aggressive single-best validation
    if args.turbo:
        phase_turbo_loop(
            results_dir=results_dir,
            model_dir=model_dir,
            config=config,
            study_name=study_name,
            total_nn_trials=args.nn_trials,
            nn_batch_size=args.batch_size,
            retrain_every=args.retrain_every,
            epochs=args.epochs,
            logger=logger
        )

    print(f"\n{'#'*60}")
    print("# Workflow Complete!")
    print(f"{'#'*60}\n")

    return 0
|
|
|
|
|
|
if __name__ == "__main__":
    # sys.exit propagates main()'s integer return code to the shell.
    # The bare builtin exit() is injected by the `site` module and is not
    # guaranteed to exist (e.g. when running with `python -S`).
    sys.exit(main())
|