Files
Atomizer/studies/bracket_stiffness_optimization_atomizerfield/run_optimization.py
Antoine 8cbdbcad78 feat: Add Protocol 13 adaptive optimization, Plotly charts, and dashboard improvements
## Protocol 13: Adaptive Multi-Objective Optimization
- Iterative FEA + Neural Network surrogate workflow (see the sketch below)
- Initial FEA sampling, NN training, NN-accelerated search
- FEA validation of top NN predictions, retraining loop
- adaptive_state.json tracks iteration history and best values
- M1 mirror study (V11) with 103 FEA, 3000 NN trials
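
A minimal, self-contained sketch of the adaptive loop described above (the objective function and surrogate here are toy stand-ins and the `adaptive_state.json` fields are illustrative; the real workflow drives NX/Nastran FEA and the AtomizerField network):

```python
import json
import random

def fea_solve(x):
    """Stand-in for a ~30 s FEA solve (toy 1-D objective)."""
    return (x - 0.3) ** 2 + random.gauss(0, 1e-3)

class Surrogate:
    """Stand-in for the trained neural surrogate (nearest-neighbour lookup)."""
    def fit(self, xs, ys):
        self.xs, self.ys = list(xs), list(ys)
    def predict(self, x):
        i = min(range(len(self.xs)), key=lambda j: abs(self.xs[j] - x))
        return self.ys[i]

state = {"iterations": [], "best": None}
xs = [random.uniform(0.0, 1.0) for _ in range(10)]    # initial FEA sampling
ys = [fea_solve(x) for x in xs]

for it in range(3):                                    # adaptive iterations
    model = Surrogate()
    model.fit(xs, ys)                                  # (re)train on all FEA data so far
    candidates = [random.uniform(0.0, 1.0) for _ in range(3000)]
    top = sorted(candidates, key=model.predict)[:5]    # NN-accelerated search
    for x in top:                                      # FEA validation of top NN predictions
        xs.append(x)
        ys.append(fea_solve(x))
    state["iterations"].append({"iteration": it, "n_fea": len(xs), "best": min(ys)})
    state["best"] = min(ys)

with open("adaptive_state.json", "w") as f:            # iteration history and best value
    json.dump(state, f, indent=2)
```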

## Dashboard Visualization Enhancements
- Added Plotly.js interactive charts (parallel coordinates, Pareto front, convergence)
- Lazy loading with React.lazy() for performance
- Code splitting: plotly.js-basic-dist (~1 MB vs ~3.5 MB for the full plotly.js bundle)
- Chart library toggle (Recharts default, Plotly on-demand)
- ExpandableChart component for full-screen modal views
- ConsoleOutput component for real-time log viewing

## Documentation
- Protocol 13 detailed documentation
- Dashboard visualization guide
- Plotly components README
- Updated run-optimization skill with Mode 5 (adaptive)

## Bug Fixes
- Fixed TypeScript errors in dashboard components
- Fixed Card component to accept ReactNode title
- Removed unused imports across components

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-04 07:41:54 -05:00


"""
Bracket Stiffness Optimization with AtomizerField Neural Acceleration
======================================================================
Multi-objective optimization: maximize stiffness, minimize mass
Protocol: NSGA-II (Protocol 11)
This script demonstrates the complete AtomizerField workflow:
1. FEA Exploration Phase (50 trials) - Collects training data
2. Auto-Training - Triggers neural network training at 50 points
3. Neural Acceleration Phase (50+ trials) - 2,200x faster optimization
Staged Workflow:
----------------
1. VALIDATE: Clean old solver files, run 1 trial to validate setup
python run_optimization.py --validate
2. TEST: Run 3 trials as integration test
python run_optimization.py --test
3. RUN: Launch official optimization
python run_optimization.py --run --trials 100
Other Usage:
# Resume existing study
python run_optimization.py --run --trials 25 --resume
# Enable neural acceleration (requires trained model)
python run_optimization.py --run --trials 100 --enable-nn --resume
"""
from pathlib import Path
import sys
import json
import argparse
from datetime import datetime
from typing import Optional, Tuple, List
# Add the project root (Atomizer/) to sys.path so optimization_engine imports resolve
project_root = Path(__file__).resolve().parents[2]
sys.path.insert(0, str(project_root))
import optuna
from optuna.samplers import NSGAIISampler
# Use NXSolver (subprocess-based) instead of direct NXOpen imports
from optimization_engine.nx_solver import NXSolver
# Import extractors
from optimization_engine.extractors.bdf_mass_extractor import extract_mass_from_bdf
from optimization_engine.extractors.extract_displacement import extract_displacement
# Import structured logger
from optimization_engine.logger import get_logger
# Import training data exporter for AtomizerField
from optimization_engine.training_data_exporter import TrainingDataExporter
# Import neural surrogate for fast predictions
from optimization_engine.neural_surrogate import create_surrogate_for_study, NeuralSurrogate, ParametricSurrogate
def load_config(config_file: Path) -> dict:
"""Load configuration from JSON file."""
with open(config_file, 'r') as f:
return json.load(f)
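# Illustrative shape of optimization_config.json (field names inferred from how this
# script reads the config; all values below are placeholders, not the study's real settings):
#
# {
#   "description": "Bracket stiffness optimization",
#   "simulation": {
#     "sim_file": "bracket.sim",
#     "dat_file": "bracket_solution.dat",
#     "solution_name": "Solution 1"
#   },
#   "design_variables": [{"parameter": "rib_thickness", "bounds": [2.0, 8.0]}],
#   "objectives": [{"name": "stiffness"}, {"name": "mass"}],
#   "constraints": [{"name": "mass_limit", "threshold": 0.2}],
#   "training_data_export": {"enabled": true, "export_dir": "atomizer_field_training_data/bracket"}
# }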
def clean_nastran_files(model_dir: Path, logger) -> List[Path]:
"""
Remove old Nastran solver output files to ensure fresh validation.
Cleans: *.op2, *.f06, *.log, *.f04, *.pch, _temp*.txt
Preserves: *.dat, *.bdf, *.prt, *.sim, *.fem
Args:
model_dir: Path to model directory containing solver files
logger: Logger instance for reporting
Returns:
List of deleted file paths
"""
nastran_extensions = ['*.op2', '*.f06', '*.log', '*.f04', '*.pch', '*.DBALL', '*.MASTER']
temp_patterns = ['_temp*.txt', '*_temp_*']
deleted_files = []
logger.info(f"\n{'='*60}")
logger.info("CLEANING OLD NASTRAN FILES")
logger.info(f"{'='*60}")
logger.info(f"Model directory: {model_dir}")
for pattern in nastran_extensions + temp_patterns:
matches = list(model_dir.glob(pattern))
for file_path in matches:
try:
file_path.unlink()
deleted_files.append(file_path)
logger.info(f" Deleted: {file_path.name}")
except Exception as e:
logger.warning(f" Failed to delete {file_path.name}: {e}")
if deleted_files:
logger.info(f"\nCleaned {len(deleted_files)} files")
else:
logger.info("\nNo old solver files found (directory is clean)")
return deleted_files
def run_validation(config: dict, nx_solver, model_dir: Path, results_dir: Path,
study_name: str, logger) -> bool:
"""
Run single trial validation to verify setup.
This validates:
- NX connection and journal execution
- Expression updates work correctly
- Solver completes successfully
- Result extraction works
Returns:
True if validation passed, False otherwise
"""
logger.info(f"\n{'='*60}")
logger.info("VALIDATION MODE - Single Trial Test")
logger.info(f"{'='*60}")
# Create temporary study for validation
storage = f"sqlite:///{results_dir / 'study_validation.db'}"
sampler = NSGAIISampler(population_size=5, seed=42)
study = optuna.create_study(
study_name=f"{study_name}_validation",
storage=storage,
sampler=sampler,
directions=['minimize', 'minimize'],
load_if_exists=False
)
# Run single trial
logger.info("\nRunning validation trial...")
try:
study.optimize(
lambda trial: fea_objective(trial, config, nx_solver, model_dir, logger, None),
n_trials=1,
show_progress_bar=False
)
# Check result
if len(study.trials) == 1:
trial = study.trials[0]
if trial.state == optuna.trial.TrialState.COMPLETE:
stiffness = trial.user_attrs.get('stiffness', 'N/A')
mass = trial.user_attrs.get('mass', 'N/A')
disp = trial.user_attrs.get('max_displacement', 'N/A')
logger.info(f"\n{'='*60}")
logger.info("VALIDATION PASSED!")
logger.info(f"{'='*60}")
logger.info(f"Results from validation trial:")
logger.info(f" Stiffness: {stiffness:.2f} N/mm" if isinstance(stiffness, float) else f" Stiffness: {stiffness}")
logger.info(f" Mass: {mass:.4f} kg" if isinstance(mass, float) else f" Mass: {mass}")
logger.info(f" Max Displacement: {disp:.6f} mm" if isinstance(disp, float) else f" Max Displacement: {disp}")
logger.info(f"\nDesign variables used:")
for var in config['design_variables']:
param = var['parameter']
value = trial.params.get(param, 'N/A')
logger.info(f" {param}: {value}")
logger.info(f"\n{'='*60}")
logger.info("Next steps:")
logger.info(" 1. Review results above for sanity")
logger.info(" 2. Run integration test: python run_optimization.py --test")
logger.info(" 3. Launch full optimization: python run_optimization.py --run --trials 100")
logger.info(f"{'='*60}")
return True
else:
logger.error(f"\nVALIDATION FAILED!")
logger.error(f"Trial state: {trial.state}")
return False
else:
logger.error(f"\nVALIDATION FAILED - No trials completed")
return False
except Exception as e:
logger.error(f"\nVALIDATION FAILED with exception: {e}")
import traceback
traceback.print_exc()
return False
def discover_output_files(model_dir: Path, logger) -> dict:
"""
Scan model directory for all Nastran/FEA output files after a solve.
This helps identify what data is available for extraction.
Returns:
dict with categorized files and their details
"""
logger.info(f"\n{'='*60}")
logger.info("DISCOVERING OUTPUT FILES")
logger.info(f"{'='*60}")
discovered = {
'nastran_results': [], # .op2, .f06, .pch
'nastran_input': [], # .dat, .bdf
'nx_files': [], # .prt, .sim, .fem
'temp_files': [], # _temp*.txt
'mesh_files': [], # .vtk, .vtu, .h5
'other': []
}
patterns = {
'nastran_results': ['*.op2', '*.f06', '*.pch', '*.f04', '*.log', '*.DBALL', '*.MASTER'],
'nastran_input': ['*.dat', '*.bdf'],
'nx_files': ['*.prt', '*.sim', '*.fem'],
'temp_files': ['_temp*.txt', '*_temp_*'],
'mesh_files': ['*.vtk', '*.vtu', '*.h5']
}
for category, globs in patterns.items():
for pattern in globs:
for file_path in model_dir.glob(pattern):
file_info = {
'name': file_path.name,
'path': str(file_path),
'size_kb': file_path.stat().st_size / 1024,
'modified': datetime.fromtimestamp(file_path.stat().st_mtime).strftime('%Y-%m-%d %H:%M:%S')
}
discovered[category].append(file_info)
# Report findings
logger.info(f"\nModel directory: {model_dir}\n")
for category, files in discovered.items():
if files:
logger.info(f"{category.upper().replace('_', ' ')}:")
for f in files:
logger.info(f" {f['name']:40} {f['size_kb']:>10.1f} KB ({f['modified']})")
# Summary
total_files = sum(len(files) for files in discovered.values())
logger.info(f"\nTotal files found: {total_files}")
# Key files for AtomizerField
op2_files = [f for f in discovered['nastran_results'] if f['name'].endswith('.op2')]
dat_files = [f for f in discovered['nastran_input'] if f['name'].endswith('.dat')]
if op2_files:
logger.info(f"\n[OK] OP2 file found: {op2_files[0]['name']} - Field data available for AtomizerField")
else:
logger.warning("\n[!] No OP2 file found - Run a solve first")
if dat_files:
logger.info(f"[OK] DAT file found: {dat_files[0]['name']} - Mass extraction available")
else:
logger.warning("[!] No DAT file found - Check simulation setup")
return discovered
def run_discovery(config: dict, nx_solver, model_dir: Path, results_dir: Path,
study_name: str, logger) -> bool:
"""
Discovery mode: Intelligently scan model, then run ONE solve to discover all outputs.
This is the first step when setting up a new study - it shows you what
data is available for extraction and AtomizerField training.
Steps:
1. Scan model to discover solutions, expressions, mesh info
2. Clean old solver output files
3. Run ONE FEA solve (using first discovered solution)
4. Scan and report all generated files
5. Provide configuration guidance
Returns:
True if discovery completed successfully
"""
logger.info(f"\n{'='*60}")
logger.info("INTELLIGENT DISCOVERY MODE")
logger.info(f"{'='*60}")
logger.info("This mode will:")
logger.info(" 1. Scan model for solutions, expressions, and mesh info")
logger.info(" 2. Clean old solver output files")
logger.info(" 3. Run ONE FEA solve")
logger.info(" 4. Scan and report all generated files")
logger.info(" 5. Provide configuration guidance")
sim_file = model_dir / config['simulation']['sim_file']
# =========================================================================
# STEP 1: INTELLIGENT MODEL SCAN (discover solutions, expressions, etc.)
# =========================================================================
logger.info(f"\n{'='*60}")
logger.info("STEP 1: SCANNING MODEL STRUCTURE")
logger.info(f"{'='*60}")
model_info = nx_solver.discover_model(sim_file)
if model_info.get('success'):
# Report discovered solutions
solutions = model_info.get('solutions', [])
if solutions:
logger.info(f"\n[OK] Found {len(solutions)} solution(s):")
for sol in solutions:
logger.info(f" - {sol['name']} (use this in config: \"solution_name\": \"{sol['name']}\")")
# Auto-detect solution name for this run
discovered_solution = solutions[0]['name']
logger.info(f"\n[AUTO] Will use first solution: \"{discovered_solution}\"")
else:
logger.warning("\n[!] No solutions found in model - will solve all")
discovered_solution = None
# Report discovered expressions
expressions = model_info.get('expressions', [])
if expressions:
logger.info(f"\n[OK] Found {len(expressions)} expression(s) (potential design variables):")
for expr in expressions[:10]: # Show first 10
value_str = f" = {expr.get('value', '?')}" if expr.get('value') else ""
logger.info(f" - {expr['name']}{value_str}")
if len(expressions) > 10:
logger.info(f" ... and {len(expressions) - 10} more")
else:
logger.info("\n[INFO] No expressions found (model may use direct parameters)")
# Report mesh info
mesh_info = model_info.get('mesh_info', {})
if mesh_info:
logger.info(f"\n[OK] Mesh info:")
logger.info(f" Elements: {mesh_info.get('element_count', 'Unknown')}")
logger.info(f" Nodes: {mesh_info.get('node_count', 'Unknown')}")
else:
logger.warning(f"\n[!] Model scan failed: {model_info.get('error', 'Unknown error')}")
logger.warning(" Falling back to config-specified solution name")
discovered_solution = config['simulation'].get('solution_name')
# =========================================================================
# STEP 2: CLEAN OLD FILES
# =========================================================================
clean_nastran_files(model_dir, logger)
# =========================================================================
# STEP 3: RUN SINGLE SOLVE
# =========================================================================
logger.info(f"\n{'='*60}")
logger.info("STEP 3: RUNNING SINGLE SOLVE")
logger.info(f"{'='*60}")
# Use discovered solution or fall back to config
solution_to_use = discovered_solution or config['simulation'].get('solution_name')
if solution_to_use:
logger.info(f" Using solution: \"{solution_to_use}\"")
else:
logger.info(f" Solving ALL solutions (no specific solution specified)")
# Create design vars at midpoint of bounds
design_vars = {}
for var in config['design_variables']:
param_name = var['parameter']
bounds = var['bounds']
midpoint = (bounds[0] + bounds[1]) / 2
design_vars[param_name] = midpoint
logger.info(f" {param_name}: {midpoint:.4f} (midpoint of [{bounds[0]}, {bounds[1]}])")
try:
result = nx_solver.run_simulation(
sim_file=sim_file,
working_dir=model_dir,
expression_updates=design_vars,
solution_name=solution_to_use,
cleanup=False # Keep all files for discovery
)
if not result['success']:
logger.error(f"\nSolve FAILED: {result.get('error', 'Unknown error')}")
logger.error("Check that NX is running and model is properly set up")
return False
logger.info(f"\nSolve SUCCESSFUL!")
logger.info(f" OP2 file: {result.get('op2_file', 'N/A')}")
except Exception as e:
logger.error(f"\nSolve FAILED with exception: {e}")
import traceback
traceback.print_exc()
return False
# =========================================================================
# STEP 4: DISCOVER OUTPUT FILES
# =========================================================================
discovered = discover_output_files(model_dir, logger)
# =========================================================================
# STEP 5: CONFIGURATION GUIDANCE
# =========================================================================
logger.info(f"\n{'='*60}")
logger.info("CONFIGURATION GUIDANCE")
logger.info(f"{'='*60}")
op2_files = [f for f in discovered['nastran_results'] if f['name'].endswith('.op2')]
dat_files = [f for f in discovered['nastran_input'] if f['name'].endswith('.dat')]
if op2_files and dat_files:
logger.info("\n[OK] Model is ready for optimization!")
logger.info("\nRecommended optimization_config.json settings:")
if solution_to_use:
logger.info(f' "solution_name": "{solution_to_use}"')
logger.info(f' "op2_file": "{op2_files[0]["name"]}"')
logger.info(f' "dat_file": "{dat_files[0]["name"]}"')
logger.info(f"\n{'='*60}")
logger.info("NEXT STEPS")
logger.info(f"{'='*60}")
logger.info(" 1. Review discovered files and solutions above")
logger.info(" 2. Update optimization_config.json if needed")
logger.info(" 3. Run validation: python run_optimization.py --validate")
logger.info(" 4. Run test (3 trials): python run_optimization.py --test")
logger.info(" 5. Run FEA training data collection: python run_optimization.py --train --trials 50")
logger.info(" 6. Run full AtomizerField: python run_optimization.py --run --trials 100 --enable-nn")
else:
logger.warning("\n[!] Some required files are missing!")
if not op2_files:
logger.warning(" - No OP2 file: Check solver executed correctly")
if not dat_files:
logger.warning(" - No DAT file: Check FEM export settings")
return True
def run_test(config: dict, nx_solver, model_dir: Path, results_dir: Path,
study_name: str, logger, n_trials: int = 3) -> bool:
"""
Run integration test with 3 trials.
This tests:
- Multiple sequential trials complete
- NSGA-II sampling works
- Results are stored correctly
Returns:
True if all test trials passed, False otherwise
"""
logger.info(f"\n{'='*60}")
logger.info(f"TEST MODE - {n_trials} Trial Integration Test")
logger.info(f"{'='*60}")
# Create temporary study for testing
storage = f"sqlite:///{results_dir / 'study_test.db'}"
sampler = NSGAIISampler(population_size=5, seed=42)
study = optuna.create_study(
study_name=f"{study_name}_test",
storage=storage,
sampler=sampler,
directions=['minimize', 'minimize'],
load_if_exists=False
)
logger.info(f"\nRunning {n_trials} test trials...")
start_time = datetime.now()
try:
study.optimize(
lambda trial: fea_objective(trial, config, nx_solver, model_dir, logger, None),
n_trials=n_trials,
show_progress_bar=True
)
elapsed = datetime.now() - start_time
n_complete = len([t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE])
n_failed = len([t for t in study.trials if t.state == optuna.trial.TrialState.FAIL])
logger.info(f"\n{'='*60}")
if n_complete == n_trials:
logger.info("TEST PASSED!")
elif n_complete > 0:
logger.info("TEST PARTIALLY PASSED")
else:
logger.info("TEST FAILED!")
logger.info(f"{'='*60}")
logger.info(f"Results:")
logger.info(f" Completed: {n_complete}/{n_trials}")
logger.info(f" Failed: {n_failed}/{n_trials}")
logger.info(f" Duration: {elapsed}")
logger.info(f" Avg time/trial: {elapsed.total_seconds()/n_trials:.1f}s")
# Show trial results
logger.info(f"\nTrial Summary:")
for trial in study.trials:
state = "OK" if trial.state == optuna.trial.TrialState.COMPLETE else "FAIL"
if trial.state == optuna.trial.TrialState.COMPLETE:
stiff = -trial.values[0]
mass = trial.values[1]
logger.info(f" Trial {trial.number}: [{state}] stiffness={stiff:.2f}, mass={mass:.4f}")
else:
logger.info(f" Trial {trial.number}: [{state}]")
if n_complete == n_trials:
logger.info(f"\n{'='*60}")
logger.info("Ready for official optimization!")
logger.info("Launch: python run_optimization.py --run --trials 100")
logger.info(f"{'='*60}")
return True
elif n_complete > 0:
logger.info(f"\n{'='*60}")
logger.info("Some trials failed. Review errors above.")
logger.info("You may proceed with caution or fix issues first.")
logger.info(f"{'='*60}")
return False
else:
logger.error(f"\nAll trials failed! Check NX connection and model setup.")
return False
except Exception as e:
logger.error(f"\nTEST FAILED with exception: {e}")
import traceback
traceback.print_exc()
return False
def neural_objective(trial: optuna.Trial, config: dict, surrogate: NeuralSurrogate,
model_dir: Path, logger) -> Tuple[float, float]:
"""
Neural surrogate objective function for FAST optimization.
Uses trained neural network instead of FEA - 600x+ faster!
Returns tuple: (-stiffness, mass) for NSGA-II optimization
- Maximize stiffness (negate for minimize direction)
- Minimize mass
"""
# Sample design variables
design_vars = {}
for var in config['design_variables']:
param_name = var['parameter']
bounds = var['bounds']
design_vars[param_name] = trial.suggest_float(param_name, bounds[0], bounds[1])
logger.trial_start(trial.number, design_vars)
try:
# Get neural network predictions (FAST!)
prediction = surrogate.predict(design_vars)
# Extract predictions - the ParametricSurrogate predicts all objectives directly
max_displacement = prediction.get('max_displacement', 0.001)
inference_time = prediction.get('inference_time_ms', 0)
# Get predicted mass directly from neural network (if available)
# ParametricSurrogate predicts mass, frequency, displacement, and stress
mass_kg = prediction.get('mass', None)
# Calculate stiffness from predicted displacement
# Assuming fixed force of 1000N (verify from your model)
applied_force = 1000.0 # N - adjust based on your model
stiffness = applied_force / max(abs(max_displacement), 1e-6) # N/mm
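# Worked example (illustrative numbers): with F = 1000 N and a predicted
# max displacement of 0.05 mm, k = 1000 / 0.05 = 20,000 N/mm.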
# Fallback to BDF extraction if neural network doesn't predict mass
if mass_kg is None:
dat_file = model_dir / config['simulation']['dat_file']
try:
mass_kg = extract_mass_from_bdf(str(dat_file))
except Exception:
# Last-resort placeholder mass; this distorts the mass objective, so results
# that hit this path should be re-validated with FEA
logger.warning(" Mass unavailable from surrogate and BDF; using 0.1 kg placeholder")
mass_kg = 0.1
logger.info(f" [NEURAL] stiffness: {stiffness:.2f} N/mm, mass: {mass_kg:.4f} kg")
logger.info(f" [NEURAL] max_disp: {max_displacement:.6f} mm")
logger.info(f" [NEURAL] inference: {inference_time:.2f} ms (vs ~30s FEA)")
# Check constraints
feasible = True
# Mass constraint threshold from config; fall back to 0.2 kg if not specified
mass_limit = next((c['threshold'] for c in config.get('constraints', []) if c.get('name') == 'mass_limit'), 0.2)
if mass_kg > mass_limit:
feasible = False
logger.warning(f" Constraint violation: mass = {mass_kg:.4f} > {mass_limit}")
# Set user attributes
trial.set_user_attr('stiffness', stiffness)
trial.set_user_attr('mass', mass_kg)
trial.set_user_attr('max_displacement', max_displacement)
trial.set_user_attr('feasible', feasible)
trial.set_user_attr('neural_predicted', True)
trial.set_user_attr('inference_time_ms', inference_time)
objectives = {'stiffness': stiffness, 'mass': mass_kg}
logger.trial_complete(trial.number, objectives, {'mass_limit': mass_kg}, feasible)
# Return objectives for NSGA-II
# directions=['maximize', 'minimize'] -> (-stiffness, mass)
return (-stiffness, mass_kg)
except Exception as e:
logger.trial_failed(trial.number, f"Neural prediction failed: {str(e)}")
return (float('inf'), float('inf'))
def fea_objective(trial: optuna.Trial, config: dict, nx_solver: NXSolver,
model_dir: Path, logger,
training_exporter: Optional[TrainingDataExporter] = None) -> Tuple[float, float]:
"""
Multi-objective function for bracket stiffness optimization.
Returns tuple: (-stiffness, mass) for NSGA-II optimization
- Maximize stiffness (negated for minimization)
- Minimize mass
"""
# Sample design variables
design_vars = {}
for var in config['design_variables']:
param_name = var['parameter']
bounds = var['bounds']
design_vars[param_name] = trial.suggest_float(param_name, bounds[0], bounds[1])
logger.trial_start(trial.number, design_vars)
try:
# Get file paths
sim_file = model_dir / config['simulation']['sim_file']
# Run FEA simulation via NXSolver (subprocess-based)
result = nx_solver.run_simulation(
sim_file=sim_file,
working_dir=model_dir,
expression_updates=design_vars,
solution_name=config['simulation'].get('solution_name'),
cleanup=(training_exporter is None)
)
if not result['success']:
logger.trial_failed(trial.number, f"Simulation failed: {result.get('error', 'Unknown')}")
return (float('inf'), float('inf'))
# Get output files
op2_file = result['op2_file']
logger.info(f"Simulation successful: {op2_file}")
# Extract mass from BDF/DAT file
dat_file = model_dir / config['simulation']['dat_file']
mass_kg = extract_mass_from_bdf(str(dat_file))
logger.info(f" mass: {mass_kg:.4f} kg (from BDF)")
# Extract displacement from OP2 and calculate stiffness
disp_result = extract_displacement(op2_file, subcase=1)  # assumes the load case of interest is subcase 1
max_displacement = disp_result['max_displacement']
# Calculate stiffness: k = F / delta
# Applied force should match your model's loading (check sim file)
applied_force = 1000.0 # N - adjust based on your model's applied load
stiffness = applied_force / max(abs(max_displacement), 1e-6) # N/mm
logger.info(f" stiffness: {stiffness:.2f} N/mm")
logger.info(f" max_displacement: {max_displacement:.6f} mm")
# Check constraints
feasible = True
constraint_results = {'mass_limit': mass_kg}
for constraint in config.get('constraints', []):
name = constraint['name']
threshold = constraint['threshold']
if name == 'mass_limit':
if mass_kg > threshold:
feasible = False
logger.warning(f" Constraint violation: mass = {mass_kg:.4f} > {threshold}")
# Set user attributes
trial.set_user_attr('stiffness', stiffness)
trial.set_user_attr('mass', mass_kg)
trial.set_user_attr('max_displacement', max_displacement)
trial.set_user_attr('feasible', feasible)
objectives = {'stiffness': stiffness, 'mass': mass_kg}
logger.trial_complete(trial.number, objectives, constraint_results, feasible)
# Export training data for AtomizerField neural network
if training_exporter is not None:
op2_path = Path(op2_file)
dat_path = op2_path.with_suffix('.dat')
export_results = {
'objectives': {'stiffness': stiffness, 'mass': mass_kg},
'constraints': constraint_results,
'max_displacement': max_displacement,
'feasible': feasible
}
simulation_files = {
'dat_file': dat_path,
'op2_file': op2_path
}
export_success = training_exporter.export_trial(
trial_number=trial.number,
design_variables=design_vars,
results=export_results,
simulation_files=simulation_files
)
if export_success:
logger.info(f" Training data exported for trial {trial.number}")
else:
logger.warning(f" Failed to export training data for trial {trial.number}")
# Return objectives for NSGA-II (maximize stiffness, minimize mass)
# Using directions=['minimize', 'minimize'] with -stiffness
return (-stiffness, mass_kg)
except Exception as e:
logger.trial_failed(trial.number, str(e))
return (float('inf'), float('inf'))
def main():
"""Main optimization workflow with staged validation and neural surrogate integration."""
parser = argparse.ArgumentParser(
description='Bracket Stiffness Optimization with AtomizerField Neural Acceleration',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Staged Workflow (recommended order):
1. --discover Clean old files, run ONE solve, discover outputs
2. --validate Run single trial to validate extraction works
3. --test Run 3 trials as integration test
4. --train Run FEA trials for training data collection
5. --run Launch official optimization (with --enable-nn for neural)
Examples:
python run_optimization.py --discover
python run_optimization.py --validate
python run_optimization.py --test
python run_optimization.py --train --trials 50
python run_optimization.py --run --trials 100 --enable-nn --resume
"""
)
# Workflow stage selection (mutually exclusive)
stage_group = parser.add_mutually_exclusive_group()
stage_group.add_argument('--discover', action='store_true',
help='Stage 1: Clean files, run ONE solve, discover outputs')
stage_group.add_argument('--validate', action='store_true',
help='Stage 2: Run single validation trial')
stage_group.add_argument('--test', action='store_true',
help='Stage 3: Run 3-trial integration test')
stage_group.add_argument('--train', action='store_true',
help='Stage 4: Run FEA trials for training data')
stage_group.add_argument('--run', action='store_true',
help='Stage 5: Launch official optimization')
# Common options
parser.add_argument('--trials', type=int, default=100,
help='Number of optimization trials (default: 100)')
parser.add_argument('--resume', action='store_true',
help='Resume from existing study')
parser.add_argument('--enable-nn', action='store_true',
help='Enable neural surrogate (requires trained model)')
parser.add_argument('--no-export', action='store_true',
help='Disable training data export')
parser.add_argument('--clean', action='store_true',
help='Clean old Nastran files before running')
args = parser.parse_args()
# Require an explicit workflow stage
if not any([args.discover, args.validate, args.test, args.train, args.run]):
print("No workflow stage specified. Use --discover, --validate, --test, --train, or --run")
print("Run with --help for usage information")
return 1
# Setup paths
study_dir = Path(__file__).parent
config_path = study_dir / "1_setup" / "optimization_config.json"
model_dir = study_dir / "1_setup" / "model"
results_dir = study_dir / "2_results"
results_dir.mkdir(exist_ok=True)
study_name = "bracket_stiffness_optimization_atomizerfield"
# Initialize logger
logger = get_logger(study_name, study_dir=results_dir)
# Load config
config = load_config(config_path)
# Initialize NX Solver (deferred - only when needed)
# For neural-only mode, we don't need NX at all
nx_solver = None
def get_nx_solver():
"""Lazily initialize NX solver when needed."""
nonlocal nx_solver
if nx_solver is None:
nx_solver = NXSolver()
return nx_solver
# Optional clean before any stage
if args.clean:
clean_nastran_files(model_dir, logger)
# =========================================================================
# STAGE 1: DISCOVER - Clean, run one solve, discover outputs
# =========================================================================
if args.discover:
logger.info(f"\n{'='*60}")
logger.info("STAGE 1: DISCOVER")
logger.info(f"{'='*60}")
success = run_discovery(config, get_nx_solver(), model_dir, results_dir, study_name, logger)
return 0 if success else 1
# =========================================================================
# STAGE 2: VALIDATE - Run single trial to validate extraction
# =========================================================================
if args.validate:
logger.info(f"\n{'='*60}")
logger.info("STAGE 2: VALIDATE")
logger.info(f"{'='*60}")
success = run_validation(config, get_nx_solver(), model_dir, results_dir, study_name, logger)
return 0 if success else 1
# =========================================================================
# STAGE 3: TEST - Run 3 trials as integration test
# =========================================================================
if args.test:
logger.info(f"\n{'='*60}")
logger.info("STAGE 3: TEST")
logger.info(f"{'='*60}")
success = run_test(config, get_nx_solver(), model_dir, results_dir, study_name, logger, n_trials=3)
return 0 if success else 1
# =========================================================================
# STAGE 4: TRAIN - Run FEA trials for training data collection
# =========================================================================
if args.train:
logger.info(f"\n{'='*60}")
logger.info("STAGE 4: TRAIN - FEA Data Collection")
logger.info(f"{'='*60}")
# Force training data export for this stage
args.no_export = False
# =========================================================================
# STAGE 5: RUN - Official optimization (FEA or Neural)
# =========================================================================
# (args.run or args.train fall through to here)
# Check neural surrogate status
neural_enabled = args.enable_nn
surrogate = None
if neural_enabled:
logger.info("Neural surrogate mode requested")
try:
surrogate = create_surrogate_for_study(project_root=project_root)
if surrogate is not None:
logger.info(f"Neural surrogate loaded successfully!")
logger.info(f" Model: {surrogate.model_path}")
logger.info(f" Device: {surrogate.device}")
logger.info(f" Expected speedup: 600x+ over FEA")
else:
logger.warning("Neural surrogate not available - falling back to FEA")
neural_enabled = False
except Exception as e:
logger.warning(f"Failed to initialize neural surrogate: {e}")
logger.warning("Falling back to FEA mode")
neural_enabled = False
# Initialize training data exporter
training_exporter = None
export_config = config.get('training_data_export', {})
# Export FEA training data for the --train stage or when the config enables it,
# but never in neural mode (no new FEA results) and not when --no-export is passed
should_export = (args.train or export_config.get('enabled', False)) and not args.no_export and not neural_enabled
if should_export:
export_dir = export_config.get('export_dir', f'atomizer_field_training_data/{study_name}')
if not Path(export_dir).is_absolute():
export_dir = project_root / export_dir
design_var_names = [dv['parameter'] for dv in config.get('design_variables', [])]
objective_names = [obj['name'] for obj in config.get('objectives', [])]
constraint_names = [c['name'] for c in config.get('constraints', [])]
training_exporter = TrainingDataExporter(
export_dir=export_dir,
study_name=study_name,
design_variable_names=design_var_names,
objective_names=objective_names,
constraint_names=constraint_names,
metadata={
'atomizer_version': '2.0',
'optimization_algorithm': 'NSGA-II',
'n_trials': args.trials,
'description': config.get('description', 'Bracket stiffness optimization')
}
)
logger.info(f"Training data export enabled: {export_dir}")
else:
logger.info("Training data export disabled")
# Create Optuna study (multi-objective)
storage = f"sqlite:///{results_dir / 'study.db'}"
sampler = NSGAIISampler(
population_size=20,
mutation_prob=0.1,
crossover_prob=0.9,
seed=42
)
stage_name = "TRAIN" if args.train else "RUN"
logger.study_start(study_name, args.trials, "NSGAIISampler")
if args.resume:
study = optuna.load_study(
study_name=study_name,
storage=storage,
sampler=sampler
)
logger.info(f"Resumed study with {len(study.trials)} existing trials")
else:
study = optuna.create_study(
study_name=study_name,
storage=storage,
sampler=sampler,
directions=['minimize', 'minimize'], # -stiffness, mass
load_if_exists=True
)
# Run optimization
logger.info(f"\n{'='*60}")
if args.train:
logger.info("STAGE 4: TRAIN - FEA Training Data Collection")
logger.info("Collecting FEA data for AtomizerField neural network")
elif neural_enabled and surrogate is not None:
logger.info("STAGE 5: RUN - Neural Accelerated Optimization")
logger.info("Using trained neural network for FAST predictions!")
else:
logger.info("STAGE 5: RUN - FEA Optimization")
logger.info(f"Trials: {args.trials}")
logger.info(f"Neural Surrogate: {'ENABLED - 600x+ speedup!' if neural_enabled else 'Disabled'}")
logger.info(f"Training Export: {'ENABLED' if training_exporter else 'Disabled'}")
logger.info(f"{'='*60}\n")
start_time = datetime.now()
try:
if neural_enabled and surrogate is not None:
study.optimize(
lambda trial: neural_objective(trial, config, surrogate, model_dir, logger),
n_trials=args.trials,
show_progress_bar=True
)
else:
# FEA mode - need NX solver
solver = get_nx_solver()
study.optimize(
lambda trial: fea_objective(trial, config, solver, model_dir, logger, training_exporter),
n_trials=args.trials,
show_progress_bar=True
)
elapsed = datetime.now() - start_time
n_successful = len([t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE])
logger.study_complete(study_name, len(study.trials), n_successful)
# Report results
logger.info(f"\n{'='*60}")
logger.info(f"Optimization Complete")
logger.info(f"{'='*60}")
logger.info(f"Duration: {elapsed}")
logger.info(f"Total trials: {len(study.trials)}")
logger.info(f"Successful: {n_successful}")
if neural_enabled and surrogate is not None:
avg_time_per_trial_ms = (elapsed.total_seconds() * 1000) / max(n_successful, 1)
estimated_fea_time = n_successful * 30
actual_time = elapsed.total_seconds()
speedup = estimated_fea_time / max(actual_time, 0.001)
logger.info(f"\n [NEURAL PERFORMANCE]")
logger.info(f" Avg time per trial: {avg_time_per_trial_ms:.1f} ms")
logger.info(f" Estimated FEA time: {estimated_fea_time:.0f} seconds ({estimated_fea_time/60:.1f} min)")
logger.info(f" Actual neural time: {actual_time:.1f} seconds")
logger.info(f" SPEEDUP: {speedup:.0f}x faster!")
# Show Pareto front
pareto_trials = study.best_trials
logger.info(f"\nPareto Front ({len(pareto_trials)} solutions):")
for i, trial in enumerate(pareto_trials[:5]):
stiffness = -trial.values[0] # Convert back from negated
mass = trial.values[1]
feasible = trial.user_attrs.get('feasible', 'N/A')
logger.info(f" {i+1}. Stiffness: {stiffness:.2f} N/mm, Mass: {mass:.4f} kg, Feasible: {feasible}")
# Finalize training data export
if training_exporter is not None:
training_exporter.finalize()
logger.info(f"Training data finalized: {training_exporter.trial_count} trials exported")
# Next steps
if not neural_enabled and training_exporter is not None:
logger.info(f"\n{'='*60}")
logger.info("Next Steps for Neural Acceleration")
logger.info(f"{'='*60}")
logger.info(f"1. Training data collected: {training_exporter.export_dir}")
logger.info(f" Exported {training_exporter.trial_count} trials")
logger.info("2. Parse training data for neural network:")
logger.info(" cd atomizer-field")
logger.info(f" python batch_parser.py {training_exporter.export_dir}")
logger.info("3. Train neural network:")
logger.info(" python train.py --epochs 200")
logger.info("4. Re-run with neural surrogate:")
logger.info(" python run_optimization.py --trials 50 --enable-nn --resume")
except Exception as e:
if training_exporter is not None:
training_exporter.finalize()
logger.error(f"Optimization failed: {e}", exc_info=True)
raise
return 0
if __name__ == "__main__":
sys.exit(main())