"""
Neural-Enhanced Optimization Runner

Extends the base OptimizationRunner with neural network surrogate capabilities
from AtomizerField for super-efficient optimization.

Features:
- Automatic neural surrogate integration when models are available
- Hybrid optimization with smart FEA/NN switching
- Confidence-based fallback to FEA
- Speedup tracking and reporting
"""
|
|
|
|
|
|
|
|
|
|
from pathlib import Path
|
|
|
|
|
from typing import Dict, Any, List, Optional, Callable, Tuple
|
|
|
|
|
import json
|
|
|
|
|
import time
|
|
|
|
|
import logging
|
|
|
|
|
import numpy as np
|
|
|
|
|
from datetime import datetime
|
|
|
|
|
import optuna
|
|
|
|
|
|
|
|
|
from optimization_engine.core.runner import OptimizationRunner
|
|
|
|
|
from optimization_engine.processors.surrogates.neural_surrogate import (
|
|
|
|
create_surrogate_from_config,
|
|
|
|
|
create_hybrid_optimizer_from_config,
|
|
|
|
|
NeuralSurrogate,
|
|
|
|
|
HybridOptimizer
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class NeuralOptimizationRunner(OptimizationRunner):
|
|
|
|
|
"""
|
|
|
|
|
Extended optimization runner with neural network surrogate support.
|
|
|
|
|
|
|
|
|
|
Seamlessly integrates AtomizerField neural models to achieve 600x-500,000x
|
|
|
|
|
speedup over traditional FEA-based optimization.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
def __init__(
|
|
|
|
|
self,
|
|
|
|
|
config_path: Path,
|
|
|
|
|
model_updater: Callable,
|
|
|
|
|
simulation_runner: Callable,
|
|
|
|
|
result_extractors: Dict[str, Callable]
|
|
|
|
|
):
|
|
|
|
|
"""
|
|
|
|
|
Initialize neural-enhanced optimization runner.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
config_path: Path to optimization_config.json
|
|
|
|
|
model_updater: Function to update NX model parameters
|
|
|
|
|
simulation_runner: Function to run FEA simulation
|
|
|
|
|
result_extractors: Dictionary of result extraction functions
|
|
|
|
|
"""
|
|
|
|
|
# Initialize base class
|
|
|
|
|
super().__init__(config_path, model_updater, simulation_runner, result_extractors)
|
|
|
|
|
|
|
|
|
|
# Initialize neural surrogate components
|
|
|
|
|
self.neural_surrogate: Optional[NeuralSurrogate] = None
|
|
|
|
|
self.hybrid_optimizer: Optional[HybridOptimizer] = None
|
|
|
|
|
self.neural_speedup_tracker = []
|
|
|
|
|
|
|
|
|
|
# Try to initialize neural components
|
|
|
|
|
self._initialize_neural_components()
|
|
|
|
|
|
|
|
|
|
def _initialize_neural_components(self):
|
|
|
|
|
"""Initialize neural surrogate and hybrid optimizer if configured."""
|
|
|
|
|
try:
|
|
|
|
|
# Create neural surrogate from config
|
|
|
|
|
self.neural_surrogate = create_surrogate_from_config(self.config)
|
|
|
|
|
|
|
|
|
|
if self.neural_surrogate:
|
|
|
|
|
logger.info("✓ Neural surrogate initialized successfully")
|
|
|
|
|
logger.info(f" Model: {self.neural_surrogate.model_checkpoint}")
|
|
|
|
|
logger.info(f" Confidence threshold: {self.neural_surrogate.confidence_threshold}")
|
|
|
|
|
|
|
|
|
|
# Create hybrid optimizer for smart FEA/NN switching
|
|
|
|
|
self.hybrid_optimizer = create_hybrid_optimizer_from_config(self.config)
|
|
|
|
|
|
|
|
|
|
if self.hybrid_optimizer:
|
|
|
|
|
logger.info("✓ Hybrid optimizer initialized")
|
|
|
|
|
logger.info(f" Exploration trials: {self.hybrid_optimizer.exploration_trials}")
|
|
|
|
|
logger.info(f" Validation frequency: {self.hybrid_optimizer.validation_frequency}")
|
|
|
|
|
else:
|
|
|
|
|
logger.info("Neural surrogate not configured - using standard FEA optimization")
|
|
|
|
|
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.warning(f"Could not initialize neural components: {e}")
|
|
|
|
|
logger.info("Falling back to standard FEA optimization")
|
|
|
|
|
|
|
|
|
|
def _objective_function_with_neural(self, trial: optuna.Trial) -> float:
|
|
|
|
|
"""
|
|
|
|
|
Enhanced objective function with neural network surrogate support.
|
|
|
|
|
|
|
|
|
|
Attempts to use neural network for fast prediction, falls back to FEA
|
|
|
|
|
when confidence is low or validation is needed.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
trial: Optuna trial object
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
Objective value (float)
|
|
|
|
|
"""
|
|
|
|
|
# Sample design variables (same as base class)
|
|
|
|
|
design_vars = self._sample_design_variables(trial)
|
|
|
|
|
|
|
|
|
|
# Decide whether to use neural network or FEA
|
|
|
|
|
use_neural = False
|
|
|
|
|
nn_prediction = None
|
|
|
|
|
nn_confidence = 0.0
|
|
|
|
|
|
|
|
|
|
if self.neural_surrogate and self.hybrid_optimizer:
|
|
|
|
|
# Check if hybrid optimizer recommends using NN
|
|
|
|
|
if self.hybrid_optimizer.should_use_nn(trial.number):
|
|
|
|
|
# Try neural network prediction
|
|
|
|
|
start_time = time.time()
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
# Get case data for the current model
|
|
|
|
|
case_data = self._prepare_case_data(design_vars)
|
|
|
|
|
|
|
|
|
|
# Get neural network prediction
|
|
|
|
|
predictions, confidence, used_nn = self.neural_surrogate.predict(
|
|
|
|
|
design_vars,
|
|
|
|
|
case_data=case_data
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
if used_nn and predictions is not None:
|
|
|
|
|
# Successfully used neural network
|
|
|
|
|
nn_time = time.time() - start_time
|
|
|
|
|
use_neural = True
|
|
|
|
|
nn_prediction = predictions
|
|
|
|
|
nn_confidence = confidence
|
|
|
|
|
|
|
|
|
|
logger.info(f"Trial {trial.number}: Used neural network (confidence: {confidence:.2%}, time: {nn_time:.3f}s)")
|
|
|
|
|
|
|
|
|
|
# Track speedup
|
|
|
|
|
self.neural_speedup_tracker.append({
|
|
|
|
|
'trial': trial.number,
|
|
|
|
|
'nn_time': nn_time,
|
|
|
|
|
'confidence': confidence
|
|
|
|
|
})
|
|
|
|
|
else:
|
|
|
|
|
logger.info(f"Trial {trial.number}: Neural confidence too low ({confidence:.2%}), using FEA")
|
|
|
|
|
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.warning(f"Trial {trial.number}: Neural prediction failed: {e}, using FEA")
|
|
|
|
|
|
|
|
|
|
# Execute hooks and get results
|
|
|
|
|
if use_neural and nn_prediction is not None:
|
|
|
|
|
# Use neural network results
|
|
|
|
|
extracted_results = self._process_neural_results(nn_prediction, design_vars)
|
|
|
|
|
|
|
|
|
|
# Skip model update and simulation since we used NN
|
|
|
|
|
result_path = None
|
|
|
|
|
|
|
|
|
|
else:
|
|
|
|
|
# Fall back to standard FEA (using base class method)
|
|
|
|
|
return super()._objective_function(trial)
|
|
|
|
|
|
|
|
|
|
# Process constraints and objectives (same as base class)
|
|
|
|
|
return self._evaluate_objectives_and_constraints(
|
|
|
|
|
trial, design_vars, extracted_results, result_path
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
def _sample_design_variables(self, trial: optuna.Trial) -> Dict[str, float]:
|
|
|
|
|
"""
|
|
|
|
|
Sample design variables from trial (extracted from base class).
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
trial: Optuna trial object
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
Dictionary of design variable values
|
|
|
|
|
"""
|
|
|
|
|
design_vars = {}
|
|
|
|
|
|
|
|
|
|
# Handle both dict and list formats for design_variables
|
|
|
|
|
if isinstance(self.config['design_variables'], dict):
|
|
|
|
|
for var_name, var_info in self.config['design_variables'].items():
|
|
|
|
|
if var_info['type'] == 'continuous':
|
|
|
|
|
value = trial.suggest_float(
|
|
|
|
|
var_name,
|
|
|
|
|
var_info['min'],
|
|
|
|
|
var_info['max']
|
|
|
|
|
)
|
|
|
|
|
precision = self._get_precision(var_name, var_info.get('units', ''))
|
|
|
|
|
design_vars[var_name] = round(value, precision)
|
|
|
|
|
elif var_info['type'] in ['discrete', 'integer']:
|
|
|
|
|
design_vars[var_name] = trial.suggest_int(
|
|
|
|
|
var_name,
|
|
|
|
|
int(var_info['min']),
|
|
|
|
|
int(var_info['max'])
|
|
|
|
|
)
|
|
|
|
|
else:
|
|
|
|
|
# Old format
|
|
|
|
|
for dv in self.config['design_variables']:
|
|
|
|
|
if dv['type'] == 'continuous':
|
|
|
|
|
value = trial.suggest_float(
|
|
|
|
|
dv['name'],
|
|
|
|
|
dv['bounds'][0],
|
|
|
|
|
dv['bounds'][1]
|
|
|
|
|
)
|
|
|
|
|
precision = self._get_precision(dv['name'], dv.get('units', ''))
|
|
|
|
|
design_vars[dv['name']] = round(value, precision)
|
|
|
|
|
elif dv['type'] == 'discrete':
|
|
|
|
|
design_vars[dv['name']] = trial.suggest_int(
|
|
|
|
|
dv['name'],
|
|
|
|
|
int(dv['bounds'][0]),
|
|
|
|
|
int(dv['bounds'][1])
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
return design_vars
|
|
|
|
|
|
|
|
|
|
def _prepare_case_data(self, design_vars: Dict[str, float]) -> Optional[Dict[str, Any]]:
|
|
|
|
|
"""
|
|
|
|
|
Prepare case-specific data for neural network prediction.
|
|
|
|
|
|
|
|
|
|
This includes mesh file paths, boundary conditions, loads, etc.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
design_vars: Current design variable values
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
Case data dictionary or None
|
|
|
|
|
"""
|
|
|
|
|
try:
|
|
|
|
|
case_data = {
|
|
|
|
|
'fem_file': self.config.get('fem_file', ''),
|
|
|
|
|
'sim_file': self.config.get('sim_file', ''),
|
|
|
|
|
'design_variables': design_vars,
|
|
|
|
|
# Add any case-specific data needed by the neural network
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
# Add boundary conditions if specified
|
|
|
|
|
if 'boundary_conditions' in self.config:
|
|
|
|
|
case_data['boundary_conditions'] = self.config['boundary_conditions']
|
|
|
|
|
|
|
|
|
|
# Add loads if specified
|
|
|
|
|
if 'loads' in self.config:
|
|
|
|
|
case_data['loads'] = self.config['loads']
|
|
|
|
|
|
|
|
|
|
return case_data
|
|
|
|
|
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.warning(f"Could not prepare case data: {e}")
|
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
def _process_neural_results(
|
|
|
|
|
self,
|
|
|
|
|
nn_prediction: Dict[str, Any],
|
|
|
|
|
design_vars: Dict[str, float]
|
|
|
|
|
) -> Dict[str, float]:
|
|
|
|
|
"""
|
|
|
|
|
Process neural network predictions into extracted results format.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
nn_prediction: Raw neural network predictions
|
|
|
|
|
design_vars: Current design variable values
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
Dictionary of extracted results matching objective/constraint names
|
|
|
|
|
"""
|
|
|
|
|
extracted_results = {}
|
|
|
|
|
|
|
|
|
|
# Map neural network outputs to objective/constraint names
|
|
|
|
|
for obj in self.config['objectives']:
|
|
|
|
|
obj_name = obj['name']
|
|
|
|
|
|
|
|
|
|
# Try to find matching prediction
|
|
|
|
|
if obj_name in nn_prediction:
|
|
|
|
|
value = nn_prediction[obj_name]
|
|
|
|
|
elif 'metric' in obj and obj['metric'] in nn_prediction:
|
|
|
|
|
value = nn_prediction[obj['metric']]
|
|
|
|
|
else:
|
|
|
|
|
# Try common mappings
|
|
|
|
|
metric_mappings = {
|
|
|
|
|
'max_stress': ['max_von_mises_stress', 'stress', 'von_mises'],
|
|
|
|
|
'max_displacement': ['max_displacement', 'displacement', 'disp'],
|
|
|
|
|
'mass': ['mass', 'weight'],
|
|
|
|
|
'volume': ['volume'],
|
|
|
|
|
'compliance': ['compliance', 'strain_energy'],
|
|
|
|
|
'frequency': ['frequency', 'natural_frequency', 'freq']
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
value = None
|
|
|
|
|
for mapped_names in metric_mappings.get(obj_name, []):
|
|
|
|
|
if mapped_names in nn_prediction:
|
|
|
|
|
value = nn_prediction[mapped_names]
|
|
|
|
|
break
|
|
|
|
|
|
|
|
|
|
if value is None:
|
|
|
|
|
raise ValueError(f"Could not find neural prediction for objective '{obj_name}'")
|
|
|
|
|
|
|
|
|
|
# Apply appropriate precision
|
|
|
|
|
precision = self._get_precision(obj_name, obj.get('units', ''))
|
|
|
|
|
extracted_results[obj_name] = round(float(value), precision)
|
|
|
|
|
|
|
|
|
|
# Process constraints similarly
|
|
|
|
|
for const in self.config.get('constraints', []):
|
|
|
|
|
const_name = const['name']
|
|
|
|
|
|
|
|
|
|
if const_name in nn_prediction:
|
|
|
|
|
value = nn_prediction[const_name]
|
|
|
|
|
elif 'metric' in const and const['metric'] in nn_prediction:
|
|
|
|
|
value = nn_prediction[const['metric']]
|
|
|
|
|
else:
|
|
|
|
|
# Try to reuse objective values if constraint uses same metric
|
|
|
|
|
if const_name in extracted_results:
|
|
|
|
|
value = extracted_results[const_name]
|
|
|
|
|
else:
|
|
|
|
|
raise ValueError(f"Could not find neural prediction for constraint '{const_name}'")
|
|
|
|
|
|
|
|
|
|
precision = self._get_precision(const_name, const.get('units', ''))
|
|
|
|
|
extracted_results[const_name] = round(float(value), precision)
|
|
|
|
|
|
|
|
|
|
return extracted_results
|
|
|
|
|
|
|
|
|
|
def _evaluate_objectives_and_constraints(
|
|
|
|
|
self,
|
|
|
|
|
trial: optuna.Trial,
|
|
|
|
|
design_vars: Dict[str, float],
|
|
|
|
|
extracted_results: Dict[str, float],
|
|
|
|
|
result_path: Optional[Path]
|
|
|
|
|
) -> float:
|
|
|
|
|
"""
|
|
|
|
|
Evaluate objectives and constraints (extracted from base class).
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
trial: Optuna trial object
|
|
|
|
|
design_vars: Design variable values
|
|
|
|
|
extracted_results: Extracted simulation/NN results
|
|
|
|
|
result_path: Path to result files (None if using NN)
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
Total objective value
|
|
|
|
|
"""
|
|
|
|
|
# Export training data if using FEA
|
|
|
|
|
if self.training_data_exporter and result_path:
|
|
|
|
|
self._export_training_data(trial.number, design_vars, extracted_results, result_path)
|
|
|
|
|
|
|
|
|
|
# Evaluate constraints
|
|
|
|
|
for const in self.config.get('constraints', []):
|
|
|
|
|
value = extracted_results[const['name']]
|
|
|
|
|
limit = const['limit']
|
|
|
|
|
|
|
|
|
|
if const['type'] == 'upper_bound' and value > limit:
|
|
|
|
|
logger.info(f"Constraint violated: {const['name']} = {value:.4f} > {limit:.4f}")
|
|
|
|
|
raise optuna.TrialPruned()
|
|
|
|
|
elif const['type'] == 'lower_bound' and value < limit:
|
|
|
|
|
logger.info(f"Constraint violated: {const['name']} = {value:.4f} < {limit:.4f}")
|
|
|
|
|
raise optuna.TrialPruned()
|
|
|
|
|
|
|
|
|
|
# Calculate weighted objective
|
|
|
|
|
total_objective = 0.0
|
|
|
|
|
for obj in self.config['objectives']:
|
|
|
|
|
value = extracted_results[obj['name']]
|
|
|
|
|
weight = obj.get('weight', 1.0)
|
|
|
|
|
direction = obj.get('direction', 'minimize')
|
|
|
|
|
|
|
|
|
|
if direction == 'minimize':
|
|
|
|
|
total_objective += weight * value
|
|
|
|
|
else: # maximize
|
|
|
|
|
total_objective -= weight * value
|
|
|
|
|
|
|
|
|
|
# Store in history
|
|
|
|
|
history_entry = {
|
|
|
|
|
'trial_number': trial.number,
|
|
|
|
|
'timestamp': datetime.now().isoformat(),
|
|
|
|
|
'design_variables': design_vars,
|
|
|
|
|
'objectives': {obj['name']: extracted_results[obj['name']] for obj in self.config['objectives']},
|
|
|
|
|
'constraints': {const['name']: extracted_results[const['name']] for const in self.config.get('constraints', [])},
|
|
|
|
|
'total_objective': total_objective,
|
|
|
|
|
'used_neural': result_path is None # Track if NN was used
|
|
|
|
|
}
|
|
|
|
|
self.history.append(history_entry)
|
|
|
|
|
|
|
|
|
|
# Save history
|
|
|
|
|
self._save_history()
|
|
|
|
|
|
|
|
|
|
logger.info(f"Trial {trial.number} completed:")
|
|
|
|
|
logger.info(f" Design vars: {design_vars}")
|
|
|
|
|
logger.info(f" Objectives: {history_entry['objectives']}")
|
|
|
|
|
logger.info(f" Total objective: {total_objective:.6f}")
|
|
|
|
|
if history_entry.get('used_neural'):
|
|
|
|
|
logger.info(f" Method: Neural Network")
|
|
|
|
|
|
|
|
|
|
return total_objective
|
|
|
|
|
|
|
|
|
|
def run(
|
|
|
|
|
self,
|
|
|
|
|
study_name: Optional[str] = None,
|
|
|
|
|
n_trials: Optional[int] = None,
|
|
|
|
|
resume: bool = False
|
|
|
|
|
) -> optuna.Study:
|
|
|
|
|
"""
|
|
|
|
|
Run neural-enhanced optimization.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
study_name: Optional study name
|
|
|
|
|
n_trials: Number of trials to run
|
|
|
|
|
resume: Whether to resume existing study
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
Completed Optuna study
|
|
|
|
|
"""
|
|
|
|
|
# Override objective function if neural surrogate is available
|
|
|
|
|
if self.neural_surrogate:
|
|
|
|
|
# Temporarily replace objective function
|
|
|
|
|
original_objective = self._objective_function
|
|
|
|
|
self._objective_function = self._objective_function_with_neural
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
# Run optimization using base class
|
|
|
|
|
study = super().run(study_name, n_trials, resume)
|
|
|
|
|
|
|
|
|
|
# Print neural speedup summary if applicable
|
|
|
|
|
if self.neural_speedup_tracker:
|
|
|
|
|
self._print_speedup_summary()
|
|
|
|
|
|
|
|
|
|
return study
|
|
|
|
|
|
|
|
|
|
finally:
|
|
|
|
|
# Restore original objective function if replaced
|
|
|
|
|
if self.neural_surrogate:
|
|
|
|
|
self._objective_function = original_objective
|
|
|
|
|
|
|
|
|
|
def _print_speedup_summary(self):
|
|
|
|
|
"""Print summary of neural network speedup achieved."""
|
|
|
|
|
if not self.neural_speedup_tracker:
|
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
nn_trials = len(self.neural_speedup_tracker)
|
|
|
|
|
total_trials = len(self.history)
|
|
|
|
|
nn_percentage = (nn_trials / total_trials) * 100
|
|
|
|
|
|
|
|
|
|
avg_nn_time = np.mean([t['nn_time'] for t in self.neural_speedup_tracker])
|
|
|
|
|
avg_confidence = np.mean([t['confidence'] for t in self.neural_speedup_tracker])
|
|
|
|
|
|
|
|
|
|
# Estimate FEA time (rough estimate if not tracked)
|
|
|
|
|
estimated_fea_time = 30 * 60 # 30 minutes in seconds
|
|
|
|
|
estimated_speedup = estimated_fea_time / avg_nn_time
|
|
|
|
|
|
|
|
|
|
print("\n" + "="*60)
|
|
|
|
|
print("NEURAL NETWORK SPEEDUP SUMMARY")
|
|
|
|
|
print("="*60)
|
|
|
|
|
print(f"Trials using neural network: {nn_trials}/{total_trials} ({nn_percentage:.1f}%)")
|
|
|
|
|
print(f"Average NN inference time: {avg_nn_time:.3f} seconds")
|
|
|
|
|
print(f"Average NN confidence: {avg_confidence:.1%}")
|
|
|
|
|
print(f"Estimated speedup: {estimated_speedup:.0f}x")
|
|
|
|
|
print(f"Time saved: ~{(estimated_fea_time - avg_nn_time) * nn_trials / 3600:.1f} hours")
|
|
|
|
|
print("="*60)
|
|
|
|
|
|
|
|
|
|
def update_neural_model(self, new_checkpoint: Path):
|
|
|
|
|
"""
|
|
|
|
|
Update the neural network model checkpoint.
|
|
|
|
|
|
|
|
|
|
Useful for updating to a newly trained model during optimization.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
new_checkpoint: Path to new model checkpoint
|
|
|
|
|
"""
|
|
|
|
|
if self.neural_surrogate:
|
|
|
|
|
try:
|
|
|
|
|
self.neural_surrogate.load_model(new_checkpoint)
|
|
|
|
|
logger.info(f"Updated neural model to: {new_checkpoint}")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.error(f"Failed to update neural model: {e}")
|
|
|
|
|
|
|
|
|
|
def train_neural_model(self, training_data_dir: Path, epochs: int = 100):
|
|
|
|
|
"""
|
|
|
|
|
Train a new neural model on collected data.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
training_data_dir: Directory containing training data
|
|
|
|
|
epochs: Number of training epochs
|
|
|
|
|
"""
|
|
|
|
|
if self.hybrid_optimizer:
|
|
|
|
|
try:
|
|
|
|
|
model_path = self.hybrid_optimizer.train_surrogate_model(training_data_dir, epochs)
|
|
|
|
|
|
|
|
|
|
# Update to use the newly trained model
|
|
|
|
|
if model_path and self.neural_surrogate:
|
|
|
|
|
self.update_neural_model(model_path)
|
|
|
|
|
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.error(f"Failed to train neural model: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_neural_runner(
    config_path: Path,
    model_updater: Callable,
    simulation_runner: Callable,
    result_extractors: Dict[str, Callable]
) -> NeuralOptimizationRunner:
    """
    Factory function for a neural-enhanced optimization runner.

    Args:
        config_path: Path to optimization configuration
        model_updater: Function to update model parameters
        simulation_runner: Function to run simulation
        result_extractors: Dictionary of result extraction functions

    Returns:
        NeuralOptimizationRunner instance
    """
    # Keyword arguments make the hand-off explicit at the call site.
    return NeuralOptimizationRunner(
        config_path=config_path,
        model_updater=model_updater,
        simulation_runner=simulation_runner,
        result_extractors=result_extractors,
    )
|