feat: Add Zernike GNN surrogate module and M1 mirror V12/V13 studies
This commit introduces the GNN-based surrogate for Zernike mirror optimization and the M1 mirror study progression from V12 (GNN validation) to V13 (pure NSGA-II). ## GNN Surrogate Module (optimization_engine/gnn/) New module for Graph Neural Network surrogate prediction of mirror deformations: - `polar_graph.py`: PolarMirrorGraph - fixed 3000-node polar grid structure - `zernike_gnn.py`: ZernikeGNN with design-conditioned message passing - `differentiable_zernike.py`: GPU-accelerated Zernike fitting and objectives - `train_zernike_gnn.py`: ZernikeGNNTrainer with multi-task loss - `gnn_optimizer.py`: ZernikeGNNOptimizer for turbo mode (~900k trials/hour) - `extract_displacement_field.py`: OP2 to HDF5 field extraction - `backfill_field_data.py`: Extract fields from existing FEA trials Key innovation: Design-conditioned convolutions that modulate message passing based on structural design parameters, enabling accurate field prediction. ## M1 Mirror Studies ### V12: GNN Field Prediction + FEA Validation - Zernike GNN trained on V10/V11 FEA data (238 samples) - Turbo mode: 5000 GNN predictions → top candidates → FEA validation - Calibration workflow for GNN-to-FEA error correction - Scripts: run_gnn_turbo.py, validate_gnn_best.py, compute_full_calibration.py ### V13: Pure NSGA-II FEA (Ground Truth) - Seeds 217 FEA trials from V11+V12 - Pure multi-objective NSGA-II without any surrogate - Establishes ground-truth Pareto front for GNN accuracy evaluation - Narrowed blank_backface_angle range to [4.0, 5.0] ## Documentation Updates - SYS_14: Added Zernike GNN section with architecture diagrams - CLAUDE.md: Added GNN module reference and quick start - V13 README: Study documentation with seeding strategy 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
239
studies/m1_mirror_adaptive_V12/validate_gnn_best.py
Normal file
239
studies/m1_mirror_adaptive_V12/validate_gnn_best.py
Normal file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Validate GNN Best Designs with FEA
|
||||
===================================
|
||||
Reads best designs from gnn_turbo_results.json and validates with actual FEA.
|
||||
|
||||
Usage:
|
||||
python validate_gnn_best.py # Full validation (solve + extract)
|
||||
python validate_gnn_best.py --resume # Resume: skip existing OP2, just extract Zernike
|
||||
"""
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
from optimization_engine.gnn.gnn_optimizer import ZernikeGNNOptimizer, GNNPrediction
|
||||
from optimization_engine.extractors import ZernikeExtractor
|
||||
|
||||
# Paths
STUDY_DIR = Path(__file__).parent                      # study root = directory containing this script
RESULTS_FILE = STUDY_DIR / "gnn_turbo_results.json"    # GNN turbo-mode output with the best candidates
CONFIG_PATH = STUDY_DIR / "1_setup" / "optimization_config.json"  # study optimization settings
# NOTE(review): hard-coded absolute path to one developer's machine — confirm
# it exists or make it configurable before running this script elsewhere.
CHECKPOINT_PATH = Path("C:/Users/Antoine/Atomizer/zernike_gnn_checkpoint.pt")
|
||||
|
||||
|
||||
def extract_from_existing_op2(study_dir: Path, turbo_results: dict, config: dict) -> list:
    """Extract Zernike objectives from existing OP2 files in iter9000-9002.

    Used in --resume mode: instead of re-running FEA, read the OP2 result
    files already produced for the three GNN-best candidates and compute the
    FEA objectives plus the GNN-vs-FEA prediction errors.

    Args:
        study_dir: Study root containing the ``2_iterations`` directory.
        turbo_results: Parsed gnn_turbo_results.json; must contain the keys
            ``best_40_vs_20``, ``best_60_vs_20`` and ``best_mfg_90``, each
            with ``design_vars`` and ``objectives``.
        config: Parsed optimization_config.json; ``zernike_settings`` is read
            (with defaults for every field).

    Returns:
        One result dict per candidate, with ``status`` equal to ``'success'``,
        ``'no_op2'``, ``'incomplete_op2'`` or ``'extraction_error'``.
    """
    iterations_dir = study_dir / "2_iterations"
    zernike_settings = config.get('zernike_settings', {})

    objective_names = ['rel_filtered_rms_40_vs_20', 'rel_filtered_rms_60_vs_20',
                       'mfg_90_optician_workload']

    def _failure(key: str, trial_num: int, status: str, error: str = None) -> dict:
        """Build the uniform failure record appended when a candidate cannot be extracted."""
        record = {
            'design': turbo_results[key]['design_vars'],
            'gnn_objectives': turbo_results[key]['objectives'],
            'fea_objectives': None,
            'status': status,
            'trial_num': trial_num,
        }
        if error is not None:
            record['error'] = error
        return record

    results = []
    design_keys = ['best_40_vs_20', 'best_60_vs_20', 'best_mfg_90']

    for i, key in enumerate(design_keys):
        trial_num = 9000 + i
        iter_dir = iterations_dir / f"iter{trial_num}"

        print(f"\n[{i+1}/3] Processing {iter_dir.name} ({key})")

        # Find OP2 file
        op2_files = list(iter_dir.glob("*-solution_1.op2"))
        if not op2_files:
            print(f" ERROR: No OP2 file found")
            results.append(_failure(key, trial_num, 'no_op2'))
            continue

        op2_path = op2_files[0]
        size_mb = op2_path.stat().st_size / 1e6
        print(f" OP2: {op2_path.name} ({size_mb:.1f} MB)")

        # Heuristic completeness check: a finished solve produces a large OP2;
        # a truncated file would make the extraction below fail or mislead.
        if size_mb < 50:
            print(f" ERROR: OP2 too small, likely incomplete")
            results.append(_failure(key, trial_num, 'incomplete_op2'))
            continue

        # Extract Zernike
        try:
            extractor = ZernikeExtractor(
                str(op2_path),
                bdf_path=None,
                displacement_unit=zernike_settings.get('displacement_unit', 'mm'),
                n_modes=zernike_settings.get('n_modes', 50),
                filter_orders=zernike_settings.get('filter_low_orders', 4)
            )

            ref = zernike_settings.get('reference_subcase', '2')

            # Extract objectives: 40 vs 20, 60 vs 20, mfg 90
            # (subcases "3", "4" and "1" respectively, relative to ref "2").
            rel_40 = extractor.extract_relative("3", ref)
            rel_60 = extractor.extract_relative("4", ref)
            rel_90 = extractor.extract_relative("1", ref)

            fea_objectives = {
                'rel_filtered_rms_40_vs_20': rel_40['relative_filtered_rms_nm'],
                'rel_filtered_rms_60_vs_20': rel_60['relative_filtered_rms_nm'],
                'mfg_90_optician_workload': rel_90['relative_rms_filter_j1to3'],
            }

            # Compute GNN-vs-FEA errors for each objective.
            gnn_obj = turbo_results[key]['objectives']
            errors = {}
            for obj_name in objective_names:
                gnn_val = gnn_obj[obj_name]
                fea_val = fea_objectives[obj_name]
                errors[f'{obj_name}_abs_error'] = abs(gnn_val - fea_val)
                # Clamp the denominator so a near-zero FEA value cannot blow up.
                errors[f'{obj_name}_pct_error'] = 100 * abs(gnn_val - fea_val) / max(fea_val, 0.01)

            print(f" FEA: 40vs20={fea_objectives['rel_filtered_rms_40_vs_20']:.2f} nm "
                  f"(GNN: {gnn_obj['rel_filtered_rms_40_vs_20']:.2f}, err: {errors['rel_filtered_rms_40_vs_20_pct_error']:.1f}%)")
            print(f" 60vs20={fea_objectives['rel_filtered_rms_60_vs_20']:.2f} nm "
                  f"(GNN: {gnn_obj['rel_filtered_rms_60_vs_20']:.2f}, err: {errors['rel_filtered_rms_60_vs_20_pct_error']:.1f}%)")
            print(f" mfg90={fea_objectives['mfg_90_optician_workload']:.2f} nm "
                  f"(GNN: {gnn_obj['mfg_90_optician_workload']:.2f}, err: {errors['mfg_90_optician_workload_pct_error']:.1f}%)")

            results.append({
                'design': turbo_results[key]['design_vars'],
                'gnn_objectives': gnn_obj,
                'fea_objectives': fea_objectives,
                'errors': errors,
                'trial_num': trial_num,
                'status': 'success'
            })

        except Exception as e:
            print(f" ERROR extracting Zernike: {e}")
            results.append(_failure(key, trial_num, 'extraction_error', str(e)))

    return results
|
||||
|
||||
|
||||
def main():
    """Validate the GNN-best candidate designs against FEA.

    Two modes:
      * full (default): load the GNN optimizer from CHECKPOINT_PATH and run
        solve + extract for each candidate via ``validate_with_fea``.
      * ``--resume``: skip solving and extract Zernike objectives from the
        OP2 files already present in iter9000-9002.

    Writes gnn_validation_report.json into STUDY_DIR and returns 0.
    """
    # Local imports: numpy/datetime are only needed for the summary section.
    import numpy as np
    from datetime import datetime

    parser = argparse.ArgumentParser(description='Validate GNN predictions with FEA')
    parser.add_argument('--resume', action='store_true',
                        help='Resume: extract Zernike from existing OP2 files instead of re-solving')
    args = parser.parse_args()

    # Load GNN turbo results
    print("Loading GNN turbo results...")
    with open(RESULTS_FILE) as f:
        turbo_results = json.load(f)

    # Load config
    with open(CONFIG_PATH) as f:
        config = json.load(f)

    # The three objectives reported everywhere below.
    objective_names = ['rel_filtered_rms_40_vs_20', 'rel_filtered_rms_60_vs_20',
                       'mfg_90_optician_workload']

    # Show candidates
    candidates = []
    for key in ['best_40_vs_20', 'best_60_vs_20', 'best_mfg_90']:
        data = turbo_results[key]
        pred = GNNPrediction(
            design_vars=data['design_vars'],
            objectives={k: float(v) for k, v in data['objectives'].items()}
        )
        candidates.append(pred)
        print(f"\n{key}:")
        print(f" 40vs20: {pred.objectives['rel_filtered_rms_40_vs_20']:.2f} nm")
        print(f" 60vs20: {pred.objectives['rel_filtered_rms_60_vs_20']:.2f} nm")
        print(f" mfg90: {pred.objectives['mfg_90_optician_workload']:.2f} nm")

    if args.resume:
        # Resume mode: extract from existing OP2 files
        print("\n" + "="*60)
        print("RESUME MODE: Extracting Zernike from existing OP2 files")
        print("="*60)

        validation_results = extract_from_existing_op2(STUDY_DIR, turbo_results, config)
    else:
        # Full mode: run FEA + extract
        print("\n" + "="*60)
        print("LOADING GNN OPTIMIZER FOR FEA VALIDATION")
        print("="*60)

        optimizer = ZernikeGNNOptimizer.from_checkpoint(CHECKPOINT_PATH, CONFIG_PATH)
        print(f"Design variables: {len(optimizer.design_names)}")

        print("\n" + "="*60)
        print("RUNNING FEA VALIDATION")
        print("="*60)

        validation_results = optimizer.validate_with_fea(
            candidates=candidates,
            study_dir=STUDY_DIR,
            verbose=True,
            start_trial_num=9000
        )

    # Summary
    successful = [r for r in validation_results if r['status'] == 'success']
    print(f"\n{'='*60}")
    print(f"VALIDATION SUMMARY")
    print(f"{'='*60}")
    print(f"Successful: {len(successful)}/{len(validation_results)}")

    report = {
        'timestamp': datetime.now().isoformat(),
        'mode': 'resume' if args.resume else 'full',
        'n_candidates': len(validation_results),
        'n_successful': len(successful),
        'results': validation_results,
    }

    if successful:
        # Per-objective GNN-vs-FEA error statistics, computed once and reused
        # for both the console summary and the JSON report.
        error_summary = {}
        for obj in objective_names:
            pct_errors = [r['errors'][f'{obj}_pct_error'] for r in successful]
            error_summary[obj] = {
                'mean_pct': float(np.mean(pct_errors)),
                'std_pct': float(np.std(pct_errors)),
                'max_pct': float(np.max(pct_errors)),
            }
        report['error_summary'] = error_summary

        print(f"\nAverage GNN prediction errors:")
        print(f" 40 vs 20: {error_summary['rel_filtered_rms_40_vs_20']['mean_pct']:.1f}%")
        print(f" 60 vs 20: {error_summary['rel_filtered_rms_60_vs_20']['mean_pct']:.1f}%")
        print(f" mfg 90: {error_summary['mfg_90_optician_workload']['mean_pct']:.1f}%")

    # Save validation report
    output_path = STUDY_DIR / "gnn_validation_report.json"
    with open(output_path, 'w') as f:
        json.dump(report, f, indent=2)

    print(f"\nValidation report saved to: {output_path}")
    print("\nDone!")
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s integer status code to the shell.
    raise SystemExit(main())
|
||||
Reference in New Issue
Block a user