feat: Add OPD method support to Zernike visualization with Standard/OPD toggle
Major improvements to Zernike WFE visualization: - Add ZernikeDashboardInsight: Unified dashboard with all orientations (40°, 60°, 90°) on one page with light theme and executive summary - Add OPD method toggle: Switch between Standard (Z-only) and OPD (X,Y,Z) methods in ZernikeWFEInsight with interactive buttons - Add lateral displacement maps: Visualize X,Y displacement for each orientation - Add displacement component views: Toggle between WFE, ΔX, ΔY, ΔZ in relative views - Add metrics comparison table showing both methods side-by-side New extractors: - extract_zernike_figure.py: ZernikeOPDExtractor using BDF geometry interpolation - extract_zernike_opd.py: Parabola-based OPD with focal length Key finding: OPD method gives 8-11% higher WFE values than Standard method (more conservative/accurate for surfaces with lateral displacement under gravity) Documentation updates: - SYS_12: Added E22 ZernikeOPD as recommended method - SYS_16: Added ZernikeDashboard, updated ZernikeWFE with OPD features - Cheatsheet: Added Zernike method comparison table 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
150
tests/analyze_v11.py
Normal file
150
tests/analyze_v11.py
Normal file
@@ -0,0 +1,150 @@
|
||||
"""Analyze V11 optimization results."""
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
import json
|
||||
import sys
|
||||
sys.path.insert(0, '.')
|
||||
|
||||
db_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11/3_results/study.db')
|
||||
|
||||
conn = sqlite3.connect(db_path)
|
||||
c = conn.cursor()
|
||||
|
||||
# Get all completed trials with their objectives
|
||||
c.execute('''
|
||||
SELECT t.trial_id
|
||||
FROM trials t
|
||||
WHERE t.state = 'COMPLETE'
|
||||
ORDER BY t.trial_id
|
||||
''')
|
||||
completed_ids = [row[0] for row in c.fetchall()]
|
||||
print(f'Completed trials: {len(completed_ids)}')
|
||||
|
||||
# Build trial data
|
||||
trials = []
|
||||
for tid in completed_ids:
|
||||
c.execute('''
|
||||
SELECT key, value_json
|
||||
FROM trial_user_attributes
|
||||
WHERE trial_id = ?
|
||||
''', (tid,))
|
||||
attrs = {row[0]: json.loads(row[1]) for row in c.fetchall()}
|
||||
|
||||
if 'rel_filtered_rms_40_vs_20' in attrs:
|
||||
trials.append({
|
||||
'id': tid,
|
||||
'rms_40': attrs.get('rel_filtered_rms_40_vs_20', 999),
|
||||
'rms_60': attrs.get('rel_filtered_rms_60_vs_20', 999),
|
||||
'mfg_90': attrs.get('mfg_90_optician_workload', 999),
|
||||
'ws': attrs.get('weighted_sum', 999),
|
||||
'lateral': attrs.get('lateral_rms_um', 0),
|
||||
})
|
||||
|
||||
# Sort by weighted sum or calculate it
|
||||
for t in trials:
|
||||
if t['ws'] == 999:
|
||||
# Calculate weighted sum
|
||||
w40 = 100 / 4.0 # Target 4 nm
|
||||
w60 = 50 / 10.0 # Target 10 nm
|
||||
w90 = 20 / 20.0 # Target 20 nm
|
||||
t['ws'] = w40 * t['rms_40'] + w60 * t['rms_60'] + w90 * t['mfg_90']
|
||||
|
||||
# Find best
|
||||
trials.sort(key=lambda x: x['ws'])
|
||||
best = trials[0] if trials else None
|
||||
|
||||
print('\nV11 Optimization Summary')
|
||||
print('=' * 70)
|
||||
print(f'Completed trials: {len(trials)}')
|
||||
|
||||
if trials:
|
||||
rms40 = [t['rms_40'] for t in trials]
|
||||
rms60 = [t['rms_60'] for t in trials]
|
||||
mfg90 = [t['mfg_90'] for t in trials]
|
||||
|
||||
print(f'\n40-20 RMS range: {min(rms40):.2f} - {max(rms40):.2f} nm (target: 4.0 nm)')
|
||||
print(f'60-20 RMS range: {min(rms60):.2f} - {max(rms60):.2f} nm (target: 10.0 nm)')
|
||||
print(f'90 MFG range: {min(mfg90):.2f} - {max(mfg90):.2f} nm (target: 20.0 nm)')
|
||||
|
||||
print(f'\nBest Trial (#{best["id"]}):')
|
||||
print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
|
||||
print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
|
||||
print(f' MFG 90: {best["mfg_90"]:.2f} nm')
|
||||
print(f' Lateral: {best["lateral"]:.3f} um')
|
||||
print(f' WS: {best["ws"]:.1f}')
|
||||
|
||||
# Check if values make sense (not too good to be true)
|
||||
print('\nSanity Check:')
|
||||
if best['rms_40'] < 3.0:
|
||||
print(' WARNING: 40-20 RMS suspiciously low!')
|
||||
else:
|
||||
print(f' 40-20 RMS {best["rms_40"]:.2f} nm - looks reasonable (expected ~6-7nm)')
|
||||
|
||||
if best['rms_60'] < 8.0:
|
||||
print(' WARNING: 60-20 RMS suspiciously low!')
|
||||
else:
|
||||
print(f' 60-20 RMS {best["rms_60"]:.2f} nm - looks reasonable (expected ~13-15nm)')
|
||||
|
||||
# Compare to V7 baseline and V10 buggy values
|
||||
print('\n' + '=' * 70)
|
||||
print('Comparison to Known Values:')
|
||||
print('=' * 70)
|
||||
print('\nV7 Baseline (original Zernike, correct):')
|
||||
print(' 40-20 RMS: 6.05 nm')
|
||||
print(' 60-20 RMS: 13.03 nm')
|
||||
print(' MFG 90: 26.34 nm')
|
||||
|
||||
print('\nV10 BUGGY (abs(RMS_target - RMS_ref) - WRONG):')
|
||||
print(' 40-20 RMS: ~1.99 nm <- WAY TOO LOW')
|
||||
print(' 60-20 RMS: ~6.82 nm <- WAY TOO LOW')
|
||||
|
||||
print('\nV11 Best (extract_relative() - CORRECT):')
|
||||
print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
|
||||
print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
|
||||
print(f' MFG 90: {best["mfg_90"]:.2f} nm')
|
||||
|
||||
# Verdict
|
||||
print('\n' + '=' * 70)
|
||||
print('VERDICT:')
|
||||
print('=' * 70)
|
||||
if best['rms_40'] > 5.0 and best['rms_40'] < 10.0:
|
||||
print(' 40-20: CORRECT - matches expected V7 range')
|
||||
else:
|
||||
print(f' 40-20: INVESTIGATE - {best["rms_40"]:.2f} nm is outside expected range')
|
||||
|
||||
if best['rms_60'] > 10.0 and best['rms_60'] < 20.0:
|
||||
print(' 60-20: CORRECT - matches expected V7 range')
|
||||
else:
|
||||
print(f' 60-20: INVESTIGATE - {best["rms_60"]:.2f} nm is outside expected range')
|
||||
|
||||
conn.close()
|
||||
|
||||
# Now generate Zernike dashboard for visual verification
|
||||
print('\n' + '=' * 70)
|
||||
print('Generating Zernike Dashboard for Best Iteration...')
|
||||
print('=' * 70)
|
||||
|
||||
from optimization_engine.insights import ZernikeDashboardInsight, InsightConfig
|
||||
|
||||
study_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11')
|
||||
|
||||
# Find the iteration with the best design
|
||||
best_archive = study_path / '3_results' / 'best_design_archive'
|
||||
if best_archive.exists():
|
||||
op2_files = list(best_archive.glob('*.op2'))
|
||||
if op2_files:
|
||||
print(f'Found best design archive: {op2_files[0].name}')
|
||||
|
||||
insight = ZernikeDashboardInsight(study_path)
|
||||
if insight.can_generate():
|
||||
print('Generating dashboard...')
|
||||
result = insight.generate(InsightConfig())
|
||||
if result.success:
|
||||
print(f'\nDashboard generated: {result.html_path}')
|
||||
print(f'\nDashboard Summary:')
|
||||
for key, value in result.summary.items():
|
||||
print(f' {key}: {value}')
|
||||
else:
|
||||
print(f'Error: {result.error}')
|
||||
else:
|
||||
print('Cannot generate insight - no OP2 files found')
|
||||
252
tests/compare_v8_zernike_methods.py
Normal file
252
tests/compare_v8_zernike_methods.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""
|
||||
Compare V8 Best Candidate: OPD Method vs Standard Z-Only Method
|
||||
|
||||
This script extracts WFE using both methods and compares the results
|
||||
to quantify the difference between using full X,Y,Z displacement (OPD)
|
||||
vs just Z displacement (Standard).
|
||||
"""
|
||||
import sys
|
||||
sys.path.insert(0, '.')
|
||||
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
import json
|
||||
import numpy as np
|
||||
|
||||
# Find V8 best trial
|
||||
db_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V8/3_results/study.db')
|
||||
print(f"V8 Database: {db_path}")
|
||||
print(f"Exists: {db_path.exists()}")
|
||||
|
||||
if not db_path.exists():
|
||||
print("ERROR: V8 database not found!")
|
||||
sys.exit(1)
|
||||
|
||||
conn = sqlite3.connect(db_path)
|
||||
c = conn.cursor()
|
||||
|
||||
# Get all completed trials with their objectives
|
||||
c.execute('''
|
||||
SELECT t.trial_id
|
||||
FROM trials t
|
||||
WHERE t.state = 'COMPLETE'
|
||||
ORDER BY t.trial_id
|
||||
''')
|
||||
completed_ids = [row[0] for row in c.fetchall()]
|
||||
print(f"Completed trials: {len(completed_ids)}")
|
||||
|
||||
# Build trial data and find best
|
||||
trials = []
|
||||
for tid in completed_ids:
|
||||
c.execute('''
|
||||
SELECT key, value_json
|
||||
FROM trial_user_attributes
|
||||
WHERE trial_id = ?
|
||||
''', (tid,))
|
||||
attrs = {row[0]: json.loads(row[1]) for row in c.fetchall()}
|
||||
|
||||
# V8 used different objective names - check what's available
|
||||
rms_40 = attrs.get('rel_filtered_rms_40_vs_20', attrs.get('filtered_rms_40_20', None))
|
||||
rms_60 = attrs.get('rel_filtered_rms_60_vs_20', attrs.get('filtered_rms_60_20', None))
|
||||
mfg_90 = attrs.get('mfg_90_optician_workload', attrs.get('optician_workload_90', None))
|
||||
ws = attrs.get('weighted_sum', None)
|
||||
|
||||
if rms_40 is not None:
|
||||
trials.append({
|
||||
'id': tid,
|
||||
'rms_40': rms_40,
|
||||
'rms_60': rms_60 if rms_60 else 999,
|
||||
'mfg_90': mfg_90 if mfg_90 else 999,
|
||||
'ws': ws if ws else 999,
|
||||
})
|
||||
|
||||
conn.close()
|
||||
|
||||
if not trials:
|
||||
# Check what keys are available
|
||||
conn = sqlite3.connect(db_path)
|
||||
c = conn.cursor()
|
||||
c.execute('SELECT DISTINCT key FROM trial_user_attributes LIMIT 20')
|
||||
keys = [row[0] for row in c.fetchall()]
|
||||
print(f"\nAvailable attribute keys: {keys}")
|
||||
conn.close()
|
||||
print("ERROR: No trials found with expected objective names!")
|
||||
sys.exit(1)
|
||||
|
||||
# Calculate weighted sum if not present
|
||||
for t in trials:
|
||||
if t['ws'] == 999:
|
||||
w40 = 100 / 4.0
|
||||
w60 = 50 / 10.0
|
||||
w90 = 20 / 20.0
|
||||
t['ws'] = w40 * t['rms_40'] + w60 * t['rms_60'] + w90 * t['mfg_90']
|
||||
|
||||
# Find best
|
||||
trials.sort(key=lambda x: x['ws'])
|
||||
best = trials[0]
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print("V8 Best Trial Summary")
|
||||
print('='*70)
|
||||
print(f"Best Trial: #{best['id']}")
|
||||
print(f" 40-20 RMS: {best['rms_40']:.2f} nm")
|
||||
print(f" 60-20 RMS: {best['rms_60']:.2f} nm")
|
||||
print(f" MFG 90: {best['mfg_90']:.2f} nm")
|
||||
print(f" WS: {best['ws']:.1f}")
|
||||
|
||||
# Now compare both extraction methods on this trial
|
||||
iter_path = Path(f'studies/M1_Mirror/m1_mirror_cost_reduction_V8/2_iterations/iter{best["id"]}')
|
||||
op2_path = iter_path / 'assy_m1_assyfem1_sim1-solution_1.op2'
|
||||
geo_path = iter_path / 'assy_m1_assyfem1_sim1-solution_1.dat'
|
||||
|
||||
print(f"\nIteration path: {iter_path}")
|
||||
print(f"OP2 exists: {op2_path.exists()}")
|
||||
print(f"Geometry exists: {geo_path.exists()}")
|
||||
|
||||
if not op2_path.exists():
|
||||
print("ERROR: OP2 file not found for best trial!")
|
||||
sys.exit(1)
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print("Comparing Zernike Methods: OPD vs Standard")
|
||||
print('='*70)
|
||||
|
||||
# Import extractors
|
||||
from optimization_engine.extractors.extract_zernike_figure import ZernikeOPDExtractor
|
||||
from optimization_engine.extractors.extract_zernike import ZernikeExtractor
|
||||
|
||||
# Standard method (Z-only)
|
||||
print("\n1. STANDARD METHOD (Z-only displacement)")
|
||||
print("-" * 50)
|
||||
try:
|
||||
std_extractor = ZernikeExtractor(
|
||||
op2_path,
|
||||
bdf_path=geo_path,
|
||||
n_modes=50,
|
||||
filter_orders=4
|
||||
)
|
||||
|
||||
# Extract relative WFE
|
||||
std_40_20 = std_extractor.extract_relative(target_subcase='3', reference_subcase='2')
|
||||
std_60_20 = std_extractor.extract_relative(target_subcase='4', reference_subcase='2')
|
||||
|
||||
# MFG uses J1-J3 filter - need new extractor instance
|
||||
std_extractor_mfg = ZernikeExtractor(
|
||||
op2_path,
|
||||
bdf_path=geo_path,
|
||||
n_modes=50,
|
||||
filter_orders=3 # J1-J3 for manufacturing
|
||||
)
|
||||
std_90 = std_extractor_mfg.extract_subcase(subcase_label='1')
|
||||
|
||||
print(f" 40-20 Relative RMS: {std_40_20['relative_filtered_rms_nm']:.2f} nm")
|
||||
print(f" 60-20 Relative RMS: {std_60_20['relative_filtered_rms_nm']:.2f} nm")
|
||||
print(f" 90 MFG (J1-J3): {std_90['filtered_rms_nm']:.2f} nm")
|
||||
|
||||
std_results = {
|
||||
'40_20': std_40_20['relative_filtered_rms_nm'],
|
||||
'60_20': std_60_20['relative_filtered_rms_nm'],
|
||||
'90_mfg': std_90['filtered_rms_nm'],
|
||||
}
|
||||
except Exception as e:
|
||||
print(f" ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
std_results = None
|
||||
|
||||
# OPD method (X,Y,Z displacement with mesh interpolation)
|
||||
print("\n2. OPD METHOD (X,Y,Z displacement with mesh interpolation)")
|
||||
print("-" * 50)
|
||||
try:
|
||||
opd_extractor = ZernikeOPDExtractor(
|
||||
op2_path,
|
||||
bdf_path=geo_path,
|
||||
n_modes=50,
|
||||
filter_orders=4
|
||||
)
|
||||
|
||||
# Extract relative WFE using OPD method
|
||||
opd_40_20 = opd_extractor.extract_relative(target_subcase='3', reference_subcase='2')
|
||||
opd_60_20 = opd_extractor.extract_relative(target_subcase='4', reference_subcase='2')
|
||||
|
||||
# MFG uses J1-J3 filter
|
||||
opd_extractor_mfg = ZernikeOPDExtractor(
|
||||
op2_path,
|
||||
bdf_path=geo_path,
|
||||
n_modes=50,
|
||||
filter_orders=3 # J1-J3 for manufacturing
|
||||
)
|
||||
opd_90 = opd_extractor_mfg.extract_subcase(subcase_label='1')
|
||||
|
||||
print(f" 40-20 Relative RMS: {opd_40_20['relative_filtered_rms_nm']:.2f} nm")
|
||||
print(f" 60-20 Relative RMS: {opd_60_20['relative_filtered_rms_nm']:.2f} nm")
|
||||
print(f" 90 MFG (J1-J3): {opd_90['filtered_rms_nm']:.2f} nm")
|
||||
|
||||
# Also get lateral displacement info
|
||||
print(f"\n Lateral Displacement (40° vs 20°):")
|
||||
print(f" Max: {opd_40_20.get('max_lateral_displacement_um', 'N/A')} µm")
|
||||
print(f" RMS: {opd_40_20.get('rms_lateral_displacement_um', 'N/A')} µm")
|
||||
|
||||
opd_results = {
|
||||
'40_20': opd_40_20['relative_filtered_rms_nm'],
|
||||
'60_20': opd_60_20['relative_filtered_rms_nm'],
|
||||
'90_mfg': opd_90['filtered_rms_nm'],
|
||||
'lateral_max': opd_40_20.get('max_lateral_displacement_um', 0),
|
||||
'lateral_rms': opd_40_20.get('rms_lateral_displacement_um', 0),
|
||||
}
|
||||
except Exception as e:
|
||||
print(f" ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
opd_results = None
|
||||
|
||||
# Comparison
|
||||
if std_results and opd_results:
|
||||
print(f"\n{'='*70}")
|
||||
print("COMPARISON: OPD vs Standard Method")
|
||||
print('='*70)
|
||||
|
||||
print(f"\n{'Metric':<25} {'Standard':<12} {'OPD':<12} {'Delta':<12} {'Delta %':<10}")
|
||||
print("-" * 70)
|
||||
|
||||
for key, label in [('40_20', '40-20 RMS (nm)'), ('60_20', '60-20 RMS (nm)'), ('90_mfg', '90 MFG (nm)')]:
|
||||
std_val = std_results[key]
|
||||
opd_val = opd_results[key]
|
||||
delta = opd_val - std_val
|
||||
delta_pct = 100 * delta / std_val if std_val > 0 else 0
|
||||
|
||||
print(f"{label:<25} {std_val:<12.2f} {opd_val:<12.2f} {delta:+<12.2f} {delta_pct:+.1f}%")
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print("INTERPRETATION")
|
||||
print('='*70)
|
||||
|
||||
delta_40 = opd_results['40_20'] - std_results['40_20']
|
||||
delta_60 = opd_results['60_20'] - std_results['60_20']
|
||||
|
||||
print(f"""
|
||||
The OPD method accounts for lateral (X,Y) displacement when computing WFE.
|
||||
|
||||
For telescope mirrors with lateral supports:
|
||||
- Gravity causes the mirror to shift laterally (X,Y) as well as sag (Z)
|
||||
- The Standard method ignores this lateral shift
|
||||
- The OPD method interpolates the ideal surface at deformed (x+dx, y+dy) positions
|
||||
|
||||
Key observations:
|
||||
- 40-20 difference: {delta_40:+.2f} nm ({100*delta_40/std_results['40_20']:+.1f}%)
|
||||
- 60-20 difference: {delta_60:+.2f} nm ({100*delta_60/std_results['60_20']:+.1f}%)
|
||||
- Lateral displacement: Max {opd_results['lateral_max']:.3f} µm, RMS {opd_results['lateral_rms']:.3f} µm
|
||||
|
||||
Significance:
|
||||
""")
|
||||
|
||||
if abs(delta_40) < 0.5 and abs(delta_60) < 0.5:
|
||||
print(" -> SMALL DIFFERENCE: For this design, lateral displacement is minimal.")
|
||||
print(" Both methods give similar results.")
|
||||
else:
|
||||
print(" -> SIGNIFICANT DIFFERENCE: Lateral displacement affects WFE computation.")
|
||||
print(" OPD method is more physically accurate for this geometry.")
|
||||
|
||||
if opd_results['lateral_rms'] > 0.1:
|
||||
print(f"\n WARNING: Lateral RMS {opd_results['lateral_rms']:.3f} µm is notable.")
|
||||
print(" OPD method recommended for accurate optimization.")
|
||||
208
tests/test_zernike_methods_comparison.py
Normal file
208
tests/test_zernike_methods_comparison.py
Normal file
@@ -0,0 +1,208 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Compare all Zernike extraction methods on M1 Mirror data.
|
||||
|
||||
Methods compared:
|
||||
1. Standard - Z-displacement only at original (x,y)
|
||||
2. Parabola OPD - Uses parabola approximation with prescription focal length
|
||||
3. Figure OPD - Uses actual figure.dat geometry (most rigorous)
|
||||
|
||||
Run:
|
||||
python tests/test_zernike_methods_comparison.py studies/M1_Mirror/m1_mirror_cost_reduction_V9
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def run_full_comparison(study_dir: Path):
    """Compare the Standard, Analytic, and Figure-OPD Zernike extractors.

    Locates an OP2 result file under *study_dir*, runs all three extraction
    methods on every available subcase, then prints a summary table, a
    detailed per-subcase comparison, a tracking-WFE analysis for elevation
    changes, and a closing summary of the findings.
    """
    from optimization_engine.extractors.extract_zernike import ZernikeExtractor
    from optimization_engine.extractors.extract_zernike_opd import ZernikeAnalyticExtractor
    from optimization_engine.extractors.extract_zernike_figure import ZernikeOPDExtractor

    # Prefer the archived best design; fall back to the first iteration.
    candidates = list(study_dir.glob('3_results/best_design_archive/**/*.op2'))
    if not candidates:
        candidates = list(study_dir.glob('2_iterations/iter1/*.op2'))
    if not candidates:
        raise FileNotFoundError(f"No OP2 file found in {study_dir}")
    op2_file = candidates[0]

    # Figure file is optional - the OPD extractor falls back to BDF geometry
    # filtered to the OP2 nodes when it is absent.
    figure_file = study_dir / '1_setup' / 'model' / 'figure.dat'

    banner = "=" * 80
    print(banner)
    print("ZERNIKE METHODS COMPARISON - M1 MIRROR")
    print(banner)
    print(f"\nOP2 file: {op2_file.name}")
    if figure_file.exists():
        print(f"Figure file: {figure_file.name}")
    else:
        print("Figure file: Not found (using BDF geometry filtered to OP2 nodes)")

    # Build one extractor per method.
    print("\nInitializing extractors...")

    std_extractor = ZernikeExtractor(op2_file)
    print(f" Standard: {len(std_extractor.node_geometry)} nodes in BDF")

    analytic_extractor = ZernikeAnalyticExtractor(op2_file, focal_length=1445.0, concave=True)
    print(f" Analytic (Parabola f=1445mm): {len(analytic_extractor.node_geometry)} nodes")

    # OPD extractor - ALWAYS use BDF geometry filtered to OP2 nodes (RECOMMENDED);
    # figure.dat may carry mismatched coordinates from a different model state.
    opd_extractor = ZernikeOPDExtractor(
        op2_file,
        figure_path=None  # Force use of BDF geometry
    )
    print(f" OPD (BDF geometry): {len(opd_extractor.figure_geometry)} figure nodes")

    print()

    # Enumerate the subcases present in the OP2 results.
    subcases = list(std_extractor.displacements.keys())
    print(f"Available subcases: {subcases}")

    # Summary table: one row per subcase, one column per method.
    print("\n" + banner)
    print(f"{'Subcase':<10} {'Standard':<15} {'Analytic':<15} {'OPD':<15} {'Max Lat (um)':<12}")
    print("-" * 80)

    per_subcase = {}

    for subcase_id in subcases:
        try:
            res_std = std_extractor.extract_subcase(subcase_id)
            res_analytic = analytic_extractor.extract_subcase(subcase_id)
            res_opd = opd_extractor.extract_subcase(subcase_id)

            row = (
                res_std['filtered_rms_nm'],
                res_analytic['filtered_rms_nm'],
                res_opd['filtered_rms_nm'],
                res_opd.get('max_lateral_displacement_um', 0),
            )
            print(f"{subcase_id:<10} {row[0]:<15.2f} {row[1]:<15.2f} {row[2]:<15.2f} {row[3]:<12.3f}")

            per_subcase[subcase_id] = {
                'standard': res_std,
                'analytic': res_analytic,
                'opd': res_opd
            }

        except Exception as e:
            print(f"{subcase_id:<10} ERROR: {e}")

    print("-" * 80)

    # Detailed per-subcase breakdown.
    print("\n" + banner)
    print("DETAILED COMPARISON")
    print(banner)

    for subcase_id, methods in per_subcase.items():
        rule = '-' * 40
        print(f"\n{rule}")
        print(f"SUBCASE {subcase_id}")
        print(f"{rule}")

        std = methods['standard']
        analytic = methods['analytic']
        opd = methods['opd']

        print(f"\n{'Metric':<25} {'Standard':<15} {'Analytic':<15} {'OPD':<15}")
        print("-" * 70)

        # RMS metrics
        print(f"{'Filtered RMS (nm)':<25} {std['filtered_rms_nm']:<15.2f} {analytic['filtered_rms_nm']:<15.2f} {opd['filtered_rms_nm']:<15.2f}")
        print(f"{'Global RMS (nm)':<25} {std['global_rms_nm']:<15.2f} {analytic['global_rms_nm']:<15.2f} {opd['global_rms_nm']:<15.2f}")

        # Aberrations - printed only when every method reports the key.
        for aberr in ('defocus', 'astigmatism', 'coma', 'trefoil', 'spherical'):
            key = f'{aberr}_nm'
            if all(key in d for d in (std, analytic, opd)):
                print(f"{aberr.capitalize():<25} {std[key]:<15.2f} {analytic[key]:<15.2f} {opd[key]:<15.2f}")

        # Node count
        print(f"{'Nodes':<25} {std['n_nodes']:<15} {analytic['n_nodes']:<15} {opd['n_nodes']:<15}")

        # Lateral displacement - only the OPD method reports it.
        print()
        print("Lateral displacement (OPD method):")
        print(f" Max: {opd.get('max_lateral_displacement_um', 0):.3f} um")
        print(f" RMS: {opd.get('rms_lateral_displacement_um', 0):.3f} um")
        print(f" Mean: {opd.get('mean_lateral_displacement_um', 0):.3f} um")

        # Method-to-method deltas on the filtered RMS.
        print()
        delta_opd = opd['filtered_rms_nm'] - std['filtered_rms_nm']
        delta_analytic = analytic['filtered_rms_nm'] - std['filtered_rms_nm']

        print("Difference from Standard:")
        print(f" OPD: {delta_opd:+.2f} nm ({100*delta_opd/std['filtered_rms_nm']:+.1f}%)")
        print(f" Analytic: {delta_analytic:+.2f} nm")
        print()
        print(f"Difference OPD vs Analytic: {opd['filtered_rms_nm'] - analytic['filtered_rms_nm']:+.2f} nm")

    # Tracking WFE analysis - needs the 20/40/60 degree subcases (2, 3, 4).
    if all(k in per_subcase for k in ('2', '3', '4')):
        print("\n" + banner)
        print("TRACKING WFE ANALYSIS (elevation changes)")
        print(banner)

        for method_name, method_key in (('Standard', 'standard'), ('Analytic', 'analytic'), ('OPD', 'opd')):
            print(f"\n{method_name}:")

            fallback = [0] * 36
            z20 = np.array(per_subcase['2'][method_key].get('coefficients', fallback))
            z40 = np.array(per_subcase['3'][method_key].get('coefficients', fallback))
            z60 = np.array(per_subcase['4'][method_key].get('coefficients', fallback))

            if len(z20) > 4:
                # Differential coefficients, keeping only J5 and above.
                rms_40_20 = np.sqrt(np.sum((z40 - z20)[4:] ** 2))
                rms_60_20 = np.sqrt(np.sum((z60 - z20)[4:] ** 2))

                print(f" 40-20 deg tracking: {rms_40_20:.2f} nm RMS (J5+ filtered)")
                print(f" 60-20 deg tracking: {rms_60_20:.2f} nm RMS (J5+ filtered)")
            else:
                print(" (coefficients not available)")

    print("\n" + banner)
    print("SUMMARY")
    print(banner)
    print("""
Key findings:
- Standard method: Uses Z-displacement only at original (x,y) - fast but ignores lateral shift
- Analytic method: Accounts for lateral shift using parabola formula (requires focal length)
- OPD method: Uses actual mesh geometry - MOST RIGOROUS, no shape assumption

Recommendation: Use OPD method (ZernikeOPDExtractor) for all mirror optimization.
The Analytic method is useful for comparison against theoretical parabola.
""")
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse the study-directory argument and run the comparison."""
    import argparse

    parser = argparse.ArgumentParser(description='Compare Zernike extraction methods')
    parser.add_argument('study_dir', type=str, help='Path to study directory')
    ns = parser.parse_args()

    # Resolve to an absolute path and fail fast if it does not exist.
    target = Path(ns.study_dir).resolve()
    if not target.exists():
        print(f"Study directory not found: {target}")
        sys.exit(1)

    run_full_comparison(target)


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user