Major improvements to Zernike WFE visualization: - Add ZernikeDashboardInsight: Unified dashboard with all orientations (40°, 60°, 90°) on one page with light theme and executive summary - Add OPD method toggle: Switch between Standard (Z-only) and OPD (X,Y,Z) methods in ZernikeWFEInsight with interactive buttons - Add lateral displacement maps: Visualize X,Y displacement for each orientation - Add displacement component views: Toggle between WFE, ΔX, ΔY, ΔZ in relative views - Add metrics comparison table showing both methods side-by-side New extractors: - extract_zernike_figure.py: ZernikeOPDExtractor using BDF geometry interpolation - extract_zernike_opd.py: Parabola-based OPD with focal length Key finding: OPD method gives 8-11% higher WFE values than Standard method (more conservative/accurate for surfaces with lateral displacement under gravity) Documentation updates: - SYS_12: Added E22 ZernikeOPD as recommended method - SYS_16: Added ZernikeDashboard, updated ZernikeWFE with OPD features - Cheatsheet: Added Zernike method comparison table 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
151 lines
5.0 KiB
Python
151 lines
5.0 KiB
Python
"""Analyze V11 optimization results."""
|
|
import sqlite3
|
|
from pathlib import Path
|
|
import json
|
|
import sys
|
|
sys.path.insert(0, '.')
|
|
|
|
# Location of the Optuna study database for the V11 run.
db_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11/3_results/study.db')

# Fail fast with a clear message: sqlite3.connect() on a missing file would
# either silently create an empty database (leading to confusing
# "no such table: trials" errors below) or raise an opaque OperationalError
# if the parent directory does not exist.
if not db_path.exists():
    sys.exit(f'Study database not found: {db_path}')

conn = sqlite3.connect(db_path)
c = conn.cursor()

# Get all completed trials with their objectives
c.execute('''
    SELECT t.trial_id
    FROM trials t
    WHERE t.state = 'COMPLETE'
    ORDER BY t.trial_id
''')
completed_ids = [row[0] for row in c.fetchall()]
print(f'Completed trials: {len(completed_ids)}')
# Build trial data: one dict per completed trial that recorded objectives.
trials = []
for trial_id in completed_ids:
    # Pull every user attribute stored for this trial.
    c.execute('''
        SELECT key, value_json
        FROM trial_user_attributes
        WHERE trial_id = ?
    ''', (trial_id,))
    attrs = {key: json.loads(value_json) for key, value_json in c.fetchall()}

    # Trials without the primary objective never finished the WFE
    # extraction step - skip them.
    if 'rel_filtered_rms_40_vs_20' not in attrs:
        continue

    trials.append({
        'id': trial_id,
        'rms_40': attrs.get('rel_filtered_rms_40_vs_20', 999),
        'rms_60': attrs.get('rel_filtered_rms_60_vs_20', 999),
        'mfg_90': attrs.get('mfg_90_optician_workload', 999),
        'ws': attrs.get('weighted_sum', 999),
        'lateral': attrs.get('lateral_rms_um', 0),
    })
# Sort by weighted sum or calculate it.
# Each weight normalizes its objective by the design target so the terms
# contribute on a comparable scale.  These are loop-invariant, so compute
# them once instead of on every iteration (the original recomputed them
# inside the loop).
W40 = 100 / 4.0   # 40-20 RMS weight: target 4 nm
W60 = 50 / 10.0   # 60-20 RMS weight: target 10 nm
W90 = 20 / 20.0   # 90-deg MFG weight: target 20 nm

for t in trials:
    # 999 is the "missing value" sentinel used when the study did not
    # record a precomputed weighted sum; recompute it from the objectives.
    if t['ws'] == 999:
        t['ws'] = W40 * t['rms_40'] + W60 * t['rms_60'] + W90 * t['mfg_90']

# Find best trial = lowest weighted sum; None when nothing completed.
trials.sort(key=lambda x: x['ws'])
best = trials[0] if trials else None
# --- Summary report -----------------------------------------------------
print('\nV11 Optimization Summary')
print('=' * 70)
print(f'Completed trials: {len(trials)}')

if trials:
    # Collect each objective across all completed trials.
    all_rms_40 = [entry['rms_40'] for entry in trials]
    all_rms_60 = [entry['rms_60'] for entry in trials]
    all_mfg_90 = [entry['mfg_90'] for entry in trials]

    print(f'\n40-20 RMS range: {min(all_rms_40):.2f} - {max(all_rms_40):.2f} nm (target: 4.0 nm)')
    print(f'60-20 RMS range: {min(all_rms_60):.2f} - {max(all_rms_60):.2f} nm (target: 10.0 nm)')
    print(f'90 MFG range: {min(all_mfg_90):.2f} - {max(all_mfg_90):.2f} nm (target: 20.0 nm)')

    print(f'\nBest Trial (#{best["id"]}):')
    print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
    print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
    print(f' MFG 90: {best["mfg_90"]:.2f} nm')
    print(f' Lateral: {best["lateral"]:.3f} um')
    print(f' WS: {best["ws"]:.1f}')

    # Check if values make sense (not too good to be true)
    print('\nSanity Check:')
    if best['rms_40'] >= 3.0:
        print(f' 40-20 RMS {best["rms_40"]:.2f} nm - looks reasonable (expected ~6-7nm)')
    else:
        print(' WARNING: 40-20 RMS suspiciously low!')

    if best['rms_60'] >= 8.0:
        print(f' 60-20 RMS {best["rms_60"]:.2f} nm - looks reasonable (expected ~13-15nm)')
    else:
        print(' WARNING: 60-20 RMS suspiciously low!')
# Compare to V7 baseline and V10 buggy values
print('\n' + '=' * 70)
print('Comparison to Known Values:')
print('=' * 70)

# Reference values from earlier study versions (hard-coded on purpose:
# they are historical results used as a sanity yardstick).
print('\nV7 Baseline (original Zernike, correct):')
print(' 40-20 RMS: 6.05 nm')
print(' 60-20 RMS: 13.03 nm')
print(' MFG 90: 26.34 nm')

print('\nV10 BUGGY (abs(RMS_target - RMS_ref) - WRONG):')
print(' 40-20 RMS: ~1.99 nm <- WAY TOO LOW')
print(' 60-20 RMS: ~6.82 nm <- WAY TOO LOW')

# NOTE(review): `best` is None when no trial recorded objectives; the
# original code indexed it unconditionally and would raise a TypeError
# here.  Guard so the report degrades gracefully.
if best is not None:
    print('\nV11 Best (extract_relative() - CORRECT):')
    print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
    print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
    print(f' MFG 90: {best["mfg_90"]:.2f} nm')

    # Verdict: the corrected extraction should land near the V7 baseline.
    print('\n' + '=' * 70)
    print('VERDICT:')
    print('=' * 70)
    if 5.0 < best['rms_40'] < 10.0:
        print(' 40-20: CORRECT - matches expected V7 range')
    else:
        print(f' 40-20: INVESTIGATE - {best["rms_40"]:.2f} nm is outside expected range')

    if 10.0 < best['rms_60'] < 20.0:
        print(' 60-20: CORRECT - matches expected V7 range')
    else:
        print(f' 60-20: INVESTIGATE - {best["rms_60"]:.2f} nm is outside expected range')
else:
    print('\nNo completed trials with objectives - skipping V11 comparison and verdict.')

conn.close()
# Now generate Zernike dashboard for visual verification
print('\n' + '=' * 70)
print('Generating Zernike Dashboard for Best Iteration...')
print('=' * 70)

# Imported lazily: the insights package is heavyweight and only needed for
# this optional visualization step (sys.path was adjusted at the top).
from optimization_engine.insights import ZernikeDashboardInsight, InsightConfig

study_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11')

# Find the iteration with the best design
best_archive = study_path / '3_results' / 'best_design_archive'
if not best_archive.exists():
    # Previously this case fell through silently right after the banner.
    print(f'Best design archive not found: {best_archive}')
else:
    op2_files = list(best_archive.glob('*.op2'))
    if not op2_files:
        print('Cannot generate insight - no OP2 files found')
    else:
        print(f'Found best design archive: {op2_files[0].name}')

        insight = ZernikeDashboardInsight(study_path)
        if not insight.can_generate():
            # Previously silent; report why nothing was produced.
            print('Insight reports it cannot generate a dashboard for this study.')
        else:
            print('Generating dashboard...')
            result = insight.generate(InsightConfig())
            if result.success:
                print(f'\nDashboard generated: {result.html_path}')
                print('\nDashboard Summary:')
                for key, value in result.summary.items():
                    print(f' {key}: {value}')
            else:
                print(f'Error: {result.error}')