chore: Project cleanup and Canvas UX improvements (Phase 7-9)

## Cleanup (v0.5.0)
- Delete 102+ orphaned MCP session temp files
- Remove build artifacts (htmlcov, dist, __pycache__)
- Archive superseded plan docs (RALPH_LOOP V2/V3, CANVAS V3, etc.)
- Move debug/analysis scripts from tests/ to tools/analysis/
- Archive redundant NX journals to archive/nx_journals/
- Archive monolithic PROTOCOL.md to docs/archive/
- Update .gitignore with missing patterns
- Clean old study files (optimization_log_old.txt, run_optimization_old.py)

## Canvas UX (Phases 7-9)
- Phase 7: Resizable panels with localStorage persistence
  - Left sidebar: 200-400px, Right panel: 280-600px
  - New useResizablePanel hook and ResizeHandle component
- Phase 8: Enable all palette items
  - All 8 node types now draggable
  - Singleton logic for model/solver/algorithm/surrogate
- Phase 9: Solver configuration
  - Add SolverEngine type (nxnastran, mscnastran, python, etc.)
  - Add NastranSolutionType (SOL101-SOL200)
  - Engine/solution dropdowns in config panel
  - Python script path support

## Documentation
- Update CHANGELOG.md with recent versions
- Update docs/00_INDEX.md
- Create examples/README.md
- Add docs/plans/CANVAS_UX_IMPROVEMENTS.md
This commit is contained in:
2026-01-24 15:17:34 -05:00
parent 2cb8dccc3a
commit a3f18dc377
38 changed files with 1172 additions and 2570 deletions

View File

@@ -0,0 +1,150 @@
"""Analyze V11 optimization results."""
import sqlite3
from pathlib import Path
import json
import sys
sys.path.insert(0, '.')
db_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11/3_results/study.db')
conn = sqlite3.connect(db_path)
c = conn.cursor()
# Get all completed trials with their objectives
c.execute('''
SELECT t.trial_id
FROM trials t
WHERE t.state = 'COMPLETE'
ORDER BY t.trial_id
''')
completed_ids = [row[0] for row in c.fetchall()]
print(f'Completed trials: {len(completed_ids)}')
# Build trial data
trials = []
for tid in completed_ids:
c.execute('''
SELECT key, value_json
FROM trial_user_attributes
WHERE trial_id = ?
''', (tid,))
attrs = {row[0]: json.loads(row[1]) for row in c.fetchall()}
if 'rel_filtered_rms_40_vs_20' in attrs:
trials.append({
'id': tid,
'rms_40': attrs.get('rel_filtered_rms_40_vs_20', 999),
'rms_60': attrs.get('rel_filtered_rms_60_vs_20', 999),
'mfg_90': attrs.get('mfg_90_optician_workload', 999),
'ws': attrs.get('weighted_sum', 999),
'lateral': attrs.get('lateral_rms_um', 0),
})
# Sort by weighted sum or calculate it
for t in trials:
if t['ws'] == 999:
# Calculate weighted sum
w40 = 100 / 4.0 # Target 4 nm
w60 = 50 / 10.0 # Target 10 nm
w90 = 20 / 20.0 # Target 20 nm
t['ws'] = w40 * t['rms_40'] + w60 * t['rms_60'] + w90 * t['mfg_90']
# Find best
trials.sort(key=lambda x: x['ws'])
best = trials[0] if trials else None
print('\nV11 Optimization Summary')
print('=' * 70)
print(f'Completed trials: {len(trials)}')
if trials:
rms40 = [t['rms_40'] for t in trials]
rms60 = [t['rms_60'] for t in trials]
mfg90 = [t['mfg_90'] for t in trials]
print(f'\n40-20 RMS range: {min(rms40):.2f} - {max(rms40):.2f} nm (target: 4.0 nm)')
print(f'60-20 RMS range: {min(rms60):.2f} - {max(rms60):.2f} nm (target: 10.0 nm)')
print(f'90 MFG range: {min(mfg90):.2f} - {max(mfg90):.2f} nm (target: 20.0 nm)')
print(f'\nBest Trial (#{best["id"]}):')
print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
print(f' MFG 90: {best["mfg_90"]:.2f} nm')
print(f' Lateral: {best["lateral"]:.3f} um')
print(f' WS: {best["ws"]:.1f}')
# Check if values make sense (not too good to be true)
print('\nSanity Check:')
if best['rms_40'] < 3.0:
print(' WARNING: 40-20 RMS suspiciously low!')
else:
print(f' 40-20 RMS {best["rms_40"]:.2f} nm - looks reasonable (expected ~6-7nm)')
if best['rms_60'] < 8.0:
print(' WARNING: 60-20 RMS suspiciously low!')
else:
print(f' 60-20 RMS {best["rms_60"]:.2f} nm - looks reasonable (expected ~13-15nm)')
# Compare to V7 baseline and V10 buggy values
print('\n' + '=' * 70)
print('Comparison to Known Values:')
print('=' * 70)
print('\nV7 Baseline (original Zernike, correct):')
print(' 40-20 RMS: 6.05 nm')
print(' 60-20 RMS: 13.03 nm')
print(' MFG 90: 26.34 nm')
print('\nV10 BUGGY (abs(RMS_target - RMS_ref) - WRONG):')
print(' 40-20 RMS: ~1.99 nm <- WAY TOO LOW')
print(' 60-20 RMS: ~6.82 nm <- WAY TOO LOW')
print('\nV11 Best (extract_relative() - CORRECT):')
print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
print(f' MFG 90: {best["mfg_90"]:.2f} nm')
# Verdict
print('\n' + '=' * 70)
print('VERDICT:')
print('=' * 70)
if best['rms_40'] > 5.0 and best['rms_40'] < 10.0:
print(' 40-20: CORRECT - matches expected V7 range')
else:
print(f' 40-20: INVESTIGATE - {best["rms_40"]:.2f} nm is outside expected range')
if best['rms_60'] > 10.0 and best['rms_60'] < 20.0:
print(' 60-20: CORRECT - matches expected V7 range')
else:
print(f' 60-20: INVESTIGATE - {best["rms_60"]:.2f} nm is outside expected range')
conn.close()
# Now generate Zernike dashboard for visual verification
print('\n' + '=' * 70)
print('Generating Zernike Dashboard for Best Iteration...')
print('=' * 70)
from optimization_engine.insights import ZernikeDashboardInsight, InsightConfig
study_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11')
# Find the iteration with the best design
best_archive = study_path / '3_results' / 'best_design_archive'
if best_archive.exists():
op2_files = list(best_archive.glob('*.op2'))
if op2_files:
print(f'Found best design archive: {op2_files[0].name}')
insight = ZernikeDashboardInsight(study_path)
if insight.can_generate():
print('Generating dashboard...')
result = insight.generate(InsightConfig())
if result.success:
print(f'\nDashboard generated: {result.html_path}')
print(f'\nDashboard Summary:')
for key, value in result.summary.items():
print(f' {key}: {value}')
else:
print(f'Error: {result.error}')
else:
print('Cannot generate insight - no OP2 files found')

View File

@@ -0,0 +1,74 @@
"""Verify the V10 fix - compare Standard extract_relative vs OPD extract_relative."""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from optimization_engine.extractors import ZernikeExtractor, ZernikeOPDExtractor
op2 = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V10/2_iterations/iter1/assy_m1_assyfem1_sim1-solution_1.op2')
print("="*70)
print("VERIFICATION: ZernikeOPDExtractor.extract_relative() vs Standard")
print("="*70)
print()
# Standard extractor
extractor_std = ZernikeExtractor(op2, n_modes=50, filter_orders=4)
# OPD extractor (with XY lateral correction)
extractor_opd = ZernikeOPDExtractor(op2, n_modes=50, filter_orders=4)
print("Standard ZernikeExtractor.extract_relative():")
rel_40_std = extractor_std.extract_relative('3', '2')
rel_60_std = extractor_std.extract_relative('4', '2')
rel_90_std = extractor_std.extract_relative('1', '2')
print(f" 40-20: {rel_40_std['relative_filtered_rms_nm']:.2f} nm")
print(f" 60-20: {rel_60_std['relative_filtered_rms_nm']:.2f} nm")
print(f" 90-20 (j1to3): {rel_90_std['relative_rms_filter_j1to3']:.2f} nm")
print()
print("NEW ZernikeOPDExtractor.extract_relative() (with XY lateral correction):")
rel_40_opd = extractor_opd.extract_relative('3', '2')
rel_60_opd = extractor_opd.extract_relative('4', '2')
rel_90_opd = extractor_opd.extract_relative('1', '2')
print(f" 40-20: {rel_40_opd['relative_filtered_rms_nm']:.2f} nm")
print(f" 60-20: {rel_60_opd['relative_filtered_rms_nm']:.2f} nm")
print(f" 90-20 (j1to3): {rel_90_opd['relative_rms_filter_j1to3']:.2f} nm")
print()
print("Lateral displacement diagnostics (OPD method):")
print(f" Max lateral: {rel_40_opd['max_lateral_displacement_um']:.3f} um")
print(f" RMS lateral: {rel_40_opd['rms_lateral_displacement_um']:.3f} um")
print()
print("="*70)
print("COMPARISON")
print("="*70)
print()
print(f"{'Metric':<20} | {'Standard':<12} | {'OPD':<12} | {'Diff %':<10}")
print("-"*60)
def pct_diff(a, b):
    """Return the percent change from *a* to *b*; 0 when *a* is not positive."""
    if a > 0:
        return 100.0 * (b - a) / a
    return 0
print(f"{'40-20 (nm)':<20} | {rel_40_std['relative_filtered_rms_nm']:>12.2f} | {rel_40_opd['relative_filtered_rms_nm']:>12.2f} | {pct_diff(rel_40_std['relative_filtered_rms_nm'], rel_40_opd['relative_filtered_rms_nm']):>+10.1f}%")
print(f"{'60-20 (nm)':<20} | {rel_60_std['relative_filtered_rms_nm']:>12.2f} | {rel_60_opd['relative_filtered_rms_nm']:>12.2f} | {pct_diff(rel_60_std['relative_filtered_rms_nm'], rel_60_opd['relative_filtered_rms_nm']):>+10.1f}%")
print(f"{'90-20 j1to3 (nm)':<20} | {rel_90_std['relative_rms_filter_j1to3']:>12.2f} | {rel_90_opd['relative_rms_filter_j1to3']:>12.2f} | {pct_diff(rel_90_std['relative_rms_filter_j1to3'], rel_90_opd['relative_rms_filter_j1to3']):>+10.1f}%")
print()
print("="*70)
print("WHAT V9 REPORTED (for comparison)")
print("="*70)
print(" 40-20: 6.10 nm (from DB)")
print(" 60-20: 12.76 nm (from DB)")
print()
print("V10 SHOULD NOW REPORT (using OPD extract_relative):")
print(f" 40-20: {rel_40_opd['relative_filtered_rms_nm']:.2f} nm")
print(f" 60-20: {rel_60_opd['relative_filtered_rms_nm']:.2f} nm")
print(f" 90-20: {rel_90_opd['relative_rms_filter_j1to3']:.2f} nm")
print()
print("V10 OLD WRONG VALUES WERE:")
print(" 40-20: 1.99 nm (WRONG - was computing abs(RMS_target - RMS_ref))")
print(" 60-20: 6.82 nm (WRONG)")
print()
print("FIX VERIFIED: OPD extract_relative() correctly computes RMS of (WFE_target - WFE_ref)")

View File

@@ -0,0 +1,72 @@
"""Compare V9 vs V10 calculation methods."""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from optimization_engine.extractors import ZernikeExtractor
op2 = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V10/2_iterations/iter1/assy_m1_assyfem1_sim1-solution_1.op2')
extractor = ZernikeExtractor(op2, n_modes=50, filter_orders=4)
print("="*70)
print("CRITICAL: V9 vs V10 Calculation Method Comparison")
print("="*70)
print()
# This is what V9 does - computes relative WFE THEN fits Zernike
rel_40 = extractor.extract_relative('3', '2')
rel_60 = extractor.extract_relative('4', '2')
rel_90 = extractor.extract_relative('1', '2')
print('V9 method (ZernikeExtractor.extract_relative):')
print(' Computes WFE_diff = WFE_target - WFE_ref node-by-node')
print(' Then fits Zernike to WFE_diff')
print()
print(f' 40-20: {rel_40["relative_filtered_rms_nm"]:.2f} nm')
print(f' 60-20: {rel_60["relative_filtered_rms_nm"]:.2f} nm')
print(f' 90-20 (j1to3): {rel_90["relative_rms_filter_j1to3"]:.2f} nm')
# Individual absolute values
r20 = extractor.extract_subcase('2')
r40 = extractor.extract_subcase('3')
r60 = extractor.extract_subcase('4')
r90 = extractor.extract_subcase('1')
print()
print('='*70)
print('Individual absolute RMS values:')
print('='*70)
print(f' 20 deg: {r20["filtered_rms_nm"]:.2f} nm')
print(f' 40 deg: {r40["filtered_rms_nm"]:.2f} nm')
print(f' 60 deg: {r60["filtered_rms_nm"]:.2f} nm')
print(f' 90 deg: {r90["filtered_rms_nm"]:.2f} nm')
print()
print('='*70)
print('V10 method (WRONG - difference of RMS values):')
print(' Computes RMS_target - RMS_ref')
print(' This is NOT the same as RMS of the difference!')
print('='*70)
print()
print(f' 40-20: {r40["filtered_rms_nm"] - r20["filtered_rms_nm"]:.2f} nm')
print(f' 60-20: {r60["filtered_rms_nm"] - r20["filtered_rms_nm"]:.2f} nm')
print(f' After abs(): {abs(r40["filtered_rms_nm"] - r20["filtered_rms_nm"]):.2f} nm')
print(f' After abs(): {abs(r60["filtered_rms_nm"] - r20["filtered_rms_nm"]):.2f} nm')
print()
print('='*70)
print('CONCLUSION')
print('='*70)
print()
print('V10 BUG: Computes abs(RMS_target - RMS_ref) instead of RMS(WFE_target - WFE_ref)')
print()
print('The CORRECT relative WFE (from V9 method):')
print(f' 40-20: {rel_40["relative_filtered_rms_nm"]:.2f} nm')
print(f' 60-20: {rel_60["relative_filtered_rms_nm"]:.2f} nm')
print(f' 90-20: {rel_90["relative_rms_filter_j1to3"]:.2f} nm')
print()
print('The WRONG values V10 reports:')
print(f' 40-20: {abs(r40["filtered_rms_nm"] - r20["filtered_rms_nm"]):.2f} nm')
print(f' 60-20: {abs(r60["filtered_rms_nm"] - r20["filtered_rms_nm"]):.2f} nm')
print()
print('V10 values are ~3-4x LOWER than correct values!')

View File

@@ -0,0 +1,143 @@
"""Audit V10 WFE values - independent verification."""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from optimization_engine.extractors import ZernikeOPDExtractor, ZernikeExtractor
print('='*70)
print('AUDIT: V10 WFE Values - Independent Verification')
print('='*70)
# V10 iter1 (baseline trial)
op2_v10 = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V10/2_iterations/iter1/assy_m1_assyfem1_sim1-solution_1.op2')
if not op2_v10.exists():
print('V10 OP2 file not found!')
sys.exit(1)
print(f'OP2 file: {op2_v10}')
print(f'Size: {op2_v10.stat().st_size / 1024 / 1024:.1f} MB')
# Test with ZernikeOPDExtractor (what V10 uses)
print()
print('='*70)
print('Method 1: ZernikeOPDExtractor (what V10 uses)')
print('='*70)
extractor_opd = ZernikeOPDExtractor(op2_v10, n_modes=50, filter_orders=4)
result_20_opd = extractor_opd.extract_subcase('2') # Reference
result_40_opd = extractor_opd.extract_subcase('3') # 40 deg
result_60_opd = extractor_opd.extract_subcase('4') # 60 deg
result_90_opd = extractor_opd.extract_subcase('1') # 90 deg MFG
print()
print('ABSOLUTE values (ZernikeOPD):')
print(f' 20 deg: filtered_rms = {result_20_opd["filtered_rms_nm"]:.2f} nm')
print(f' 40 deg: filtered_rms = {result_40_opd["filtered_rms_nm"]:.2f} nm')
print(f' 60 deg: filtered_rms = {result_60_opd["filtered_rms_nm"]:.2f} nm')
print(f' 90 deg: filtered_rms = {result_90_opd["filtered_rms_nm"]:.2f} nm')
print()
print('RELATIVE values (target - ref) as V10 computes:')
rel_40_opd = result_40_opd['filtered_rms_nm'] - result_20_opd['filtered_rms_nm']
rel_60_opd = result_60_opd['filtered_rms_nm'] - result_20_opd['filtered_rms_nm']
rel_mfg_opd = result_90_opd['rms_filter_j1to3_nm'] - result_20_opd['rms_filter_j1to3_nm']
print(f' 40-20: {rel_40_opd:.2f} nm (abs: {abs(rel_40_opd):.2f})')
print(f' 60-20: {rel_60_opd:.2f} nm (abs: {abs(rel_60_opd):.2f})')
print(f' 90-20 (j1to3): {rel_mfg_opd:.2f} nm (abs: {abs(rel_mfg_opd):.2f})')
print()
print('V10 uses abs() -> stores:')
print(f' rel_filtered_rms_40_vs_20: {abs(rel_40_opd):.2f}')
print(f' rel_filtered_rms_60_vs_20: {abs(rel_60_opd):.2f}')
print(f' mfg_90_optician_workload: {abs(rel_mfg_opd):.2f}')
# Test with Standard ZernikeExtractor (what V9 uses)
print()
print('='*70)
print('Method 2: Standard ZernikeExtractor (what V9 likely uses)')
print('='*70)
# Find the BDF file (solver deck) sitting next to the OP2 in the iteration dir.
bdf_files = list(op2_v10.parent.glob('*.dat'))
bdf_path = bdf_files[0] if bdf_files else None
print(f'BDF file: {bdf_path}')
extractor_std = ZernikeExtractor(op2_v10, bdf_path=bdf_path, n_modes=50, filter_orders=4)
result_20_std = extractor_std.extract_subcase('2')
result_40_std = extractor_std.extract_subcase('3')
result_60_std = extractor_std.extract_subcase('4')
result_90_std = extractor_std.extract_subcase('1')
print()
print('ABSOLUTE values (Standard Z-only):')
print(f' 20 deg: filtered_rms = {result_20_std["filtered_rms_nm"]:.2f} nm')
print(f' 40 deg: filtered_rms = {result_40_std["filtered_rms_nm"]:.2f} nm')
print(f' 60 deg: filtered_rms = {result_60_std["filtered_rms_nm"]:.2f} nm')
print(f' 90 deg: filtered_rms = {result_90_std["filtered_rms_nm"]:.2f} nm')
print()
# Same difference-of-RMS arithmetic, this time with the standard extractor.
print('RELATIVE values (Standard):')
rel_40_std = result_40_std['filtered_rms_nm'] - result_20_std['filtered_rms_nm']
rel_60_std = result_60_std['filtered_rms_nm'] - result_20_std['filtered_rms_nm']
print(f' 40-20: {rel_40_std:.2f} nm (abs: {abs(rel_40_std):.2f})')
print(f' 60-20: {rel_60_std:.2f} nm (abs: {abs(rel_60_std):.2f})')
# Compare
print()
print('='*70)
print('COMPARISON: OPD vs Standard')
print('='*70)
print()
print(f'40-20: OPD={abs(rel_40_opd):.2f} nm vs Standard={abs(rel_40_std):.2f} nm')
print(f'60-20: OPD={abs(rel_60_opd):.2f} nm vs Standard={abs(rel_60_std):.2f} nm')
print()
# Lateral diagnostics only exist in the OPD extractor's results; default to 0.
print('Lateral displacement (OPD method):')
print(f' Max: {result_40_opd.get("max_lateral_displacement_um", 0):.3f} um')
print(f' RMS: {result_40_opd.get("rms_lateral_displacement_um", 0):.3f} um')
# Now check what V9 reports
print()
print('='*70)
print('V9 COMPARISON (iter12 from best archive)')
print('='*70)
op2_v9 = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V9/2_iterations/iter12/assy_m1_assyfem1_sim1-solution_1.op2')
if op2_v9.exists():
    extractor_v9_opd = ZernikeOPDExtractor(op2_v9, n_modes=50, filter_orders=4)
    extractor_v9_std = ZernikeExtractor(op2_v9, n_modes=50, filter_orders=4)
    r20_v9_opd = extractor_v9_opd.extract_subcase('2')
    r40_v9_opd = extractor_v9_opd.extract_subcase('3')
    r60_v9_opd = extractor_v9_opd.extract_subcase('4')
    r20_v9_std = extractor_v9_std.extract_subcase('2')
    r40_v9_std = extractor_v9_std.extract_subcase('3')
    r60_v9_std = extractor_v9_std.extract_subcase('4')
    rel_40_v9_opd = abs(r40_v9_opd['filtered_rms_nm'] - r20_v9_opd['filtered_rms_nm'])
    rel_60_v9_opd = abs(r60_v9_opd['filtered_rms_nm'] - r20_v9_opd['filtered_rms_nm'])
    rel_40_v9_std = abs(r40_v9_std['filtered_rms_nm'] - r20_v9_std['filtered_rms_nm'])
    rel_60_v9_std = abs(r60_v9_std['filtered_rms_nm'] - r20_v9_std['filtered_rms_nm'])
    print()
    print('V9 iter12 relative values:')
    print(f' 40-20: OPD={rel_40_v9_opd:.2f} nm vs Standard={rel_40_v9_std:.2f} nm')
    print(f' 60-20: OPD={rel_60_v9_opd:.2f} nm vs Standard={rel_60_v9_std:.2f} nm')
else:
    print('V9 OP2 not found')
print()
print('='*70)
print('SUMMARY')
print('='*70)
print()
print('V10 reports: 40-20=1.99nm, 60-20=6.82nm (using OPD method)')
print('V9 reports: 40-20=6.10nm, 60-20=12.76nm (likely Standard method)')
print()
print('If both studies have SIMILAR geometry, the OPD method should NOT')
print('give such dramatically different values. This needs investigation.')

View File

@@ -0,0 +1,20 @@
"""Check API routes from running backend."""
import requests
import json
# Get OpenAPI spec
resp = requests.get("http://localhost:8000/openapi.json", timeout=10)
spec = resp.json()
# Find insight routes
print("Insight-related routes:")
print("=" * 60)
for path in sorted(spec.get("paths", {}).keys()):
if "insight" in path.lower():
print(f" {path}")
print()
print("All routes:")
print("-" * 60)
for path in sorted(spec.get("paths", {}).keys()):
print(f" {path}")

View File

@@ -0,0 +1,252 @@
"""
Compare V8 Best Candidate: OPD Method vs Standard Z-Only Method
This script extracts WFE using both methods and compares the results
to quantify the difference between using full X,Y,Z displacement (OPD)
vs just Z displacement (Standard).
"""
import sys
sys.path.insert(0, '.')
import sqlite3
from pathlib import Path
import json
import numpy as np
# Find V8 best trial
db_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V8/3_results/study.db')
print(f"V8 Database: {db_path}")
print(f"Exists: {db_path.exists()}")
if not db_path.exists():
print("ERROR: V8 database not found!")
sys.exit(1)
conn = sqlite3.connect(db_path)
c = conn.cursor()
# Get all completed trials with their objectives
c.execute('''
SELECT t.trial_id
FROM trials t
WHERE t.state = 'COMPLETE'
ORDER BY t.trial_id
''')
completed_ids = [row[0] for row in c.fetchall()]
print(f"Completed trials: {len(completed_ids)}")
# Build trial data and find best.  Objective values are stored as JSON in the
# trial_user_attributes table; 999 is the "missing value" sentinel.
trials = []
for tid in completed_ids:
    c.execute('''
        SELECT key, value_json
        FROM trial_user_attributes
        WHERE trial_id = ?
    ''', (tid,))
    attrs = {row[0]: json.loads(row[1]) for row in c.fetchall()}
    # V8 used different objective names - check what's available
    rms_40 = attrs.get('rel_filtered_rms_40_vs_20', attrs.get('filtered_rms_40_20', None))
    rms_60 = attrs.get('rel_filtered_rms_60_vs_20', attrs.get('filtered_rms_60_20', None))
    mfg_90 = attrs.get('mfg_90_optician_workload', attrs.get('optician_workload_90', None))
    ws = attrs.get('weighted_sum', None)
    if rms_40 is not None:
        # BUG FIX: the original used truthiness ("x if x else 999"), which
        # silently replaced a legitimate 0.0 objective with the 999 sentinel.
        # Test for None explicitly, consistent with the rms_40 check above.
        trials.append({
            'id': tid,
            'rms_40': rms_40,
            'rms_60': rms_60 if rms_60 is not None else 999,
            'mfg_90': mfg_90 if mfg_90 is not None else 999,
            'ws': ws if ws is not None else 999,
        })
conn.close()
if not trials:
    # Check what keys are available so the error message is actionable.
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    c.execute('SELECT DISTINCT key FROM trial_user_attributes LIMIT 20')
    keys = [row[0] for row in c.fetchall()]
    print(f"\nAvailable attribute keys: {keys}")
    conn.close()
    print("ERROR: No trials found with expected objective names!")
    sys.exit(1)
# Calculate weighted sum if not present (999 marks a missing weighted_sum).
for t in trials:
    if t['ws'] == 999:
        # Weights normalize each objective by its target value.
        w40 = 100 / 4.0
        w60 = 50 / 10.0
        w90 = 20 / 20.0
        t['ws'] = w40 * t['rms_40'] + w60 * t['rms_60'] + w90 * t['mfg_90']
# Find best: lowest weighted sum wins.
trials.sort(key=lambda x: x['ws'])
best = trials[0]
print(f"\n{'='*70}")
print("V8 Best Trial Summary")
print('='*70)
print(f"Best Trial: #{best['id']}")
print(f" 40-20 RMS: {best['rms_40']:.2f} nm")
print(f" 60-20 RMS: {best['rms_60']:.2f} nm")
print(f" MFG 90: {best['mfg_90']:.2f} nm")
print(f" WS: {best['ws']:.1f}")
# Now compare both extraction methods on this trial.
# Assumes iteration folders are named iter<trial_id> -- TODO confirm.
iter_path = Path(f'studies/M1_Mirror/m1_mirror_cost_reduction_V8/2_iterations/iter{best["id"]}')
op2_path = iter_path / 'assy_m1_assyfem1_sim1-solution_1.op2'
geo_path = iter_path / 'assy_m1_assyfem1_sim1-solution_1.dat'
print(f"\nIteration path: {iter_path}")
print(f"OP2 exists: {op2_path.exists()}")
print(f"Geometry exists: {geo_path.exists()}")
if not op2_path.exists():
    print("ERROR: OP2 file not found for best trial!")
    sys.exit(1)
print(f"\n{'='*70}")
print("Comparing Zernike Methods: OPD vs Standard")
print('='*70)
# Import extractors
from optimization_engine.extractors.extract_zernike_figure import ZernikeOPDExtractor
from optimization_engine.extractors.extract_zernike import ZernikeExtractor
# Standard method (Z-only)
print("\n1. STANDARD METHOD (Z-only displacement)")
print("-" * 50)
try:
    std_extractor = ZernikeExtractor(
        op2_path,
        bdf_path=geo_path,
        n_modes=50,
        filter_orders=4
    )
    # Extract relative WFE (target minus 20-deg reference, node-by-node).
    std_40_20 = std_extractor.extract_relative(target_subcase='3', reference_subcase='2')
    std_60_20 = std_extractor.extract_relative(target_subcase='4', reference_subcase='2')
    # MFG uses J1-J3 filter - need new extractor instance
    std_extractor_mfg = ZernikeExtractor(
        op2_path,
        bdf_path=geo_path,
        n_modes=50,
        filter_orders=3  # J1-J3 for manufacturing
    )
    std_90 = std_extractor_mfg.extract_subcase(subcase_label='1')
    print(f" 40-20 Relative RMS: {std_40_20['relative_filtered_rms_nm']:.2f} nm")
    print(f" 60-20 Relative RMS: {std_60_20['relative_filtered_rms_nm']:.2f} nm")
    print(f" 90 MFG (J1-J3): {std_90['filtered_rms_nm']:.2f} nm")
    std_results = {
        '40_20': std_40_20['relative_filtered_rms_nm'],
        '60_20': std_60_20['relative_filtered_rms_nm'],
        '90_mfg': std_90['filtered_rms_nm'],
    }
except Exception as e:
    # Best-effort: report the failure and carry on so the other method still runs.
    print(f" ERROR: {e}")
    import traceback
    traceback.print_exc()
    std_results = None
# OPD method (X,Y,Z displacement with mesh interpolation)
print("\n2. OPD METHOD (X,Y,Z displacement with mesh interpolation)")
print("-" * 50)
try:
    opd_extractor = ZernikeOPDExtractor(
        op2_path,
        bdf_path=geo_path,
        n_modes=50,
        filter_orders=4
    )
    # Extract relative WFE using OPD method
    opd_40_20 = opd_extractor.extract_relative(target_subcase='3', reference_subcase='2')
    opd_60_20 = opd_extractor.extract_relative(target_subcase='4', reference_subcase='2')
    # MFG uses J1-J3 filter
    opd_extractor_mfg = ZernikeOPDExtractor(
        op2_path,
        bdf_path=geo_path,
        n_modes=50,
        filter_orders=3  # J1-J3 for manufacturing
    )
    opd_90 = opd_extractor_mfg.extract_subcase(subcase_label='1')
    print(f" 40-20 Relative RMS: {opd_40_20['relative_filtered_rms_nm']:.2f} nm")
    print(f" 60-20 Relative RMS: {opd_60_20['relative_filtered_rms_nm']:.2f} nm")
    print(f" 90 MFG (J1-J3): {opd_90['filtered_rms_nm']:.2f} nm")
    # Also get lateral displacement info (only the OPD extractor provides it).
    print(f"\n Lateral Displacement (40° vs 20°):")
    print(f" Max: {opd_40_20.get('max_lateral_displacement_um', 'N/A')} µm")
    print(f" RMS: {opd_40_20.get('rms_lateral_displacement_um', 'N/A')} µm")
    opd_results = {
        '40_20': opd_40_20['relative_filtered_rms_nm'],
        '60_20': opd_60_20['relative_filtered_rms_nm'],
        '90_mfg': opd_90['filtered_rms_nm'],
        'lateral_max': opd_40_20.get('max_lateral_displacement_um', 0),
        'lateral_rms': opd_40_20.get('rms_lateral_displacement_um', 0),
    }
except Exception as e:
    # Best-effort: report the failure and continue to the comparison section.
    print(f" ERROR: {e}")
    import traceback
    traceback.print_exc()
    opd_results = None
# Comparison: only meaningful when both extraction methods succeeded above.
if std_results and opd_results:
    print(f"\n{'='*70}")
    print("COMPARISON: OPD vs Standard Method")
    print('='*70)
    print(f"\n{'Metric':<25} {'Standard':<12} {'OPD':<12} {'Delta':<12} {'Delta %':<10}")
    print("-" * 70)
    for key, label in [('40_20', '40-20 RMS (nm)'), ('60_20', '60-20 RMS (nm)'), ('90_mfg', '90 MFG (nm)')]:
        std_val = std_results[key]
        opd_val = opd_results[key]
        delta = opd_val - std_val
        delta_pct = 100 * delta / std_val if std_val > 0 else 0
        # BUG FIX: the original spec '{delta:+<12.2f}' parsed '+' as the FILL
        # character and '<' as the alignment, so deltas printed padded with
        # plus signs (e.g. '3.50++++++++'). '<+12.2f' is the intended
        # left-aligned, always-signed, width-12 float.
        print(f"{label:<25} {std_val:<12.2f} {opd_val:<12.2f} {delta:<+12.2f} {delta_pct:+.1f}%")
    print(f"\n{'='*70}")
    print("INTERPRETATION")
    print('='*70)
    delta_40 = opd_results['40_20'] - std_results['40_20']
    delta_60 = opd_results['60_20'] - std_results['60_20']
    print(f"""
The OPD method accounts for lateral (X,Y) displacement when computing WFE.
For telescope mirrors with lateral supports:
- Gravity causes the mirror to shift laterally (X,Y) as well as sag (Z)
- The Standard method ignores this lateral shift
- The OPD method interpolates the ideal surface at deformed (x+dx, y+dy) positions
Key observations:
- 40-20 difference: {delta_40:+.2f} nm ({100*delta_40/std_results['40_20']:+.1f}%)
- 60-20 difference: {delta_60:+.2f} nm ({100*delta_60/std_results['60_20']:+.1f}%)
- Lateral displacement: Max {opd_results['lateral_max']:.3f} µm, RMS {opd_results['lateral_rms']:.3f} µm
Significance:
""")
    # Heuristic thresholds: < 0.5 nm difference counts as "small".
    if abs(delta_40) < 0.5 and abs(delta_60) < 0.5:
        print(" -> SMALL DIFFERENCE: For this design, lateral displacement is minimal.")
        print(" Both methods give similar results.")
    else:
        print(" -> SIGNIFICANT DIFFERENCE: Lateral displacement affects WFE computation.")
        print(" OPD method is more physically accurate for this geometry.")
    if opd_results['lateral_rms'] > 0.1:
        print(f"\n WARNING: Lateral RMS {opd_results['lateral_rms']:.3f} µm is notable.")
        print(" OPD method recommended for accurate optimization.")

View File

@@ -0,0 +1,83 @@
"""Debug script to compare figure.dat vs BDF node coordinates."""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
import numpy as np
import logging
logging.disable(logging.WARNING)
study_dir = Path(r"c:\Users\antoi\Atomizer\studies\M1_Mirror\m1_mirror_cost_reduction_V9")
# Load figure.dat
from optimization_engine.extractors.extract_zernike_figure import load_figure_geometry
fig_geo = load_figure_geometry(study_dir / "1_setup/model/figure.dat")
fig_nids = set(fig_geo.keys())
# Find OP2 and BDF
op2_file = list(study_dir.glob("3_results/best_design_archive/**/*.op2"))[0]
bdf_file = op2_file.with_suffix(".dat")
# Load BDF
from pyNastran.bdf.bdf import BDF
bdf = BDF(log=None, debug=False)
bdf.read_bdf(str(bdf_file))
bdf_nids = set(bdf.nodes.keys())
# Load OP2
from pyNastran.op2.op2 import OP2
op2 = OP2(log=None, debug=False)
op2.read_op2(str(op2_file))
disps = op2.displacements
first_key = list(disps.keys())[0]
op2_nids = set(int(n) for n in disps[first_key].node_gridtype[:,0])
print(f"Figure.dat nodes: {len(fig_nids)}")
print(f"BDF nodes: {len(bdf_nids)}")
print(f"OP2 nodes: {len(op2_nids)}")
print()
print(f"Figure ^ BDF: {len(fig_nids & bdf_nids)}")
print(f"Figure ^ OP2: {len(fig_nids & op2_nids)}")
print(f"BDF ^ OP2: {len(bdf_nids & op2_nids)}")
# Sample coords - use a node in all three
common_nids = list(fig_nids & bdf_nids & op2_nids)[:5]
print()
print("Sample common node coords comparison:")
z_diffs = []
for nid in common_nids:
fig_pos = fig_geo[nid]
bdf_pos = bdf.nodes[nid].get_position()
diff = np.array(fig_pos) - bdf_pos
z_diffs.append(diff[2])
print(f" Node {nid}:")
print(f" Figure: ({fig_pos[0]:.6f}, {fig_pos[1]:.6f}, {fig_pos[2]:.9f})")
print(f" BDF: ({bdf_pos[0]:.6f}, {bdf_pos[1]:.6f}, {bdf_pos[2]:.9f})")
print(f" Z diff: {diff[2]*1e6:.3f} nm")
# Statistics on all matching nodes
all_common = fig_nids & bdf_nids
all_z_diffs = []
all_xy_diffs = []
for nid in all_common:
fig_pos = np.array(fig_geo[nid])
bdf_pos = bdf.nodes[nid].get_position()
diff = fig_pos - bdf_pos
all_z_diffs.append(diff[2])
all_xy_diffs.append(np.sqrt(diff[0]**2 + diff[1]**2))
all_z_diffs = np.array(all_z_diffs)
all_xy_diffs = np.array(all_xy_diffs)
print()
print(f"=== ALL {len(all_common)} COMMON NODES ===")
print(f"Z difference (figure - BDF):")
print(f" Min: {all_z_diffs.min()*1e6:.3f} nm")
print(f" Max: {all_z_diffs.max()*1e6:.3f} nm")
print(f" Mean: {all_z_diffs.mean()*1e6:.3f} nm")
print(f" RMS: {np.sqrt(np.mean(all_z_diffs**2))*1e6:.3f} nm")
print()
print(f"XY difference (figure - BDF):")
print(f" Max: {all_xy_diffs.max()*1e3:.6f} um")
print(f" RMS: {np.sqrt(np.mean(all_xy_diffs**2))*1e3:.6f} um")

View File

@@ -0,0 +1,50 @@
"""Debug insights availability for a study."""
import sys
sys.path.insert(0, ".")
from pathlib import Path
# Test study path resolution
study_id = 'm1_mirror_cost_reduction_V9'
STUDIES_DIR = Path('studies')
# Check nested path
for topic_dir in STUDIES_DIR.iterdir():
if topic_dir.is_dir():
study_dir = topic_dir / study_id
if study_dir.exists():
print(f"Found study at: {study_dir}")
print(f"Has 1_setup: {(study_dir / '1_setup').exists()}")
print(f"Has 2_results: {(study_dir / '2_results').exists()}")
# Check what insights are available
from optimization_engine.insights import list_available_insights, get_configured_insights, recommend_insights_for_study
print("\n--- Available insights (can_generate=True) ---")
available = list_available_insights(study_dir)
print(f"Count: {len(available)}")
for a in available:
print(f" - {a}")
print("\n--- Configured insights ---")
configured = get_configured_insights(study_dir)
print(f"Count: {len(configured)}")
for c in configured:
print(f" - {c.type}: {c.name}")
print("\n--- Recommendations ---")
recs = recommend_insights_for_study(study_dir)
print(f"Count: {len(recs)}")
for r in recs:
print(f" - {r['type']}: {r['name']}")
# Test individual insight can_generate
print("\n--- Testing each insight's can_generate ---")
from optimization_engine.insights import get_insight, list_insights
for info in list_insights():
insight = get_insight(info['type'], study_dir)
if insight:
can = insight.can_generate()
print(f" {info['type']:20} can_generate={can}")
break

View File

@@ -0,0 +1,181 @@
"""
Debug script to investigate the lateral displacement discrepancy between
Zernike OPD output and Simcenter post-processing.
User observation:
- Zernike OPD shows max lateral XY displacement: ~0.2238 µm
- Simcenter shows XX displacement at 20deg: 7.457e-05 mm = 0.0746 µm
Hypothesis: The Zernike "max_lateral_disp_um" is sqrt(dx² + dy²), not just dx or dy.
"""
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
from pathlib import Path
# Find a mirror study OP2 to analyze
STUDIES_PATH = Path(r"c:\Users\antoi\Atomizer\studies\M1_Mirror")
def find_latest_op2():
    """Return the most recently modified OP2 under STUDIES_PATH, or None."""
    candidates = sorted(STUDIES_PATH.rglob("*.op2"), key=lambda p: p.stat().st_mtime)
    if not candidates:
        print("No OP2 files found!")
        return None
    # Newest file sorts last.
    return candidates[-1]
def analyze_displacements(op2_path: Path) -> None:
    """Analyze displacement components for all subcases.

    Prints per-subcase dx/dy/dz statistics from the OP2 and the combined
    lateral magnitude sqrt(dx^2 + dy^2), then explains why that magnitude
    differs from Simcenter's single-component nodal display.
    """
    from pyNastran.op2.op2 import OP2
    from pyNastran.bdf.bdf import BDF
    print(f"\n{'='*70}")
    print(f"Analyzing: {op2_path.name}")
    print(f"Path: {op2_path}")
    print(f"{'='*70}")
    # Find BDF: try the sibling file with a .dat/.bdf suffix first.
    bdf_path = None
    for ext in ['.dat', '.bdf']:
        candidate = op2_path.with_suffix(ext)
        if candidate.exists():
            bdf_path = candidate
            break
    if not bdf_path:
        # Fall back to any geometry file in the same directory.
        print("No BDF found, searching parent...")
        for f in op2_path.parent.iterdir():
            if f.suffix.lower() in ['.dat', '.bdf']:
                bdf_path = f
                break
    if not bdf_path:
        print("ERROR: No geometry file found!")
        return
    print(f"BDF: {bdf_path.name}")
    # Read data
    print("\nLoading OP2...")
    op2 = OP2()
    op2.read_op2(str(op2_path))
    print("Loading BDF...")
    bdf = BDF()
    bdf.read_bdf(str(bdf_path))
    node_geo = {int(nid): node.get_position() for nid, node in bdf.nodes.items()}
    # Subcase label -> gravity orientation in degrees (study convention).
    SUBCASE_MAP = {'1': 90, '2': 20, '3': 40, '4': 60}
    print(f"\nNode count in BDF: {len(node_geo)}")
    print(f"\n{'='*70}")
    print("DISPLACEMENT ANALYSIS BY SUBCASE")
    print(f"{'='*70}")
    for key, darr in op2.displacements.items():
        data = darr.data
        # Normalize to a 2-D (node, component) matrix; skip anything else.
        dmat = data[0] if data.ndim == 3 else (data if data.ndim == 2 else None)
        if dmat is None:
            continue
        ngt = darr.node_gridtype.astype(int)
        node_ids = ngt if ngt.ndim == 1 else ngt[:, 0]
        isubcase = getattr(darr, 'isubcase', None)
        label = str(isubcase) if isubcase else str(key)
        angle = SUBCASE_MAP.get(label, label)
        print(f"\n--- Subcase {label} ({angle} deg) ---")
        print(f"Nodes with displacement: {len(node_ids)}")
        # Extract displacement components
        dx = dmat[:, 0]  # X displacement (mm)
        dy = dmat[:, 1]  # Y displacement (mm)
        dz = dmat[:, 2]  # Z displacement (mm)
        # Compute statistics (x1000 converts mm to um, assuming mm model units)
        dx_um = dx * 1000.0  # Convert mm to µm
        dy_um = dy * 1000.0
        dz_um = dz * 1000.0
        # Lateral magnitude (XY combined)
        lateral_um = np.sqrt(dx_um**2 + dy_um**2)
        print(f"\nComponent Statistics (in um):")
        print(f" X displacement (dx):")
        print(f" Min: {np.min(dx_um):+.4f} um")
        print(f" Max: {np.max(dx_um):+.4f} um")
        print(f" RMS: {np.sqrt(np.mean(dx_um**2)):.4f} um")
        print(f" Y displacement (dy):")
        print(f" Min: {np.min(dy_um):+.4f} um")
        print(f" Max: {np.max(dy_um):+.4f} um")
        print(f" RMS: {np.sqrt(np.mean(dy_um**2)):.4f} um")
        print(f" Z displacement (dz):")
        print(f" Min: {np.min(dz_um):+.4f} um")
        print(f" Max: {np.max(dz_um):+.4f} um")
        print(f" RMS: {np.sqrt(np.mean(dz_um**2)):.4f} um")
        print(f"\n Lateral Magnitude (sqrt(dx^2 + dy^2)):")
        print(f" Max: {np.max(lateral_um):.4f} um <-- This is what Zernike OPD reports!")
        print(f" RMS: {np.sqrt(np.mean(lateral_um**2)):.4f} um")
        # Find the node with max lateral displacement
        max_lat_idx = np.argmax(lateral_um)
        max_lat_nid = int(node_ids[max_lat_idx])
        max_dx = dx_um[max_lat_idx]
        max_dy = dy_um[max_lat_idx]
        print(f"\n Node with max lateral displacement: Node {max_lat_nid}")
        print(f" dx = {max_dx:+.4f} um")
        print(f" dy = {max_dy:+.4f} um")
        print(f" sqrt(dx^2+dy^2) = {lateral_um[max_lat_idx]:.4f} um")
        # Compare with just max(|dx|) -- this matches Simcenter's per-component view
        max_abs_dx = np.max(np.abs(dx_um))
        max_abs_dy = np.max(np.abs(dy_um))
        print(f"\n For comparison (what you see in Simcenter):")
        print(f" max(|dx|) = {max_abs_dx:.4f} um")
        print(f" max(|dy|) = {max_abs_dy:.4f} um")
    # NOTE(review): reconstructed as running once after the subcase loop.
    print(f"\n{'='*70}")
    print("EXPLANATION OF DISCREPANCY")
    print(f"{'='*70}")
    print("""
The Zernike OPD insight reports "max_lateral_disp_um" as:
lateral = sqrt(dx^2 + dy^2) -- combined XY magnitude at each node
Simcenter's "Displacement - Nodal, X" shows:
Just the X component (dx) at each node
These are different metrics:
- If dx_max = 0.0746 um and dy is significant, then:
lateral = sqrt(0.0746^2 + dy^2) > 0.0746 um
To match Simcenter exactly, look at the individual dx/dy/dz stats above.
The "max_lateral_um" in Zernike OPD is the MAGNITUDE of the XY vector,
not the individual X or Y components.
For a node where both dx and dy are non-zero:
dx = 0.0746 um, dy = 0.21 um
lateral = sqrt(0.0746^2 + 0.21^2) = sqrt(0.0056 + 0.044) = sqrt(0.0496) = 0.22 um
""")
if __name__ == '__main__':
    # CLI entry point: analyze the OP2 given on the command line, otherwise
    # fall back to the newest OP2 found under the studies directory.
    target = Path(sys.argv[1]) if len(sys.argv) > 1 else find_latest_op2()
    if target and target.exists():
        analyze_displacements(target)
    else:
        print("Please provide an OP2 file path as argument, or place OP2 files in the studies directory.")
        print(f"\nUsage: python {sys.argv[0]} <path_to_op2_file>")