## Cleanup (v0.5.0)

- Delete 102+ orphaned MCP session temp files
- Remove build artifacts (htmlcov, dist, __pycache__)
- Archive superseded plan docs (RALPH_LOOP V2/V3, CANVAS V3, etc.)
- Move debug/analysis scripts from tests/ to tools/analysis/
- Archive redundant NX journals to archive/nx_journals/
- Archive monolithic PROTOCOL.md to docs/archive/
- Update .gitignore with missing patterns
- Clean old study files (optimization_log_old.txt, run_optimization_old.py)

## Canvas UX (Phases 7-9)

- Phase 7: Resizable panels with localStorage persistence
  - Left sidebar: 200-400px, Right panel: 280-600px
  - New useResizablePanel hook and ResizeHandle component
- Phase 8: Enable all palette items
  - All 8 node types now draggable
  - Singleton logic for model/solver/algorithm/surrogate
- Phase 9: Solver configuration
  - Add SolverEngine type (nxnastran, mscnastran, python, etc.)
  - Add NastranSolutionType (SOL101-SOL200)
  - Engine/solution dropdowns in config panel
  - Python script path support

## Documentation

- Update CHANGELOG.md with recent versions
- Update docs/00_INDEX.md
- Create examples/README.md
- Add docs/plans/CANVAS_UX_IMPROVEMENTS.md
151 lines · 5.0 KiB · Python
"""Analyze V11 optimization results."""
|
|
import sqlite3
|
|
from pathlib import Path
|
|
import json
|
|
import sys
|
|
sys.path.insert(0, '.')
|
|
|
|
db_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11/3_results/study.db')
|
|
|
|
conn = sqlite3.connect(db_path)
c = conn.cursor()

# Get all completed trials, in trial-id order.  Only COMPLETE trials carry
# usable objective values.
c.execute('''
    SELECT t.trial_id
    FROM trials t
    WHERE t.state = 'COMPLETE'
    ORDER BY t.trial_id
''')
completed_ids = [row[0] for row in c.fetchall()]
print(f'Completed trials: {len(completed_ids)}')
# Build trial data: collect the per-trial user attributes for every completed
# trial.  Only trials that recorded the key 40-20 RMS metric are kept; any
# other missing metric falls back to the sentinel 999 so it sorts last.
trials = []
for tid in completed_ids:
    # Parameterized query — the trial id is bound, never string-formatted.
    c.execute('''
        SELECT key, value_json
        FROM trial_user_attributes
        WHERE trial_id = ?
    ''', (tid,))
    attrs = {row[0]: json.loads(row[1]) for row in c.fetchall()}

    if 'rel_filtered_rms_40_vs_20' in attrs:
        trials.append({
            'id': tid,
            'rms_40': attrs.get('rel_filtered_rms_40_vs_20', 999),
            'rms_60': attrs.get('rel_filtered_rms_60_vs_20', 999),
            'mfg_90': attrs.get('mfg_90_optician_workload', 999),
            'ws': attrs.get('weighted_sum', 999),
            'lateral': attrs.get('lateral_rms_um', 0),
        })
# Sort by weighted sum, calculating it for trials that did not record one.
# Each objective is normalized by its target value, then scaled by its
# relative importance (100 / 50 / 20).
for t in trials:
    if t['ws'] == 999:  # 999 is the "missing" sentinel from the build step
        # Calculate weighted sum
        w40 = 100 / 4.0   # 40-20 RMS, target 4 nm
        w60 = 50 / 10.0   # 60-20 RMS, target 10 nm
        w90 = 20 / 20.0   # MFG 90 workload, target 20 nm
        t['ws'] = w40 * t['rms_40'] + w60 * t['rms_60'] + w90 * t['mfg_90']

# Find best: lowest weighted sum wins.
trials.sort(key=lambda x: x['ws'])
best = trials[0] if trials else None
if best is None:
    # Bail out early — the reporting code below dereferences `best`
    # unconditionally and would crash on an empty study.
    sys.exit('No completed trials with rel_filtered_rms_40_vs_20 found.')
# Summary: per-objective ranges across all kept trials, plus the best trial.
print('\nV11 Optimization Summary')
print('=' * 70)
print(f'Completed trials: {len(trials)}')

if trials:
    rms40 = [t['rms_40'] for t in trials]
    rms60 = [t['rms_60'] for t in trials]
    mfg90 = [t['mfg_90'] for t in trials]

    print(f'\n40-20 RMS range: {min(rms40):.2f} - {max(rms40):.2f} nm (target: 4.0 nm)')
    print(f'60-20 RMS range: {min(rms60):.2f} - {max(rms60):.2f} nm (target: 10.0 nm)')
    print(f'90 MFG range: {min(mfg90):.2f} - {max(mfg90):.2f} nm (target: 20.0 nm)')

    print(f'\nBest Trial (#{best["id"]}):')
    print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
    print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
    print(f' MFG 90: {best["mfg_90"]:.2f} nm')
    print(f' Lateral: {best["lateral"]:.3f} um')
    print(f' WS: {best["ws"]:.1f}')
# Check if values make sense (not too good to be true).  Suspiciously low RMS
# usually indicates a metric-extraction bug — see the V10 comparison below.
print('\nSanity Check:')
if best['rms_40'] < 3.0:
    print(' WARNING: 40-20 RMS suspiciously low!')
else:
    print(f' 40-20 RMS {best["rms_40"]:.2f} nm - looks reasonable (expected ~6-7nm)')

if best['rms_60'] < 8.0:
    print(' WARNING: 60-20 RMS suspiciously low!')
else:
    print(f' 60-20 RMS {best["rms_60"]:.2f} nm - looks reasonable (expected ~13-15nm)')
# Compare to the V7 baseline (known-correct) and the V10 buggy values so the
# reader can judge the V11 numbers at a glance.
print('\n' + '=' * 70)
print('Comparison to Known Values:')
print('=' * 70)
print('\nV7 Baseline (original Zernike, correct):')
print(' 40-20 RMS: 6.05 nm')
print(' 60-20 RMS: 13.03 nm')
print(' MFG 90: 26.34 nm')

print('\nV10 BUGGY (abs(RMS_target - RMS_ref) - WRONG):')
print(' 40-20 RMS: ~1.99 nm <- WAY TOO LOW')
print(' 60-20 RMS: ~6.82 nm <- WAY TOO LOW')

print('\nV11 Best (extract_relative() - CORRECT):')
print(f' 40-20 RMS: {best["rms_40"]:.2f} nm')
print(f' 60-20 RMS: {best["rms_60"]:.2f} nm')
print(f' MFG 90: {best["mfg_90"]:.2f} nm')
# Verdict: flag each relative RMS as correct when it falls inside the band
# expected from the V7 baseline.
print('\n' + '=' * 70)
print('VERDICT:')
print('=' * 70)
if 5.0 < best['rms_40'] < 10.0:
    print(' 40-20: CORRECT - matches expected V7 range')
else:
    print(f' 40-20: INVESTIGATE - {best["rms_40"]:.2f} nm is outside expected range')

if 10.0 < best['rms_60'] < 20.0:
    print(' 60-20: CORRECT - matches expected V7 range')
else:
    print(f' 60-20: INVESTIGATE - {best["rms_60"]:.2f} nm is outside expected range')

conn.close()
# Now generate the Zernike dashboard so the best iteration can be verified
# visually as well as numerically.
print('\n' + '=' * 70)
print('Generating Zernike Dashboard for Best Iteration...')
print('=' * 70)

# Imported here (not at the top) so the sys.path tweak above is in effect.
from optimization_engine.insights import ZernikeDashboardInsight, InsightConfig

study_path = Path('studies/M1_Mirror/m1_mirror_cost_reduction_V11')

# Report which OP2 archive holds the best design, if one exists.
best_archive = study_path / '3_results' / 'best_design_archive'
if best_archive.exists():
    op2_files = list(best_archive.glob('*.op2'))
    if op2_files:
        print(f'Found best design archive: {op2_files[0].name}')

# NOTE(review): the insight re-checks for OP2 files via can_generate(), so it
# is attempted regardless of whether the archive folder above was found —
# confirm this matches the intended flow.
insight = ZernikeDashboardInsight(study_path)
if insight.can_generate():
    print('Generating dashboard...')
    result = insight.generate(InsightConfig())
    if result.success:
        print(f'\nDashboard generated: {result.html_path}')
        print('\nDashboard Summary:')
        for key, value in result.summary.items():
            print(f' {key}: {value}')
    else:
        print(f'Error: {result.error}')
else:
    print('Cannot generate insight - no OP2 files found')