feat: Add Zernike wavefront viewer and V14 TPE optimization study
Dashboard Zernike Analysis:
- Add ZernikeViewer component with tabbed UI (40°, 60°, 90° vs 20°)
- Generate 3D surface mesh plots with Mesh3d triangulation
- Full 50-mode Zernike coefficient tables with mode names
- Manufacturing metrics for 90_vs_20 (optician workload analysis)
- OP2 availability filter for FEA trials only
- Fix duplicate trial display with unique React keys
- Tab switching with proper event propagation
Backend API Enhancements:
- GET /studies/{id}/trials/{num}/zernike - Generate Zernike HTML on-demand
- GET /studies/{id}/zernike-available - List trials with OP2 files
- compute_manufacturing_metrics() for aberration analysis
- compute_rms_filter_j1to3() for optician workload metric
M1 Mirror V14 Study:
- TPE (Tree-structured Parzen Estimator) optimization
- Seeds from 496 prior FEA trials (V11+V12+V13)
- Weighted-sum objective: 5*obj_40 + 5*obj_60 + 1*obj_mfg
- Multivariate TPE with constant_liar for efficient exploration
- Ready for 8-hour overnight runs
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -446,12 +446,17 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
|
||||
# Extract source for FEA vs NN differentiation
|
||||
source = user_attrs.get("source", "FEA") # Default to FEA for legacy studies
|
||||
|
||||
# Use trial_id as unique identifier when multiple Optuna studies exist
|
||||
# This avoids trial number collisions between studies
|
||||
unique_trial_num = trial_id if study_name else trial_num
|
||||
# Get iter_num from user_attrs if available (this is the actual iteration folder number)
|
||||
iter_num = user_attrs.get("iter_num", None)
|
||||
|
||||
# Use iter_num if available, otherwise use trial_id as unique identifier
|
||||
# trial_id is unique across all studies in the database
|
||||
unique_trial_num = iter_num if iter_num is not None else trial_id
|
||||
|
||||
trials.append({
|
||||
"trial_number": unique_trial_num,
|
||||
"trial_id": trial_id, # Keep original for debugging
|
||||
"optuna_trial_num": trial_num, # Keep original Optuna trial number
|
||||
"objective": values[0] if len(values) > 0 else None, # Primary objective
|
||||
"objectives": values if len(values) > 1 else None, # All objectives for multi-objective
|
||||
"design_variables": final_design_vars, # Use merged design vars
|
||||
@@ -2227,6 +2232,575 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
|
||||
raise HTTPException(status_code=500, detail=f"Failed to update config: {str(e)}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Zernike Analysis Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.get("/studies/{study_id}/zernike-available")
async def get_zernike_available_trials(study_id: str):
    """
    Get list of trial numbers that have Zernike analysis available (OP2 files).

    Scans the study's ``2_iterations`` directory for ``iter<N>`` folders that
    contain at least one Nastran OP2 results file.  Folder numbers are mapped
    to 0-indexed trial numbers (``iter1`` -> trial 0), except the special test
    iteration 9999 which is reported verbatim.

    Args:
        study_id: Study identifier (directory name under STUDIES_DIR).

    Returns:
        JSON with the study id, a sorted list of trial numbers that have OP2
        files, and the count.

    Raises:
        HTTPException: 404 if the study directory does not exist; 500 on any
            unexpected error.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")

        iter_base = study_dir / "2_iterations"
        if not iter_base.exists():
            return {"study_id": study_id, "available_trials": [], "count": 0}

        # Set (not list) so two folders mapping to the same trial number
        # cannot produce duplicates in the response.
        found: set[int] = set()
        for d in iter_base.iterdir():
            if not (d.is_dir() and d.name.startswith('iter')):
                continue
            # Only folders that actually contain FEA results are usable.
            if not any(d.glob("*.op2")):
                continue
            # removeprefix (not replace) strips only the leading 'iter', so a
            # folder name containing 'iter' more than once cannot mis-parse.
            iter_num_str = d.name.removeprefix('iter')
            try:
                iter_num = int(iter_num_str)
            except ValueError:
                continue  # non-numeric suffix, e.g. 'iter_backup' — skip
            # Map iter number to trial number (iter1 -> trial 0, etc.);
            # 9999 is a special test iteration kept as-is.
            found.add(iter_num if iter_num == 9999 else iter_num - 1)

        available_trials = sorted(found)
        return {
            "study_id": study_id,
            "available_trials": available_trials,
            "count": len(available_trials)
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get available trials: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/studies/{study_id}/trials/{trial_number}/zernike")
async def get_trial_zernike(study_id: str, trial_number: int):
    """
    Generate or retrieve Zernike analysis HTML for a specific trial.

    This endpoint generates interactive Zernike wavefront analysis for mirror
    optimization trials. It produces 3D surface residual plots, RMS metrics,
    and coefficient bar charts for each angle comparison (40_vs_20, 60_vs_20,
    90_vs_20).

    Args:
        study_id: Study identifier
        trial_number: Trial/iteration number

    Returns:
        JSON with HTML content for each comparison, or error if OP2 not found
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")

        # Find iteration directory.
        # Trial numbers in Optuna DB may differ from iteration folder numbers.
        # Common patterns:
        # 1. iter{trial_number} - direct mapping
        # 2. iter{trial_number + 1} - 0-indexed trials vs 1-indexed folders
        # 3. Check for actual folder existence
        iter_dir = None
        possible_iter_nums = [trial_number, trial_number + 1]

        for iter_num in possible_iter_nums:
            candidate = study_dir / "2_iterations" / f"iter{iter_num}"
            if candidate.exists():
                iter_dir = candidate
                break

        if iter_dir is None:
            # No folder under either naming convention -> likely an NN
            # (surrogate) trial that never ran FEA.
            raise HTTPException(
                status_code=404,
                detail=f"No FEA results for trial {trial_number}. This trial may have used surrogate model (NN) prediction instead of full FEA simulation. Zernike analysis requires OP2 results from actual FEA runs."
            )

        # Check for OP2 file BEFORE doing expensive imports.
        op2_files = list(iter_dir.glob("*.op2"))
        if not op2_files:
            raise HTTPException(
                status_code=404,
                detail=f"No OP2 results file found in {iter_dir.name}. FEA may not have completed."
            )

        # Only import heavy dependencies after we know we have an OP2 file.
        # NOTE(review): mutating sys.path per request is fragile; presumably
        # needed to reach optimization_engine outside the package — confirm
        # and consider moving to app startup.
        sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
        from optimization_engine.extractors import ZernikeExtractor
        import numpy as np
        from math import factorial
        import plotly.graph_objects as go
        from plotly.subplots import make_subplots
        from matplotlib.tri import Triangulation

        # Configuration (visualization tuning constants)
        N_MODES = 50            # number of Zernike modes fitted and tabulated
        AMP = 50.0              # Increased from 2.0 for better visibility
        PANCAKE = 5.0           # Reduced from 10.0 for more Z range
        PLOT_DOWNSAMPLE = 5000  # Reduced for faster loading
        FILTER_LOW_ORDERS = 4   # modes J1-J4 removed for the "filtered" residual

        # Map Nastran subcase id -> gravity-orientation angle label (degrees).
        SUBCASE_MAP = {
            '1': '90', '2': '20', '3': '40', '4': '60',
        }
        REF_SUBCASE = '2'  # 20 deg is the reference orientation
|
||||
|
||||
def noll_indices(j: int):
    """Map a 1-based mode index ``j`` to radial/azimuthal orders ``(n, m)``.

    NOTE: within each radial order n the azimuthal orders are enumerated as
    0, -2, +2, ... (even n) or -1, +1, -3, +3, ... (odd n).  This deviates
    from the textbook Noll sign convention (which picks the sign of m from
    the parity of j) but is the ordering used consistently throughout this
    module — do not "correct" it in isolation.
    """
    if j < 1:
        raise ValueError("Noll index j must be >= 1")
    seen = 0
    n = 0
    while True:
        if n % 2 == 0:
            orders = [0]
            for k in range(2, n + 1, 2):
                orders.extend((-k, k))
        else:
            orders = []
            for k in range(1, n + 1, 2):
                orders.extend((-k, k))
        for m in orders:
            seen += 1
            if seen == j:
                return n, m
        n += 1

def zernike_noll(j: int, r: np.ndarray, th: np.ndarray) -> np.ndarray:
    """Evaluate Zernike mode ``j`` (in this module's ordering) at polar (r, th).

    r is the unit-normalized radius, th the azimuth in radians; both arrays
    must have the same shape.  Returns an array of the same shape.
    """
    n, m = noll_indices(j)
    a = abs(m)
    # Radial polynomial R_n^{|m|}(r), built term by term.
    radial = np.zeros_like(r)
    for s in range((n - a) // 2 + 1):
        coef = (-1) ** s * factorial(n - s)
        coef /= factorial(s) * factorial((n + a) // 2 - s) * factorial((n - a) // 2 - s)
        radial = radial + coef * r ** (n - 2 * s)
    # Azimuthal factor: cosine for m > 0, sine for m < 0, none for m == 0.
    if m > 0:
        return radial * np.cos(m * th)
    if m < 0:
        return radial * np.sin(-m * th)
    return radial
|
||||
|
||||
def zernike_common_name(n: int, m: int) -> str:
    """Return the conventional optics name for Zernike (n, m), or a generic label."""
    lookup = {
        (0, 0): "Piston",
        (1, -1): "Tilt X",
        (1, 1): "Tilt Y",
        (2, 0): "Defocus",
        (2, -2): "Astig 45°",
        (2, 2): "Astig 0°",
        (3, -1): "Coma X",
        (3, 1): "Coma Y",
        (3, -3): "Trefoil X",
        (3, 3): "Trefoil Y",
        (4, 0): "Primary Spherical",
        (4, -2): "Sec Astig X",
        (4, 2): "Sec Astig Y",
        (4, -4): "Quadrafoil X",
        (4, 4): "Quadrafoil Y",
        (5, -1): "Sec Coma X",
        (5, 1): "Sec Coma Y",
        (5, -3): "Sec Trefoil X",
        (5, 3): "Sec Trefoil Y",
        (5, -5): "Pentafoil X",
        (5, 5): "Pentafoil Y",
        (6, 0): "Sec Spherical",
    }
    return lookup.get((n, m), f"Z(n={n}, m={m})")

def zernike_label(j: int) -> str:
    """Format mode ``j`` as 'Jxx - <common name>' using this module's ordering."""
    n, m = noll_indices(j)
    return f"J{j:02d} - {zernike_common_name(n, m)}"
|
||||
|
||||
def compute_manufacturing_metrics(coefficients: np.ndarray) -> dict:
    """Compute manufacturing-related aberration metrics.

    Args:
        coefficients: Zernike coefficients in this module's Noll-style
            ordering (index 0 == J1), in nm.

    Returns:
        dict with 'defocus_nm' (|J4|), 'astigmatism_rms' (J5+J6 RSS),
        'coma_rms' (J7+J8 RSS), 'trefoil_rms' (J9+J10 RSS) and
        'spherical_nm' (|J11|), all as plain floats.
    """
    def _c(i: int) -> float:
        # Bounds-safe accessor: the original guarded only J11 against a short
        # coefficient vector; apply the same guard uniformly to J4-J11.
        return float(coefficients[i]) if len(coefficients) > i else 0.0

    return {
        'defocus_nm': abs(_c(3)),                            # J4
        'astigmatism_rms': float(np.hypot(_c(4), _c(5))),    # J5+J6
        'coma_rms': float(np.hypot(_c(6), _c(7))),           # J7+J8
        'trefoil_rms': float(np.hypot(_c(8), _c(9))),        # J9+J10
        'spherical_nm': abs(_c(10)),                         # J11
    }
|
||||
|
||||
def compute_rms_filter_j1to3(X, Y, W_nm, coefficients, R):
    """Compute RMS with J1-J3 filtered (keeping defocus for optician workload).

    Removes only piston and the two tilts (modes J1-J3) from the wavefront
    W_nm (using the already-fitted coefficients) and returns the RMS of the
    residual in nm.  R is the aperture radius used for normalization.
    """
    # Polar coordinates about the aperture centroid, radius-normalized by R.
    x0 = X - np.mean(X)
    y0 = Y - np.mean(Y)
    rho = np.hypot(x0 / R, y0 / R)
    theta = np.arctan2(y0, x0)
    # Surface spanned by the first three modes; subtracting it leaves
    # defocus (J4) and everything higher in the residual on purpose.
    low_modes = np.column_stack([zernike_noll(j, rho, theta) for j in (1, 2, 3)])
    residual = W_nm - low_modes @ coefficients[:3]
    return float(np.sqrt(np.mean(residual ** 2)))
|
||||
|
||||
def generate_zernike_html(
    title: str,
    X: np.ndarray,
    Y: np.ndarray,
    W_nm: np.ndarray,
    coefficients: np.ndarray,
    rms_global: float,
    rms_filtered: float,
    ref_title: str = "20 deg",
    abs_pair = None,
    is_manufacturing: bool = False,
    mfg_metrics: dict = None,
    correction_metrics: dict = None
) -> str:
    """Generate HTML string for Zernike visualization with full tables.

    Args:
        title: Figure title shown at the top of the page.
        X, Y: Node coordinates of the optical surface (same length as W_nm).
        W_nm: Per-node wavefront error, in nm.
        coefficients: Fitted Zernike coefficients (index 0 == J1), in nm.
        rms_global: Unfiltered RMS of W_nm, nm.
        rms_filtered: RMS after removing J1..FILTER_LOW_ORDERS, nm.
        ref_title: Label of the reference orientation (e.g. "20 deg").
        abs_pair: Optional (global_rms, filtered_rms) of the absolute surface;
            when given, an "Absolute (nm)" column is added to the RMS table.
        is_manufacturing: True for the 90-vs-20 view (extra tables, 5 rows).
        mfg_metrics: Absolute 90-deg aberration metrics (manufacturing only).
        correction_metrics: Relative 90-20 metrics (manufacturing only).

    Returns:
        A standalone HTML document (Plotly loaded from CDN) as a string.
    """
    # Residual surface after removing the low-order modes (J1..FILTER_LOW_ORDERS).
    Xc = X - np.mean(X)
    Yc = Y - np.mean(Y)
    R = float(np.max(np.hypot(Xc, Yc)))  # aperture radius for normalization
    r = np.hypot(Xc/R, Yc/R)
    th = np.arctan2(Yc, Xc)

    Z = np.column_stack([zernike_noll(j, r, th) for j in range(1, N_MODES+1)])
    W_res_filt = W_nm - Z[:, :FILTER_LOW_ORDERS].dot(coefficients[:FILTER_LOW_ORDERS])

    # Compute J1-J3 filtered RMS (optician workload metric).
    rms_filter_j1to3 = compute_rms_filter_j1to3(X, Y, W_nm, coefficients, R)

    # Downsample for display (fixed seed keeps the plot reproducible).
    n = len(X)
    if n > PLOT_DOWNSAMPLE:
        rng = np.random.default_rng(42)
        sel = rng.choice(n, size=PLOT_DOWNSAMPLE, replace=False)
        Xp, Yp, Wp = X[sel], Y[sel], W_res_filt[sel]
    else:
        Xp, Yp, Wp = X, Y, W_res_filt

    res_amp = AMP * Wp  # exaggerate the residual so it is visible in 3D
    max_amp = float(np.max(np.abs(res_amp))) if res_amp.size else 1.0

    # Create SURFACE mesh (not just points); scatter fallback added below.
    surface_trace = None
    try:
        tri = Triangulation(Xp, Yp)
        if tri.triangles is not None and len(tri.triangles) > 0:
            i_idx, j_idx, k_idx = tri.triangles.T
            surface_trace = go.Mesh3d(
                x=Xp.tolist(), y=Yp.tolist(), z=res_amp.tolist(),
                i=i_idx.tolist(), j=j_idx.tolist(), k=k_idx.tolist(),
                intensity=res_amp.tolist(),
                colorscale='RdBu',
                opacity=0.95,
                flatshading=True,
                showscale=True,
                colorbar=dict(title="Residual (nm)", titleside='right', len=0.6)
            )
    except Exception as e:
        # Triangulation can fail on degenerate point sets; fall back to scatter.
        print(f"Triangulation failed: {e}")

    labels = [zernike_label(j) for j in range(1, N_MODES+1)]
    coeff_abs = np.abs(coefficients)
    # (Fix: removed an unused `mfg = compute_manufacturing_metrics(...)` call
    # whose result was computed and then discarded.)

    # Determine layout based on whether this is manufacturing (90 deg) view.
    if is_manufacturing and mfg_metrics and correction_metrics:
        # Manufacturing view: 5 rows
        fig = make_subplots(
            rows=5, cols=1,
            specs=[[{"type": "scene"}],
                   [{"type": "table"}],
                   [{"type": "table"}],
                   [{"type": "table"}],
                   [{"type": "xy"}]],
            row_heights=[0.35, 0.10, 0.15, 0.15, 0.25],
            vertical_spacing=0.025,
            subplot_titles=[
                f"<b>Surface Residual (relative to {ref_title})</b>",
                "<b>RMS Metrics</b>",
                "<b>Mode Magnitudes (Absolute 90 deg)</b>",
                "<b>Pre-Correction (90 deg - 20 deg)</b>",
                f"<b>Zernike Coefficients ({N_MODES} modes)</b>"
            ]
        )
    else:
        # Standard relative view: 4 rows with full coefficient table
        fig = make_subplots(
            rows=4, cols=1,
            specs=[[{"type": "scene"}],
                   [{"type": "table"}],
                   [{"type": "table"}],
                   [{"type": "xy"}]],
            row_heights=[0.40, 0.12, 0.28, 0.20],
            vertical_spacing=0.03,
            subplot_titles=[
                f"<b>Surface Residual (relative to {ref_title})</b>",
                "<b>RMS Metrics</b>",
                f"<b>Zernike Coefficients ({N_MODES} modes)</b>",
                "<b>Top 20 |Zernike Coefficients| (nm)</b>"
            ]
        )

    # Add surface mesh (or fallback to scatter).
    if surface_trace is not None:
        fig.add_trace(surface_trace, row=1, col=1)
    else:
        # Fallback to scatter if triangulation failed
        fig.add_trace(go.Scatter3d(
            x=Xp.tolist(), y=Yp.tolist(), z=res_amp.tolist(),
            mode='markers',
            marker=dict(size=2, color=res_amp.tolist(), colorscale='RdBu', showscale=True),
            showlegend=False
        ), row=1, col=1)

    # PANCAKE widens the Z range so the exaggerated residual looks flat-ish.
    fig.update_scenes(
        camera=dict(eye=dict(x=0.8, y=0.8, z=0.6)),
        zaxis=dict(range=[-max_amp * PANCAKE, max_amp * PANCAKE]),
        aspectmode='data'
    )

    # Row 2: RMS table with all metrics.
    if abs_pair is not None:
        abs_global, abs_filtered = abs_pair
        fig.add_trace(go.Table(
            header=dict(
                values=["<b>Metric</b>", "<b>Relative (nm)</b>", "<b>Absolute (nm)</b>"],
                align="left",
                fill_color='rgb(55, 83, 109)',
                font=dict(color='white', size=12)
            ),
            cells=dict(
                values=[
                    ["Global RMS", "Filtered RMS (J1-J4)", "Filtered RMS (J1-J3, w/ defocus)"],
                    [f"{rms_global:.2f}", f"{rms_filtered:.2f}", f"{rms_filter_j1to3:.2f}"],
                    [f"{abs_global:.2f}", f"{abs_filtered:.2f}", "-"],
                ],
                align="left",
                fill_color='rgb(243, 243, 243)'
            )
        ), row=2, col=1)
    else:
        fig.add_trace(go.Table(
            header=dict(
                values=["<b>Metric</b>", "<b>Value (nm)</b>"],
                align="left",
                fill_color='rgb(55, 83, 109)',
                font=dict(color='white', size=12)
            ),
            cells=dict(
                values=[
                    ["Global RMS", "Filtered RMS (J1-J4)", "Filtered RMS (J1-J3, w/ defocus)"],
                    [f"{rms_global:.2f}", f"{rms_filtered:.2f}", f"{rms_filter_j1to3:.2f}"]
                ],
                align="left",
                fill_color='rgb(243, 243, 243)'
            )
        ), row=2, col=1)

    if is_manufacturing and mfg_metrics and correction_metrics:
        # Row 3: Mode magnitudes at 90 deg (absolute)
        fig.add_trace(go.Table(
            header=dict(
                values=["<b>Mode</b>", "<b>Value (nm)</b>"],
                align="left",
                fill_color='rgb(55, 83, 109)',
                font=dict(color='white', size=11)
            ),
            cells=dict(
                values=[
                    ["Defocus (J4)", "Astigmatism (J5+J6)", "Coma (J7+J8)", "Trefoil (J9+J10)", "Spherical (J11)"],
                    [f"{mfg_metrics['defocus_nm']:.2f}", f"{mfg_metrics['astigmatism_rms']:.2f}",
                     f"{mfg_metrics['coma_rms']:.2f}", f"{mfg_metrics['trefoil_rms']:.2f}",
                     f"{mfg_metrics['spherical_nm']:.2f}"]
                ],
                align="left",
                fill_color='rgb(243, 243, 243)'
            )
        ), row=3, col=1)

        # Row 4: Pre-correction (90 deg - 20 deg)
        fig.add_trace(go.Table(
            header=dict(
                values=["<b>Correction Mode</b>", "<b>Value (nm)</b>"],
                align="left",
                fill_color='rgb(55, 83, 109)',
                font=dict(color='white', size=11)
            ),
            cells=dict(
                values=[
                    ["Total RMS (J1-J3 filter)", "Defocus (J4)", "Astigmatism (J5+J6)", "Coma (J7+J8)"],
                    [f"{correction_metrics.get('rms_filter_j1to3', 0):.2f}",
                     f"{correction_metrics['defocus_nm']:.2f}",
                     f"{correction_metrics['astigmatism_rms']:.2f}",
                     f"{correction_metrics['coma_rms']:.2f}"]
                ],
                align="left",
                fill_color='rgb(243, 243, 243)'
            )
        ), row=4, col=1)

        # Row 5: Bar chart (top 20 modes by magnitude)
        sorted_idx = np.argsort(coeff_abs)[::-1][:20]
        fig.add_trace(
            go.Bar(
                x=[float(coeff_abs[i]) for i in sorted_idx],
                y=[labels[i] for i in sorted_idx],
                orientation='h',
                marker_color='rgb(55, 83, 109)',
                hovertemplate="%{y}<br>|Coeff| = %{x:.3f} nm<extra></extra>",
                showlegend=False
            ),
            row=5, col=1
        )
    else:
        # Row 3: Full coefficient table
        fig.add_trace(go.Table(
            header=dict(
                values=["<b>Noll j</b>", "<b>Mode Name</b>", "<b>Coeff (nm)</b>", "<b>|Coeff| (nm)</b>"],
                align="left",
                fill_color='rgb(55, 83, 109)',
                font=dict(color='white', size=11)
            ),
            cells=dict(
                values=[
                    list(range(1, N_MODES+1)),
                    labels,
                    [f"{c:+.3f}" for c in coefficients],
                    [f"{abs(c):.3f}" for c in coefficients]
                ],
                align="left",
                fill_color='rgb(243, 243, 243)',
                font=dict(size=10),
                height=22
            )
        ), row=3, col=1)

        # Row 4: Bar chart - top 20 modes by magnitude
        sorted_idx = np.argsort(coeff_abs)[::-1][:20]
        fig.add_trace(
            go.Bar(
                x=[float(coeff_abs[i]) for i in sorted_idx],
                y=[labels[i] for i in sorted_idx],
                orientation='h',
                marker_color='rgb(55, 83, 109)',
                hovertemplate="%{y}<br>|Coeff| = %{x:.3f} nm<extra></extra>",
                showlegend=False
            ),
            row=4, col=1
        )

    fig.update_layout(
        width=1400,
        height=1800 if is_manufacturing else 1600,
        margin=dict(t=80, b=20, l=20, r=20),
        title=dict(
            text=f"<b>{title}</b>",
            font=dict(size=20),
            x=0.5
        ),
        paper_bgcolor='white',
        plot_bgcolor='white'
    )

    return fig.to_html(include_plotlyjs='cdn', full_html=True)
|
||||
|
||||
        # Load OP2 and generate reports.
        op2_path = op2_files[0]
        extractor = ZernikeExtractor(str(op2_path), displacement_unit='mm', n_modes=N_MODES)

        # (target_subcase, reference_subcase, result_key, human title)
        results = {}
        comparisons = [
            ('3', '2', '40_vs_20', '40 deg vs 20 deg'),
            ('4', '2', '60_vs_20', '60 deg vs 20 deg'),
            ('1', '2', '90_vs_20', '90 deg vs 20 deg (manufacturing)'),
        ]

        # Pre-compute absolute 90 deg metrics for manufacturing view.
        abs_90_data = None
        abs_90_metrics = None
        if '1' in extractor.displacements:
            abs_90_data = extractor.extract_subcase('1', include_coefficients=True)
            abs_90_metrics = compute_manufacturing_metrics(np.array(abs_90_data['coefficients']))

        for target_sc, ref_sc, key, title_suffix in comparisons:
            # Skip comparisons whose target subcase is absent from the OP2.
            if target_sc not in extractor.displacements:
                continue

            # Get relative data with coefficients.
            rel_data = extractor.extract_relative(target_sc, ref_sc, include_coefficients=True)

            # Get absolute data for this subcase.
            abs_data = extractor.extract_subcase(target_sc, include_coefficients=True)

            # Build coordinate arrays.
            target_disp = extractor.displacements[target_sc]
            ref_disp = extractor.displacements[ref_sc]

            # node_id -> row index in the reference subcase, for alignment.
            ref_node_to_idx = {int(nid): i for i, nid in enumerate(ref_disp['node_ids'])}
            X_list, Y_list, W_list = [], [], []

            for i, nid in enumerate(target_disp['node_ids']):
                nid = int(nid)
                # Only nodes present in BOTH subcases and with known geometry
                # contribute to the relative wavefront.
                if nid not in ref_node_to_idx:
                    continue
                geo = extractor.node_geometry.get(nid)
                if geo is None:
                    continue

                ref_idx = ref_node_to_idx[nid]
                # Z-displacement (column 2) scaled to wavefront error.
                target_wfe = target_disp['disp'][i, 2] * extractor.wfe_factor
                ref_wfe = ref_disp['disp'][ref_idx, 2] * extractor.wfe_factor

                X_list.append(geo[0])
                Y_list.append(geo[1])
                W_list.append(target_wfe - ref_wfe)

            X = np.array(X_list)
            Y = np.array(Y_list)
            W = np.array(W_list)

            target_angle = SUBCASE_MAP.get(target_sc, target_sc)
            ref_angle = SUBCASE_MAP.get(ref_sc, ref_sc)

            # Check if this is the manufacturing (90 deg) comparison.
            is_mfg = (key == '90_vs_20')

            # Compute correction metrics (relative coefficients) for manufacturing view.
            correction_metrics = None
            if is_mfg and 'coefficients' in rel_data:
                correction_metrics = compute_manufacturing_metrics(np.array(rel_data['coefficients']))
                # Also compute rms_filter_j1to3 for the relative data.
                R = float(np.max(np.hypot(X - np.mean(X), Y - np.mean(Y))))
                correction_metrics['rms_filter_j1to3'] = compute_rms_filter_j1to3(
                    X, Y, W, np.array(rel_data['coefficients']), R
                )

            html_content = generate_zernike_html(
                title=f"iter{trial_number}: {target_angle} deg vs {ref_angle} deg",
                X=X, Y=Y, W_nm=W,
                coefficients=np.array(rel_data['coefficients']),
                rms_global=rel_data['relative_global_rms_nm'],
                rms_filtered=rel_data['relative_filtered_rms_nm'],
                ref_title=f"{ref_angle} deg",
                abs_pair=(abs_data['global_rms_nm'], abs_data['filtered_rms_nm']),
                is_manufacturing=is_mfg,
                mfg_metrics=abs_90_metrics if is_mfg else None,
                correction_metrics=correction_metrics
            )

            results[key] = {
                "html": html_content,
                "rms_global": rel_data['relative_global_rms_nm'],
                "rms_filtered": rel_data['relative_filtered_rms_nm'],
                "title": f"{target_angle}° vs {ref_angle}°"
            }

        if not results:
            raise HTTPException(
                status_code=500,
                detail="Failed to generate Zernike analysis. Check if subcases are available."
            )

        return {
            "study_id": study_id,
            "trial_number": trial_number,
            "comparisons": results,
            "available_comparisons": list(results.keys())
        }

    except HTTPException:
        raise
    except Exception as e:
        # Print the traceback server-side; the client only gets the message.
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Failed to generate Zernike analysis: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/studies/{study_id}/export/{format}")
|
||||
async def export_study_data(study_id: str, format: str):
|
||||
"""Export study data in various formats: csv, json, excel"""
|
||||
|
||||
301
atomizer-dashboard/frontend/src/components/ZernikeViewer.tsx
Normal file
301
atomizer-dashboard/frontend/src/components/ZernikeViewer.tsx
Normal file
@@ -0,0 +1,301 @@
|
||||
/**
|
||||
* Zernike Viewer Component
|
||||
* Displays interactive Zernike wavefront analysis for mirror optimization trials
|
||||
*
|
||||
* Features:
|
||||
* - 3D surface residual plots (Plotly)
|
||||
* - RMS metrics tables
|
||||
* - Zernike coefficient bar charts
|
||||
* - Tab navigation for different angle comparisons (40°, 60°, 90° vs 20°)
|
||||
*/
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { X, RefreshCw, Activity, ChevronLeft, ChevronRight, ExternalLink } from 'lucide-react';
|
||||
|
||||
/** One angle comparison returned by the backend (self-contained Plotly HTML + RMS summary). */
interface ZernikeComparison {
  html: string;
  rms_global: number;   // unfiltered RMS, nm
  rms_filtered: number; // low-order-filtered RMS, nm
  title: string;
}

/** Response shape of GET /studies/{id}/trials/{num}/zernike. */
interface ZernikeData {
  study_id: string;
  trial_number: number;
  comparisons: Record<string, ZernikeComparison>;
  available_comparisons: string[];
}

interface ZernikeViewerProps {
  studyId: string;
  trialNumber: number;
  onClose: () => void; // invoked on backdrop click or the close buttons
}

/**
 * Full-screen modal that fetches and displays Zernike wavefront analysis for
 * one trial, with tab navigation across the available angle comparisons.
 */
export function ZernikeViewer({ studyId, trialNumber, onClose }: ZernikeViewerProps) {
  const [data, setData] = useState<ZernikeData | null>(null);
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);
  const [selectedTab, setSelectedTab] = useState<string>('40_vs_20');

  // Fetch (or re-fetch) the analysis; also used by the Refresh button.
  const fetchZernikeData = async () => {
    setLoading(true);
    setError(null);
    try {
      const response = await fetch(
        `/api/optimization/studies/${studyId}/trials/${trialNumber}/zernike`
      );
      if (!response.ok) {
        // Backend returns {detail: "..."} on errors; fall back to status code.
        const errorData = await response.json().catch(() => ({}));
        throw new Error(errorData.detail || `HTTP ${response.status}`);
      }
      const result = await response.json();
      setData(result);
      // Select first available tab
      if (result.available_comparisons?.length > 0) {
        setSelectedTab(result.available_comparisons[0]);
      }
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to load Zernike analysis');
    } finally {
      setLoading(false);
    }
  };

  // NOTE(review): fetchZernikeData is intentionally omitted from the deps
  // (it is re-created every render); confirm exhaustive-deps is acceptable.
  useEffect(() => {
    fetchZernikeData();
  }, [studyId, trialNumber]);

  // Tab labels for display
  const tabLabels: Record<string, string> = {
    '40_vs_20': '40° vs 20°',
    '60_vs_20': '60° vs 20°',
    '90_vs_20': '90° vs 20° (Mfg)',
  };

  // Get current comparison data
  const currentComparison = data?.comparisons[selectedTab];

  // Navigate between tabs (prev/next arrow buttons)
  const navigateTab = (direction: 'prev' | 'next') => {
    if (!data?.available_comparisons) return;
    const currentIndex = data.available_comparisons.indexOf(selectedTab);
    if (direction === 'prev' && currentIndex > 0) {
      setSelectedTab(data.available_comparisons[currentIndex - 1]);
    } else if (direction === 'next' && currentIndex < data.available_comparisons.length - 1) {
      setSelectedTab(data.available_comparisons[currentIndex + 1]);
    }
  };

  // Open the current comparison's standalone HTML in a new window.
  // NOTE(review): document.write is deprecated; works, but consider a Blob URL.
  const openInNewWindow = () => {
    if (!currentComparison?.html) return;
    const newWindow = window.open('', '_blank');
    if (newWindow) {
      newWindow.document.write(currentComparison.html);
      newWindow.document.close();
    }
  };

  return (
    <div
      className="fixed inset-0 z-50 flex items-center justify-center bg-black/80"
      onClick={onClose}
    >
      {/* stopPropagation so clicks inside the panel do not close the modal */}
      <div
        className="bg-dark-800 rounded-xl shadow-2xl w-[98vw] max-w-[1800px] h-[95vh] flex flex-col border border-dark-600"
        onClick={(e) => e.stopPropagation()}
      >
        {/* Header */}
        <div className="flex items-center justify-between px-6 py-4 border-b border-dark-600 bg-dark-700/50">
          <div className="flex items-center gap-3">
            <Activity className="text-primary-400" size={24} />
            <div>
              <h2 className="text-lg font-semibold text-dark-100">
                Zernike Analysis - Trial #{trialNumber}
              </h2>
              <p className="text-sm text-dark-400">{studyId}</p>
            </div>
          </div>
          <div className="flex items-center gap-2">
            {/* RMS Quick Summary */}
            {currentComparison && (
              <div className="flex items-center gap-4 mr-4 px-4 py-2 bg-dark-600 rounded-lg">
                <div className="text-center">
                  <div className="text-xs text-dark-400">Global RMS</div>
                  <div className="text-sm font-mono text-primary-300">
                    {currentComparison.rms_global.toFixed(2)} nm
                  </div>
                </div>
                <div className="text-center border-l border-dark-500 pl-4">
                  <div className="text-xs text-dark-400">Filtered RMS</div>
                  <div className="text-sm font-mono text-green-400">
                    {currentComparison.rms_filtered.toFixed(2)} nm
                  </div>
                </div>
              </div>
            )}
            <button
              onClick={fetchZernikeData}
              className="p-2 hover:bg-dark-600 rounded-lg transition-colors"
              title="Refresh"
            >
              <RefreshCw size={18} className={`text-dark-300 ${loading ? 'animate-spin' : ''}`} />
            </button>
            <button
              onClick={openInNewWindow}
              className="p-2 hover:bg-dark-600 rounded-lg transition-colors"
              title="Open in new window"
              disabled={!currentComparison}
            >
              <ExternalLink size={18} className="text-dark-300" />
            </button>
            <button
              onClick={onClose}
              className="p-2 hover:bg-dark-600 rounded-lg transition-colors"
            >
              <X size={20} className="text-dark-300" />
            </button>
          </div>
        </div>

        {/* Tabs */}
        {data?.available_comparisons && data.available_comparisons.length > 0 && (
          <div className="flex items-center justify-between px-6 py-2 border-b border-dark-600 bg-dark-700/30">
            <button
              onClick={(e) => {
                e.stopPropagation();
                navigateTab('prev');
              }}
              disabled={data.available_comparisons.indexOf(selectedTab) === 0}
              className="p-2 hover:bg-dark-600 rounded-lg transition-colors disabled:opacity-30"
            >
              <ChevronLeft size={20} className="text-dark-300" />
            </button>

            <div className="flex gap-2">
              {data.available_comparisons.map((tab) => (
                <button
                  key={tab}
                  onClick={(e) => {
                    e.stopPropagation();
                    e.preventDefault();
                    setSelectedTab(tab);
                  }}
                  className={`px-4 py-2 rounded-lg text-sm font-medium transition-colors ${
                    selectedTab === tab
                      ? 'bg-primary-600 text-white'
                      : 'bg-dark-600 text-dark-200 hover:bg-dark-500'
                  }`}
                >
                  {tabLabels[tab] || tab}
                </button>
              ))}
            </div>

            <button
              onClick={(e) => {
                e.stopPropagation();
                navigateTab('next');
              }}
              disabled={data.available_comparisons.indexOf(selectedTab) === data.available_comparisons.length - 1}
              className="p-2 hover:bg-dark-600 rounded-lg transition-colors disabled:opacity-30"
            >
              <ChevronRight size={20} className="text-dark-300" />
            </button>
          </div>
        )}

        {/* Content */}
        <div className="flex-1 overflow-hidden bg-white">
          {loading && (
            <div className="flex flex-col items-center justify-center h-full bg-dark-700">
              <div className="animate-spin rounded-full h-12 w-12 border-b-2 border-primary-400 mb-4"></div>
              <p className="text-dark-300">Generating Zernike analysis...</p>
              <p className="text-dark-500 text-sm mt-2">This may take a few seconds for large meshes</p>
            </div>
          )}

          {error && (
            <div className="flex flex-col items-center justify-center h-full text-dark-400 bg-dark-700">
              <Activity size={48} className="mb-4 opacity-50" />
              <p className="text-lg font-medium text-yellow-400 mb-2">
                {error.includes('surrogate') || error.includes('NN') ? 'No FEA Results' : 'Analysis Failed'}
              </p>
              <p className="text-dark-300 mb-4 max-w-lg text-center">{error}</p>
              {error.includes('surrogate') || error.includes('NN') ? (
                <p className="text-dark-500 text-sm mb-4">
                  Try selecting a trial with "FEA" source tag instead of "NN"
                </p>
              ) : (
                <button
                  onClick={fetchZernikeData}
                  className="px-4 py-2 text-sm bg-dark-600 hover:bg-dark-500 text-dark-200 rounded-lg transition-colors"
                >
                  Try Again
                </button>
              )}
              <button
                onClick={onClose}
                className="mt-4 px-4 py-2 text-sm bg-dark-700 hover:bg-dark-600 text-dark-300 rounded-lg transition-colors border border-dark-500"
              >
                Close
              </button>
            </div>
          )}

          {/* sandboxed iframe isolates the Plotly HTML from the app */}
          {!loading && !error && currentComparison && (
            <iframe
              srcDoc={currentComparison.html}
              className="w-full h-full border-0"
              title={`Zernike Analysis - ${tabLabels[selectedTab] || selectedTab}`}
              sandbox="allow-scripts allow-same-origin"
            />
          )}
        </div>
      </div>
    </div>
  );
}
|
||||
|
||||
/**
 * Button component to trigger Zernike viewer for a trial
 * Used in Dashboard trial list
 */
interface ZernikeButtonProps {
  /** Study identifier passed through to the ZernikeViewer API calls. */
  studyId: string;
  /** Trial number within the study whose wavefront is analyzed. */
  trialNumber: number;
  /** When true, renders the smaller button variant used in list rows (default: false). */
  compact?: boolean;
}
||||
|
||||
export function ZernikeButton({ studyId, trialNumber, compact = false }: ZernikeButtonProps) {
  // Whether the ZernikeViewer overlay is currently mounted.
  const [isOpen, setIsOpen] = useState(false);

  return (
    <>
      <button
        onClick={(e) => {
          e.stopPropagation(); // Prevent trial row expansion
          setIsOpen(true);
        }}
        className={`flex items-center gap-1 ${
          compact
            ? 'px-2 py-1 text-xs'
            : 'px-3 py-1.5 text-sm'
        } bg-indigo-600 hover:bg-indigo-700 text-white rounded transition-colors font-medium`}
        title="View Zernike wavefront analysis"
      >
        <Activity size={compact ? 12 : 14} />
        <span>{compact ? 'Zernike' : 'View Zernike'}</span>
      </button>

      {/* Viewer is mounted only while open; its onClose callback unmounts it. */}
      {isOpen && (
        <ZernikeViewer
          studyId={studyId}
          trialNumber={trialNumber}
          onClose={() => setIsOpen(false)}
        />
      )}
    </>
  );
}
|
||||
@@ -16,6 +16,7 @@ import { ParameterImportanceChart } from '../components/ParameterImportanceChart
|
||||
import { ConvergencePlot } from '../components/ConvergencePlot';
|
||||
import { StudyReportViewer } from '../components/StudyReportViewer';
|
||||
import { ConsoleOutput } from '../components/ConsoleOutput';
|
||||
import { ZernikeButton } from '../components/ZernikeViewer';
|
||||
import { ExpandableChart } from '../components/ExpandableChart';
|
||||
import { CurrentTrialPanel, OptimizerStatePanel } from '../components/tracker';
|
||||
import type { Trial } from '../types';
|
||||
@@ -49,6 +50,8 @@ export default function Dashboard() {
|
||||
const [sortBy, setSortBy] = useState<'performance' | 'chronological'>('performance');
|
||||
const [trialsPage, setTrialsPage] = useState(0);
|
||||
const trialsPerPage = 50; // Limit trials per page for performance
|
||||
const [showOnlyFEA, setShowOnlyFEA] = useState(false); // Filter to show only trials with OP2 results
|
||||
const [zernikeAvailableTrials, setZernikeAvailableTrials] = useState<Set<number>>(new Set()); // Trials with OP2 files
|
||||
|
||||
// Parameter Space axis selection (reserved for future use)
|
||||
const [_paramXIndex, _setParamXIndex] = useState(0);
|
||||
@@ -98,7 +101,12 @@ export default function Dashboard() {
|
||||
onMessage: (msg) => {
|
||||
if (msg.type === 'trial_completed') {
|
||||
const trial = msg.data as Trial;
|
||||
setAllTrials(prev => [...prev, trial]);
|
||||
// Avoid duplicates by checking if trial already exists
|
||||
setAllTrials(prev => {
|
||||
const exists = prev.some(t => t.trial_number === trial.trial_number);
|
||||
if (exists) return prev;
|
||||
return [...prev, trial];
|
||||
});
|
||||
if (trial.objective !== null && trial.objective !== undefined && trial.objective < bestValue) {
|
||||
const improvement = previousBestRef.current !== Infinity
|
||||
? ((previousBestRef.current - trial.objective) / Math.abs(previousBestRef.current)) * 100
|
||||
@@ -199,6 +207,18 @@ export default function Dashboard() {
|
||||
setIsRunning(data.is_running);
|
||||
})
|
||||
.catch(err => console.error('Failed to load process status:', err));
|
||||
|
||||
// Fetch available Zernike trials (for mirror/zernike studies)
|
||||
if (selectedStudyId.includes('mirror') || selectedStudyId.includes('zernike')) {
|
||||
fetch(`/api/optimization/studies/${selectedStudyId}/zernike-available`)
|
||||
.then(res => res.json())
|
||||
.then(data => {
|
||||
setZernikeAvailableTrials(new Set(data.available_trials || []));
|
||||
})
|
||||
.catch(err => console.error('Failed to load Zernike available trials:', err));
|
||||
} else {
|
||||
setZernikeAvailableTrials(new Set());
|
||||
}
|
||||
}
|
||||
}, [selectedStudyId]);
|
||||
|
||||
@@ -217,22 +237,30 @@ export default function Dashboard() {
|
||||
return () => clearInterval(pollStatus);
|
||||
}, [selectedStudyId]);
|
||||
|
||||
// Sort trials based on selected sort order
|
||||
// Sort and filter trials based on selected options
|
||||
useEffect(() => {
|
||||
let sorted = [...allTrials];
|
||||
let filtered = [...allTrials];
|
||||
|
||||
// Filter to trials with OP2 results (for Zernike analysis)
|
||||
if (showOnlyFEA && zernikeAvailableTrials.size > 0) {
|
||||
filtered = filtered.filter(t => zernikeAvailableTrials.has(t.trial_number));
|
||||
}
|
||||
|
||||
// Sort
|
||||
if (sortBy === 'performance') {
|
||||
// Sort by objective (best first)
|
||||
sorted.sort((a, b) => {
|
||||
filtered.sort((a, b) => {
|
||||
const aObj = a.objective ?? Infinity;
|
||||
const bObj = b.objective ?? Infinity;
|
||||
return aObj - bObj;
|
||||
});
|
||||
} else {
|
||||
// Chronological (newest first)
|
||||
sorted.sort((a, b) => b.trial_number - a.trial_number);
|
||||
filtered.sort((a, b) => b.trial_number - a.trial_number);
|
||||
}
|
||||
setDisplayedTrials(sorted);
|
||||
}, [allTrials, sortBy]);
|
||||
setDisplayedTrials(filtered);
|
||||
setTrialsPage(0); // Reset pagination when filter changes
|
||||
}, [allTrials, sortBy, showOnlyFEA, zernikeAvailableTrials]);
|
||||
|
||||
// Auto-refresh polling for trial history
|
||||
// PERFORMANCE: Use limit and longer interval for large studies
|
||||
@@ -649,8 +677,22 @@ export default function Dashboard() {
|
||||
<Card
|
||||
title={
|
||||
<div className="flex items-center justify-between w-full">
|
||||
<span>Trial History ({displayedTrials.length} trials)</span>
|
||||
<span>Trial History ({displayedTrials.length}{showOnlyFEA ? ' with OP2' : ''} trials{showOnlyFEA && allTrials.length !== displayedTrials.length ? ` of ${allTrials.length}` : ''})</span>
|
||||
<div className="flex gap-2 items-center">
|
||||
{/* OP2/Zernike toggle - only show for mirror/zernike studies */}
|
||||
{selectedStudyId && (selectedStudyId.includes('mirror') || selectedStudyId.includes('zernike')) && zernikeAvailableTrials.size > 0 && (
|
||||
<button
|
||||
onClick={() => setShowOnlyFEA(!showOnlyFEA)}
|
||||
className={`px-3 py-1 rounded text-sm ${
|
||||
showOnlyFEA
|
||||
? 'bg-green-600 text-white'
|
||||
: 'bg-dark-500 text-dark-200 hover:bg-dark-400'
|
||||
}`}
|
||||
title={`Show only ${zernikeAvailableTrials.size} trials with OP2 results (for Zernike analysis)`}
|
||||
>
|
||||
OP2 Only ({zernikeAvailableTrials.size})
|
||||
</button>
|
||||
)}
|
||||
<button
|
||||
onClick={() => setSortBy('performance')}
|
||||
className={`px-3 py-1 rounded text-sm ${
|
||||
@@ -699,13 +741,13 @@ export default function Dashboard() {
|
||||
>
|
||||
<div className="space-y-2 max-h-[600px] overflow-y-auto">
|
||||
{displayedTrials.length > 0 ? (
|
||||
displayedTrials.slice(trialsPage * trialsPerPage, (trialsPage + 1) * trialsPerPage).map(trial => {
|
||||
displayedTrials.slice(trialsPage * trialsPerPage, (trialsPage + 1) * trialsPerPage).map((trial, idx) => {
|
||||
const isExpanded = expandedTrials.has(trial.trial_number);
|
||||
const isBest = trial.objective === bestValue;
|
||||
|
||||
return (
|
||||
<div
|
||||
key={trial.trial_number}
|
||||
key={`trial-${trial.trial_number}-${idx}`}
|
||||
className={`rounded-lg transition-all duration-200 cursor-pointer ${
|
||||
isBest
|
||||
? 'bg-green-900 border-l-4 border-green-400'
|
||||
@@ -728,6 +770,14 @@ export default function Dashboard() {
|
||||
? trial.objective.toFixed(4)
|
||||
: 'N/A'}
|
||||
</span>
|
||||
{/* Zernike viewer button - only show for mirror/Zernike studies */}
|
||||
{selectedStudyId && (selectedStudyId.includes('mirror') || selectedStudyId.includes('zernike')) && (
|
||||
<ZernikeButton
|
||||
studyId={selectedStudyId}
|
||||
trialNumber={trial.trial_number}
|
||||
compact
|
||||
/>
|
||||
)}
|
||||
<span className="text-dark-400 text-sm">
|
||||
{isExpanded ? '▼' : '▶'}
|
||||
</span>
|
||||
@@ -829,6 +879,16 @@ export default function Dashboard() {
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Zernike Analysis Button - Full size in expanded view */}
|
||||
{selectedStudyId && (selectedStudyId.includes('mirror') || selectedStudyId.includes('zernike')) && (
|
||||
<div className="border-t border-dark-400 pt-3 mt-3">
|
||||
<ZernikeButton
|
||||
studyId={selectedStudyId}
|
||||
trialNumber={trial.trial_number}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
198
studies/m1_mirror_adaptive_V14/1_setup/optimization_config.json
Normal file
198
studies/m1_mirror_adaptive_V14/1_setup/optimization_config.json
Normal file
@@ -0,0 +1,198 @@
|
||||
{
|
||||
"$schema": "Atomizer M1 Mirror TPE Optimization V14",
|
||||
"study_name": "m1_mirror_adaptive_V14",
|
||||
"description": "V14 - TPE single-objective optimization seeded from V11+V12+V13 FEA trials. Weighted-sum objective for efficient convergence.",
|
||||
|
||||
"source_studies": {
|
||||
"v11": {
|
||||
"database": "../m1_mirror_adaptive_V11/3_results/study.db",
|
||||
"description": "V11 FEA trials (107 from V10 + V11)"
|
||||
},
|
||||
"v12": {
|
||||
"database": "../m1_mirror_adaptive_V12/3_results/study.db",
|
||||
"description": "V12 FEA trials from GNN validation"
|
||||
},
|
||||
"v13": {
|
||||
"database": "../m1_mirror_adaptive_V13/3_results/study.db",
|
||||
"description": "V13 FEA trials from NSGA-II run"
|
||||
}
|
||||
},
|
||||
|
||||
"design_variables": [
|
||||
{
|
||||
"name": "lateral_inner_angle",
|
||||
"expression_name": "lateral_inner_angle",
|
||||
"min": 25.0,
|
||||
"max": 28.5,
|
||||
"baseline": 26.79,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_outer_angle",
|
||||
"expression_name": "lateral_outer_angle",
|
||||
"min": 13.0,
|
||||
"max": 17.0,
|
||||
"baseline": 14.64,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_outer_pivot",
|
||||
"expression_name": "lateral_outer_pivot",
|
||||
"min": 9.0,
|
||||
"max": 12.0,
|
||||
"baseline": 10.40,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_inner_pivot",
|
||||
"expression_name": "lateral_inner_pivot",
|
||||
"min": 9.0,
|
||||
"max": 12.0,
|
||||
"baseline": 10.07,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_middle_pivot",
|
||||
"expression_name": "lateral_middle_pivot",
|
||||
"min": 18.0,
|
||||
"max": 23.0,
|
||||
"baseline": 20.73,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_closeness",
|
||||
"expression_name": "lateral_closeness",
|
||||
"min": 9.5,
|
||||
"max": 12.5,
|
||||
"baseline": 11.02,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_min",
|
||||
"expression_name": "whiffle_min",
|
||||
"min": 35.0,
|
||||
"max": 55.0,
|
||||
"baseline": 40.55,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_outer_to_vertical",
|
||||
"expression_name": "whiffle_outer_to_vertical",
|
||||
"min": 68.0,
|
||||
"max": 80.0,
|
||||
"baseline": 75.67,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_triangle_closeness",
|
||||
"expression_name": "whiffle_triangle_closeness",
|
||||
"min": 50.0,
|
||||
"max": 65.0,
|
||||
"baseline": 60.00,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "blank_backface_angle",
|
||||
"expression_name": "blank_backface_angle",
|
||||
      "min": 4.0,
|
||||
"max": 5.0,
|
||||
"baseline": 4.23,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "inner_circular_rib_dia",
|
||||
"expression_name": "inner_circular_rib_dia",
|
||||
"min": 480.0,
|
||||
"max": 620.0,
|
||||
"baseline": 534.00,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
}
|
||||
],
|
||||
|
||||
"objectives": [
|
||||
{
|
||||
"name": "rel_filtered_rms_40_vs_20",
|
||||
"description": "Filtered RMS WFE at 40 deg relative to 20 deg reference (operational tracking)",
|
||||
"direction": "minimize",
|
||||
"weight": 5.0,
|
||||
"target": 4.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "3",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_filtered_rms_nm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "rel_filtered_rms_60_vs_20",
|
||||
"description": "Filtered RMS WFE at 60 deg relative to 20 deg reference (operational tracking)",
|
||||
"direction": "minimize",
|
||||
"weight": 5.0,
|
||||
"target": 10.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "4",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_filtered_rms_nm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "mfg_90_optician_workload",
|
||||
"description": "Manufacturing deformation at 90 deg polishing (J1-J3 filtered RMS)",
|
||||
"direction": "minimize",
|
||||
"weight": 1.0,
|
||||
"target": 20.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "1",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_rms_filter_j1to3"
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
"zernike_settings": {
|
||||
"n_modes": 50,
|
||||
"filter_low_orders": 4,
|
||||
"displacement_unit": "mm",
|
||||
"subcases": ["1", "2", "3", "4"],
|
||||
"subcase_labels": {"1": "90deg", "2": "20deg", "3": "40deg", "4": "60deg"},
|
||||
"reference_subcase": "2"
|
||||
},
|
||||
|
||||
"tpe_settings": {
|
||||
"n_startup_trials": 10,
|
||||
"n_ei_candidates": 24,
|
||||
"multivariate": true,
|
||||
"constant_liar": true,
|
||||
"seed": 42
|
||||
},
|
||||
|
||||
"nx_settings": {
|
||||
"nx_install_path": "C:\\Program Files\\Siemens\\NX2506",
|
||||
"sim_file": "ASSY_M1_assyfem1_sim1.sim",
|
||||
"solution_name": "Solution 1",
|
||||
"op2_pattern": "*-solution_1.op2",
|
||||
"simulation_timeout_s": 600,
|
||||
"journal_timeout_s": 120,
|
||||
"op2_timeout_s": 1800,
|
||||
"auto_start_nx": true
|
||||
},
|
||||
|
||||
"dashboard_settings": {
|
||||
"trial_source_tag": true,
|
||||
"fea_marker": "circle",
|
||||
"fea_color": "#4CAF50"
|
||||
}
|
||||
}
|
||||
639
studies/m1_mirror_adaptive_V14/run_optimization.py
Normal file
639
studies/m1_mirror_adaptive_V14/run_optimization.py
Normal file
@@ -0,0 +1,639 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
M1 Mirror TPE FEA Optimization V14
|
||||
===================================
|
||||
|
||||
TPE (Tree-structured Parzen Estimator) optimization seeded from V11+V12+V13.
|
||||
Uses weighted-sum objective for efficient single-objective convergence.
|
||||
|
||||
Key Features:
|
||||
1. TPE sampler - efficient Bayesian optimization for single objectives
|
||||
2. Seeds from all prior FEA trials (~150+ from V11, V12, V13)
|
||||
3. Weighted-sum objective: 5*obj_40 + 5*obj_60 + 1*obj_mfg
|
||||
4. Individual objectives tracked as user attributes
|
||||
|
||||
Usage:
|
||||
python run_optimization.py --start
|
||||
python run_optimization.py --start --trials 50
|
||||
python run_optimization.py --start --trials 50 --resume
|
||||
|
||||
For 8-hour overnight run (~55 trials at 8-9 min/trial):
|
||||
python run_optimization.py --start --trials 55
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import argparse
|
||||
import logging
|
||||
import sqlite3
|
||||
import shutil
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Optional, Any
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
import numpy as np
|
||||
|
||||
# Add parent directories to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
import optuna
|
||||
from optuna.samplers import TPESampler
|
||||
|
||||
# Atomizer imports
|
||||
from optimization_engine.nx_solver import NXSolver
|
||||
from optimization_engine.utils import ensure_nx_running
|
||||
from optimization_engine.extractors import ZernikeExtractor
|
||||
|
||||
# ============================================================================
|
||||
# Paths
|
||||
# ============================================================================
|
||||
|
||||
STUDY_DIR = Path(__file__).parent
|
||||
SETUP_DIR = STUDY_DIR / "1_setup"
|
||||
ITERATIONS_DIR = STUDY_DIR / "2_iterations"
|
||||
RESULTS_DIR = STUDY_DIR / "3_results"
|
||||
CONFIG_PATH = SETUP_DIR / "optimization_config.json"
|
||||
|
||||
# Source studies for seeding
|
||||
V11_DB = STUDY_DIR.parent / "m1_mirror_adaptive_V11" / "3_results" / "study.db"
|
||||
V12_DB = STUDY_DIR.parent / "m1_mirror_adaptive_V12" / "3_results" / "study.db"
|
||||
V13_DB = STUDY_DIR.parent / "m1_mirror_adaptive_V13" / "3_results" / "study.db"
|
||||
|
||||
# Ensure directories exist
|
||||
ITERATIONS_DIR.mkdir(exist_ok=True)
|
||||
RESULTS_DIR.mkdir(exist_ok=True)
|
||||
|
||||
# Logging
|
||||
LOG_FILE = RESULTS_DIR / "optimization.log"
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s | %(levelname)-8s | %(message)s',
|
||||
handlers=[
|
||||
logging.StreamHandler(sys.stdout),
|
||||
logging.FileHandler(LOG_FILE, mode='a')
|
||||
]
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
# Objective names and weights
# ============================================================================

OBJ_NAMES = [
    'rel_filtered_rms_40_vs_20',
    'rel_filtered_rms_60_vs_20',
    'mfg_90_optician_workload'
]

# Weights for weighted-sum objective (from config)
OBJ_WEIGHTS = {
    'rel_filtered_rms_40_vs_20': 5.0,
    'rel_filtered_rms_60_vs_20': 5.0,
    'mfg_90_optician_workload': 1.0
}

DESIGN_VAR_NAMES = [
    'lateral_inner_angle', 'lateral_outer_angle', 'lateral_outer_pivot',
    'lateral_inner_pivot', 'lateral_middle_pivot', 'lateral_closeness',
    'whiffle_min', 'whiffle_outer_to_vertical', 'whiffle_triangle_closeness',
    'blank_backface_angle', 'inner_circular_rib_dia'
]


def compute_weighted_sum(objectives: Dict[str, float]) -> float:
    """Collapse the per-angle objectives into a single scalar for TPE.

    Each objective named in OBJ_WEIGHTS contributes ``weight * value``.
    An objective missing from *objectives* is treated as 1000.0, a large
    penalty so incomplete trials rank far behind any real FEA result.
    """
    return sum(
        weight * objectives.get(obj_name, 1000.0)
        for obj_name, weight in OBJ_WEIGHTS.items()
    )
||||
|
||||
|
||||
# ============================================================================
|
||||
# Prior Data Loader
|
||||
# ============================================================================
|
||||
|
||||
def load_fea_trials_from_db(db_path: Path, label: str) -> List[Dict]:
    """Load completed FEA trials from an Optuna SQLite database.

    Reads COMPLETE trials directly from the Optuna schema (``trials``,
    ``trial_user_attributes``, ``trial_params``) and keeps only trials whose
    ``source`` attribute contains 'FEA' and which carry all objectives named
    in OBJ_NAMES. Returns dicts with 'trial_num', 'params', 'objectives'
    and a 'source' tag prefixed with *label*.
    """
    if not db_path.exists():
        logger.warning(f"{label} database not found: {db_path}")
        return []

    loaded: List[Dict] = []
    conn = sqlite3.connect(str(db_path))

    try:
        cur = conn.cursor()

        cur.execute('''
            SELECT trial_id, number FROM trials
            WHERE state = 'COMPLETE'
        ''')

        for trial_id, trial_num in cur.fetchall():
            # User attributes hold the source tag and objective values.
            cur.execute('''
                SELECT key, value_json FROM trial_user_attributes
                WHERE trial_id = ?
            ''', (trial_id,))
            attrs = {key: json.loads(raw) for key, raw in cur.fetchall()}

            # Legacy studies have no 'source' attribute -> assume FEA.
            source = attrs.get('source', 'FEA')
            if isinstance(source, str) and 'FEA' not in source:
                continue  # Skip NN trials

            cur.execute('''
                SELECT param_name, param_value FROM trial_params
                WHERE trial_id = ?
            ''', (trial_id,))
            params = {pname: float(pval) for pname, pval in cur.fetchall()}
            if not params:
                continue

            # Objectives may be stored as one 'objectives' dict or as
            # individual per-name attributes.
            if 'objectives' in attrs:
                objectives = attrs['objectives']
            else:
                objectives = {name: attrs[name] for name in OBJ_NAMES if name in attrs}

            # Only seed trials that carry every objective we optimize.
            if all(name in objectives for name in OBJ_NAMES):
                loaded.append({
                    'trial_num': trial_num,
                    'params': params,
                    'objectives': objectives,
                    'source': f'{label}_{source}'
                })

    except Exception as e:
        logger.error(f"Error loading {label} data: {e}")
    finally:
        conn.close()

    logger.info(f"Loaded {len(loaded)} FEA trials from {label}")
    return loaded
||||
|
||||
|
||||
def load_all_prior_fea_data() -> List[Dict]:
    """Aggregate prior FEA trials from the V11, V12 and V13 study databases."""
    combined: List[Dict] = []
    for db_path, label in ((V11_DB, "V11"), (V12_DB, "V12"), (V13_DB, "V13")):
        combined.extend(load_fea_trials_from_db(db_path, label))

    logger.info(f"Total prior FEA trials: {len(combined)}")
    return combined
||||
|
||||
|
||||
# ============================================================================
|
||||
# FEA Runner
|
||||
# ============================================================================
|
||||
|
||||
class FEARunner:
    """Runs actual FEA simulations.

    Wraps an NX session (via ensure_nx_running) and an NXSolver instance;
    run_fea() creates an iteration folder, solves the model, and extracts
    the three Zernike-based objectives from the resulting OP2 file.
    """

    def __init__(self, config: Dict[str, Any]):
        # Full optimization config (design_variables, nx_settings, zernike_settings).
        self.config = config
        # Lazily created in setup(); run_fea() calls setup() on first use.
        self.nx_solver = None
        self.nx_manager = None
        # Master NX model lives under 1_setup/model.
        self.master_model_dir = SETUP_DIR / "model"

    def setup(self):
        """Setup NX and solver.

        Ensures an NX session is running (starting one if needed) and
        constructs the NXSolver pointed at the master model. Raises if the
        NX session cannot be established.
        """
        logger.info("Setting up NX session...")

        study_name = self.config.get('study_name', 'm1_mirror_adaptive_V14')

        try:
            self.nx_manager, nx_was_started = ensure_nx_running(
                session_id=study_name,
                auto_start=True,
                start_timeout=120
            )
            logger.info("NX session ready" + (" (started)" if nx_was_started else " (existing)"))
        except Exception as e:
            logger.error(f"Failed to setup NX: {e}")
            raise

        # Initialize solver. The Nastran version is parsed from the install
        # path (e.g. "...\\NX2506" -> "2506"), falling back to "2506".
        nx_settings = self.config.get('nx_settings', {})
        nx_install_dir = nx_settings.get('nx_install_path', 'C:\\Program Files\\Siemens\\NX2506')
        version_match = re.search(r'NX(\d+)', nx_install_dir)
        nastran_version = version_match.group(1) if version_match else "2506"

        self.nx_solver = NXSolver(
            master_model_dir=str(self.master_model_dir),
            nx_install_dir=nx_install_dir,
            nastran_version=nastran_version,
            timeout=nx_settings.get('simulation_timeout_s', 600),
            use_iteration_folders=True,
            # NOTE(review): hard-coded literal duplicates the study_name
            # variable read above from config -- confirm this is intentional.
            study_name="m1_mirror_adaptive_V14"
        )

    def run_fea(self, params: Dict[str, float], trial_num: int) -> Optional[Dict]:
        """Run FEA and extract objectives.

        Returns a result dict ('trial_num', 'params', 'objectives',
        'weighted_sum', 'source', 'solve_time') or None when the solve or
        the Zernike extraction fails. Never raises: all errors are logged.
        """
        if self.nx_solver is None:
            self.setup()

        logger.info(f" [FEA {trial_num}] Running simulation...")

        # Map optimization parameter names to NX expression names.
        # NOTE(review): iterates ALL design_variables; assumes each one is
        # enabled and present in params -- a disabled variable would KeyError.
        expressions = {var['expression_name']: params[var['name']]
                       for var in self.config['design_variables']}

        iter_folder = self.nx_solver.create_iteration_folder(
            iterations_base_dir=ITERATIONS_DIR,
            iteration_number=trial_num,
            expression_updates=expressions
        )

        try:
            nx_settings = self.config.get('nx_settings', {})
            sim_file = iter_folder / nx_settings.get('sim_file', 'ASSY_M1_assyfem1_sim1.sim')

            t_start = time.time()

            result = self.nx_solver.run_simulation(
                sim_file=sim_file,
                working_dir=iter_folder,
                expression_updates=expressions,
                solution_name=nx_settings.get('solution_name', 'Solution 1'),
                cleanup=False  # Keep OP2 so the dashboard can re-run Zernike analysis
            )

            solve_time = time.time() - t_start

            if not result['success']:
                logger.error(f" [FEA {trial_num}] Solve failed: {result.get('error')}")
                return None

            logger.info(f" [FEA {trial_num}] Solved in {solve_time:.1f}s")

            # Extract objectives
            op2_path = Path(result['op2_file'])
            objectives = self._extract_objectives(op2_path)

            if objectives is None:
                return None

            weighted_sum = compute_weighted_sum(objectives)

            logger.info(f" [FEA {trial_num}] 40-20: {objectives['rel_filtered_rms_40_vs_20']:.2f} nm")
            logger.info(f" [FEA {trial_num}] 60-20: {objectives['rel_filtered_rms_60_vs_20']:.2f} nm")
            logger.info(f" [FEA {trial_num}] Mfg: {objectives['mfg_90_optician_workload']:.2f} nm")
            logger.info(f" [FEA {trial_num}] Weighted Sum: {weighted_sum:.2f}")

            return {
                'trial_num': trial_num,
                'params': params,
                'objectives': objectives,
                'weighted_sum': weighted_sum,
                'source': 'FEA',
                'solve_time': solve_time
            }

        except Exception as e:
            logger.error(f" [FEA {trial_num}] Error: {e}")
            import traceback
            traceback.print_exc()
            return None

    def _extract_objectives(self, op2_path: Path) -> Optional[Dict[str, float]]:
        """Extract objectives using ZernikeExtractor.

        Subcases (per config zernike_settings): "1"=90deg, "2"=20deg (reference),
        "3"=40deg, "4"=60deg. Returns None (and logs) on any extraction error.
        """
        try:
            zernike_settings = self.config.get('zernike_settings', {})

            extractor = ZernikeExtractor(
                op2_path,
                bdf_path=None,
                displacement_unit=zernike_settings.get('displacement_unit', 'mm'),
                n_modes=zernike_settings.get('n_modes', 50),
                filter_orders=zernike_settings.get('filter_low_orders', 4)
            )

            ref = zernike_settings.get('reference_subcase', '2')

            rel_40 = extractor.extract_relative("3", ref)
            rel_60 = extractor.extract_relative("4", ref)
            rel_90 = extractor.extract_relative("1", ref)

            return {
                'rel_filtered_rms_40_vs_20': rel_40['relative_filtered_rms_nm'],
                'rel_filtered_rms_60_vs_20': rel_60['relative_filtered_rms_nm'],
                'mfg_90_optician_workload': rel_90['relative_rms_filter_j1to3']
            }

        except Exception as e:
            logger.error(f"Zernike extraction failed: {e}")
            return None

    def cleanup(self):
        """Cleanup NX session.

        Closes NX only when the session manager reports it is allowed to
        (the session may be shared), then releases manager resources.
        """
        if self.nx_manager:
            if self.nx_manager.can_close_nx():
                self.nx_manager.close_nx_if_allowed()
            self.nx_manager.cleanup()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# TPE Optimizer
|
||||
# ============================================================================
|
||||
|
||||
class TPEOptimizer:
|
||||
"""TPE-based FEA optimizer with weighted-sum objective."""
|
||||
|
||||
    def __init__(self, config: Dict[str, Any]):
        """Build the optimizer: load prior seeds, open the study database.

        Args:
            config: Parsed optimization_config.json (design variables,
                tpe_settings, nx_settings, zernike_settings).
        """
        self.config = config
        self.fea_runner = FEARunner(config)

        # Load prior data for seeding (V11+V12+V13 FEA trials)
        self.prior_data = load_all_prior_fea_data()

        # Database: Optuna RDB storage backed by 3_results/study.db
        self.db_path = RESULTS_DIR / "study.db"
        self.storage = optuna.storages.RDBStorage(f'sqlite:///{self.db_path}')

        # State: trial_count mirrors the iteration folder numbering;
        # best_value/best_trial track the lowest weighted sum seen so far.
        self.trial_count = 0
        self.best_value = float('inf')
        self.best_trial = None
|
||||
|
||||
def _get_next_trial_number(self) -> int:
|
||||
"""Get the next trial number based on existing iterations."""
|
||||
existing = list(ITERATIONS_DIR.glob("iter*"))
|
||||
if not existing:
|
||||
return 1
|
||||
max_num = max(int(p.name.replace("iter", "")) for p in existing)
|
||||
return max_num + 1
|
||||
|
||||
    def seed_from_prior(self, study: optuna.Study):
        """Seed the study with prior FEA trials.

        Inserts each prior trial as a FrozenTrial whose single value is the
        weighted-sum objective, so the TPE sampler starts from the combined
        V11+V12+V13 history instead of random exploration. Individual
        objective values are preserved as user attributes. Trials that fail
        to convert are logged and skipped, never fatal.
        """
        if not self.prior_data:
            logger.warning("No prior data to seed from")
            return

        logger.info(f"Seeding study with {len(self.prior_data)} prior FEA trials...")

        for i, d in enumerate(self.prior_data):
            try:
                # Distributions must match what objective() will suggest,
                # i.e. one FloatDistribution per enabled design variable.
                distributions = {}
                for var in self.config['design_variables']:
                    if var.get('enabled', False):
                        distributions[var['name']] = optuna.distributions.FloatDistribution(
                            var['min'], var['max']
                        )

                # Compute weighted sum for the prior trial
                weighted_sum = compute_weighted_sum(d['objectives'])

                # Create frozen trial
                frozen_trial = optuna.trial.create_trial(
                    params=d['params'],
                    distributions=distributions,
                    values=[weighted_sum],  # Single objective for TPE
                    user_attrs={
                        'source': d.get('source', 'prior_FEA'),
                        'rel_filtered_rms_40_vs_20': d['objectives']['rel_filtered_rms_40_vs_20'],
                        'rel_filtered_rms_60_vs_20': d['objectives']['rel_filtered_rms_60_vs_20'],
                        'mfg_90_optician_workload': d['objectives']['mfg_90_optician_workload'],
                        'weighted_sum': weighted_sum,
                    }
                )

                study.add_trial(frozen_trial)

                # Track best seed so run() logging starts from the true incumbent
                if weighted_sum < self.best_value:
                    self.best_value = weighted_sum
                    self.best_trial = d

            except Exception as e:
                logger.warning(f"Failed to seed trial {i}: {e}")

        logger.info(f"Seeded {len(study.trials)} trials")
        if self.best_trial:
            logger.info(f"Best prior: weighted_sum={self.best_value:.2f}")
            logger.info(f" 40-20: {self.best_trial['objectives']['rel_filtered_rms_40_vs_20']:.2f} nm")
            logger.info(f" 60-20: {self.best_trial['objectives']['rel_filtered_rms_60_vs_20']:.2f} nm")
            logger.info(f" Mfg: {self.best_trial['objectives']['mfg_90_optician_workload']:.2f} nm")
|
||||
|
||||
    def run(self, n_trials: int = 50, resume: bool = False):
        """Run TPE optimization.

        Creates (or, with resume=True, reloads) the "v14_tpe" study, seeds it
        from prior FEA data when starting fresh, then runs *n_trials* new FEA
        evaluations. KeyboardInterrupt stops optimization gracefully; the NX
        session is cleaned up in all cases.

        Args:
            n_trials: Number of new FEA trials to run.
            resume: Load the existing study instead of failing if it exists.
        """
        logger.info("\n" + "=" * 70)
        logger.info("M1 MIRROR TPE FEA OPTIMIZATION V14")
        logger.info("=" * 70)
        logger.info(f"Prior FEA trials: {len(self.prior_data)}")
        logger.info(f"New trials to run: {n_trials}")
        logger.info(f"Objectives: {OBJ_NAMES}")
        logger.info(f"Weights: {OBJ_WEIGHTS}")

        start_time = time.time()

        # Create or load study with TPE sampler (settings from config,
        # with the documented defaults as fallback).
        tpe_settings = self.config.get('tpe_settings', {})
        sampler = TPESampler(
            n_startup_trials=tpe_settings.get('n_startup_trials', 10),
            n_ei_candidates=tpe_settings.get('n_ei_candidates', 24),
            multivariate=tpe_settings.get('multivariate', True),
            constant_liar=tpe_settings.get('constant_liar', True),
            seed=tpe_settings.get('seed', 42)
        )

        study = optuna.create_study(
            study_name="v14_tpe",
            storage=self.storage,
            direction='minimize',  # Single objective - minimize weighted sum
            sampler=sampler,
            load_if_exists=resume
        )

        # Seed with prior data if starting fresh (or the resumed study is empty)
        if not resume or len(study.trials) == 0:
            self.seed_from_prior(study)

        # Iteration folders keep numbering continuous across resumed runs.
        self.trial_count = self._get_next_trial_number()
        logger.info(f"Starting from trial {self.trial_count}")

        # Run optimization
        def objective(trial: optuna.Trial) -> float:
            # Sample parameters for every enabled design variable
            params = {}
            for var in self.config['design_variables']:
                if var.get('enabled', False):
                    params[var['name']] = trial.suggest_float(var['name'], var['min'], var['max'])

            # Run FEA (trial_count advances even on failure so folder
            # numbering never collides)
            result = self.fea_runner.run_fea(params, self.trial_count)
            self.trial_count += 1

            if result is None:
                # Return worst-case value for failed trials
                return 10000.0

            # Store objectives as user attributes so the dashboard and
            # future seeding can recover the individual components.
            trial.set_user_attr('source', 'FEA')
            trial.set_user_attr('rel_filtered_rms_40_vs_20', result['objectives']['rel_filtered_rms_40_vs_20'])
            trial.set_user_attr('rel_filtered_rms_60_vs_20', result['objectives']['rel_filtered_rms_60_vs_20'])
            trial.set_user_attr('mfg_90_optician_workload', result['objectives']['mfg_90_optician_workload'])
            trial.set_user_attr('weighted_sum', result['weighted_sum'])
            trial.set_user_attr('solve_time', result.get('solve_time', 0))

            # Track best
            if result['weighted_sum'] < self.best_value:
                self.best_value = result['weighted_sum']
                logger.info(f" [NEW BEST] Weighted Sum: {self.best_value:.2f}")

            return result['weighted_sum']

        # Run
        try:
            study.optimize(
                objective,
                n_trials=n_trials,
                show_progress_bar=True,
                gc_after_trial=True
            )
        except KeyboardInterrupt:
            logger.info("\nOptimization interrupted by user")
        finally:
            # Always release the NX session, even on interrupt/error.
            self.fea_runner.cleanup()

        # Print results
        elapsed = time.time() - start_time
        self._print_results(study, elapsed)
|
||||
|
||||
def _print_results(self, study: optuna.Study, elapsed: float):
    """Log a summary of the finished study and save results to JSON.

    Logs the best trial, its parameters, and a top-10 table, then writes
    a machine-readable ``final_results.json`` into ``RESULTS_DIR``.

    Args:
        study: The optimized single-objective (minimize) Optuna study.
        elapsed: Wall-clock optimization time in seconds.
    """

    def fmt_nm(attrs: dict, key: str) -> str:
        # Bug fix: the old code did attrs.get(key, 'N/A') and then applied
        # ':.2f' — formatting the string fallback raises ValueError. Only
        # format real numbers; otherwise show the literal 'N/A'.
        value = attrs.get(key)
        return f"{value:.2f}" if isinstance(value, (int, float)) else "N/A"

    logger.info("\n" + "=" * 70)
    logger.info("OPTIMIZATION COMPLETE")
    logger.info("=" * 70)
    logger.info(f"Time: {elapsed/60:.1f} min ({elapsed/3600:.2f} hours)")
    logger.info(f"Total trials: {len(study.trials)}")

    # study.best_trial raises ValueError when nothing completed — bail out
    # gracefully instead of crashing after an aborted run.
    if not any(t.value is not None for t in study.trials):
        logger.warning("No completed trials - nothing to report.")
        return

    # Get best trial
    best_trial = study.best_trial
    logger.info(f"\nBest Trial: #{best_trial.number}")
    logger.info(f" Weighted Sum: {best_trial.value:.2f}")
    logger.info(f" 40-20: {fmt_nm(best_trial.user_attrs, 'rel_filtered_rms_40_vs_20')} nm")
    logger.info(f" 60-20: {fmt_nm(best_trial.user_attrs, 'rel_filtered_rms_60_vs_20')} nm")
    logger.info(f" Mfg: {fmt_nm(best_trial.user_attrs, 'mfg_90_optician_workload')} nm")

    logger.info("\nBest Parameters:")
    for name, value in best_trial.params.items():
        logger.info(f" {name}: {value:.4f}")

    # Top 10 successful trials; failed trials were recorded with the
    # 10000.0 sentinel and are excluded here.
    sorted_trials = sorted(
        [t for t in study.trials if t.value is not None and t.value < 10000],
        key=lambda t: t.value
    )[:10]

    logger.info("\nTop 10 Trials:")
    logger.info("-" * 90)
    logger.info(f"{'#':>4} {'WeightedSum':>12} {'40-20 (nm)':>12} {'60-20 (nm)':>12} {'Mfg (nm)':>12} {'Source':>10}")
    logger.info("-" * 90)

    for trial in sorted_trials:
        source = trial.user_attrs.get('source', 'unknown')[:10]
        logger.info(
            f"{trial.number:>4} "
            f"{trial.value:>12.2f} "
            f"{trial.user_attrs.get('rel_filtered_rms_40_vs_20', 0):>12.2f} "
            f"{trial.user_attrs.get('rel_filtered_rms_60_vs_20', 0):>12.2f} "
            f"{trial.user_attrs.get('mfg_90_optician_workload', 0):>12.2f} "
            f"{source:>10}"
        )

    # Save results — a JSON mirror of what was just logged.
    results = {
        'summary': {
            'total_trials': len(study.trials),
            'best_weighted_sum': best_trial.value,
            'elapsed_hours': elapsed / 3600
        },
        'best_trial': {
            'number': best_trial.number,
            'params': best_trial.params,
            'objectives': {
                'rel_filtered_rms_40_vs_20': best_trial.user_attrs.get('rel_filtered_rms_40_vs_20'),
                'rel_filtered_rms_60_vs_20': best_trial.user_attrs.get('rel_filtered_rms_60_vs_20'),
                'mfg_90_optician_workload': best_trial.user_attrs.get('mfg_90_optician_workload'),
            },
            'weighted_sum': best_trial.value
        },
        'top_10': [
            {
                'trial': t.number,
                'weighted_sum': t.value,
                'params': t.params,
                'objectives': {
                    'rel_filtered_rms_40_vs_20': t.user_attrs.get('rel_filtered_rms_40_vs_20'),
                    'rel_filtered_rms_60_vs_20': t.user_attrs.get('rel_filtered_rms_60_vs_20'),
                    'mfg_90_optician_workload': t.user_attrs.get('mfg_90_optician_workload'),
                }
            }
            for t in sorted_trials
        ]
    }

    with open(RESULTS_DIR / 'final_results.json', 'w') as f:
        json.dump(results, f, indent=2)

    logger.info(f"\nResults saved to {RESULTS_DIR / 'final_results.json'}")
|
||||
|
||||
# ============================================================================
|
||||
# Main
|
||||
# ============================================================================
|
||||
|
||||
def main():
    """CLI entry point: show usage, or launch the V14 TPE optimization."""
    parser = argparse.ArgumentParser(description='M1 Mirror TPE V14')
    parser.add_argument('--start', action='store_true', help='Start optimization')
    parser.add_argument('--trials', type=int, default=50, help='Number of new FEA trials')
    parser.add_argument('--resume', action='store_true', help='Resume from existing study')
    args = parser.parse_args()

    # Guard clause: without --start, print the usage banner and stop.
    if not args.start:
        banner = [
            "M1 Mirror TPE FEA Optimization V14",
            "=" * 50,
            "\nUsage:",
            "  python run_optimization.py --start",
            "  python run_optimization.py --start --trials 55",
            "  python run_optimization.py --start --trials 55 --resume",
            "\nFor 8-hour overnight run (~55 trials at 8-9 min/trial):",
            "  python run_optimization.py --start --trials 55",
            "\nThis will:",
            "  1. Load FEA trials from V11, V12, V13 databases",
            "  2. Seed TPE with all prior FEA data",
            "  3. Run TPE optimization with weighted-sum objective",
            "  4. Weights: 5*obj_40 + 5*obj_60 + 1*obj_mfg",
        ]
        print("\n".join(banner))
        return

    # Load the study configuration, then hand off to the optimizer.
    with open(CONFIG_PATH, 'r') as f:
        config = json.load(f)

    optimizer = TPEOptimizer(config)
    optimizer.run(n_trials=args.trials, resume=args.resume)
|
||||
|
||||
|
||||
# Script entry point: parse CLI arguments and run the V14 TPE study.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user