feat: Major update - Physics docs, Zernike OPD, insights, NX journals, tools
Documentation: - Add docs/06_PHYSICS/ with Zernike fundamentals and OPD method docs - Add docs/guides/CMA-ES_EXPLAINED.md optimization guide - Update CLAUDE.md and ATOMIZER_CONTEXT.md with current architecture - Update OP_01_CREATE_STUDY protocol Planning: - Add DYNAMIC_RESPONSE plans for random vibration/PSD support - Add OPTIMIZATION_ENGINE_MIGRATION_PLAN for code reorganization Insights System: - Update design_space, modal_analysis, stress_field, thermal_field insights - Improve error handling and data validation NX Journals: - Add analyze_wfe_zernike.py for Zernike WFE analysis - Add capture_study_images.py for automated screenshots - Add extract_expressions.py and introspect_part.py utilities - Add user_generated_journals/journal_top_view_image_taking.py Tests & Tools: - Add comprehensive Zernike OPD test suite - Add audit_v10 tests for WFE validation - Add tools for Pareto graphs and mirror data extraction - Add migrate_studies_to_topics.py utility Knowledge Base: - Initialize LAC (Learning Atomizer Core) with failure/success patterns Dashboard: - Update Setup.tsx and launch_dashboard.py - Add restart-dev.bat helper script 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
388
tools/create_pareto_graphs.py
Normal file
388
tools/create_pareto_graphs.py
Normal file
@@ -0,0 +1,388 @@
|
||||
"""
|
||||
Create Pareto front visualizations for M1 Mirror optimization data.
|
||||
Shows relationship between geometric parameters and 60/20 WFE performance.
|
||||
"""
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.colors import LinearSegmentedColormap
|
||||
import matplotlib.patches as mpatches
|
||||
|
||||
# Set style for publication-quality plots
|
||||
plt.rcParams.update({
|
||||
'font.family': 'sans-serif',
|
||||
'font.sans-serif': ['Arial', 'Helvetica', 'DejaVu Sans'],
|
||||
'font.size': 11,
|
||||
'axes.titlesize': 16,
|
||||
'axes.labelsize': 13,
|
||||
'xtick.labelsize': 11,
|
||||
'ytick.labelsize': 11,
|
||||
'legend.fontsize': 10,
|
||||
'figure.dpi': 150,
|
||||
'savefig.dpi': 150,
|
||||
'axes.spines.top': False,
|
||||
'axes.spines.right': False,
|
||||
})
|
||||
|
||||
# Load data
|
||||
df = pd.read_csv(r'c:\Users\antoi\Atomizer\studies\m1_mirror_all_trials_export.csv')
|
||||
|
||||
print("=== Data Overview ===")
|
||||
print(f"Total rows: {len(df)}")
|
||||
print(f"\nColumn data availability:")
|
||||
for col in df.columns:
|
||||
non_null = df[col].notna().sum()
|
||||
if non_null > 0:
|
||||
print(f" {col}: {non_null} ({100*non_null/len(df):.1f}%)")
|
||||
|
||||
print(f"\nStudies: {df['study'].unique()}")
|
||||
|
||||
# Filter for rows with the key parameters
|
||||
thickness_col = 'center_thickness'
|
||||
angle_col = 'blank_backface_angle'
|
||||
wfe_col = 'rel_filtered_rms_60_vs_20'
|
||||
|
||||
print(f"\n=== Key columns ===")
|
||||
print(f"center_thickness non-null: {df[thickness_col].notna().sum()}")
|
||||
print(f"blank_backface_angle non-null: {df[angle_col].notna().sum()}")
|
||||
print(f"rel_filtered_rms_60_vs_20 non-null: {df[wfe_col].notna().sum()}")
|
||||
|
||||
# Create filtered dataset with valid WFE values
|
||||
df_valid = df[df[wfe_col].notna()].copy()
|
||||
print(f"\nRows with valid WFE data (before outlier removal): {len(df_valid)}")
|
||||
|
||||
if len(df_valid) == 0:
|
||||
print("No valid WFE data found!")
|
||||
exit()
|
||||
|
||||
# Remove outliers - WFE values above 1000 are clearly failed simulations
|
||||
WFE_THRESHOLD = 100 # Reasonable upper bound for WFE ratio
|
||||
df_valid = df_valid[df_valid[wfe_col] < WFE_THRESHOLD].copy()
|
||||
print(f"Rows with valid WFE data (after outlier removal, WFE < {WFE_THRESHOLD}): {len(df_valid)}")
|
||||
|
||||
# Show ranges
|
||||
print(f"\n=== Value ranges (clean data) ===")
|
||||
if df_valid[thickness_col].notna().any():
|
||||
print(f"center_thickness: {df_valid[thickness_col].min():.2f} - {df_valid[thickness_col].max():.2f} mm")
|
||||
if df_valid[angle_col].notna().any():
|
||||
print(f"blank_backface_angle: {df_valid[angle_col].min():.2f} - {df_valid[angle_col].max():.2f}°")
|
||||
print(f"rel_filtered_rms_60_vs_20: {df_valid[wfe_col].min():.4f} - {df_valid[wfe_col].max():.4f}")
|
||||
|
||||
# Also check mass
|
||||
if 'mass_kg' in df_valid.columns and df_valid['mass_kg'].notna().any():
|
||||
print(f"mass_kg: {df_valid['mass_kg'].min():.2f} - {df_valid['mass_kg'].max():.2f} kg")
|
||||
|
||||
|
||||
def compute_pareto_front(x, y, minimize_x=True, minimize_y=True):
    """
    Return the indices of the points that lie on the Pareto front.

    A point is on the front when no other point is at least as good in both
    objectives and strictly better in at least one. Maximized objectives are
    handled by negating that coordinate before the domination test.
    """
    pts = np.column_stack([x, y])

    # Flip sign so every objective becomes "smaller is better".
    if not minimize_x:
        pts[:, 0] = -pts[:, 0]
    if not minimize_y:
        pts[:, 1] = -pts[:, 1]

    # Pairwise domination test, vectorized:
    #   weak[j, i]   -> pts[j] <= pts[i] in every objective
    #   strict[j, i] -> pts[j] <  pts[i] in at least one objective
    # Point i is dominated when some j satisfies both.
    weak = (pts[:, None, :] <= pts[None, :, :]).all(axis=2)
    strict = (pts[:, None, :] < pts[None, :, :]).any(axis=2)
    dominated = (weak & strict).any(axis=0)

    return np.nonzero(~dominated)[0]
|
||||
|
||||
|
||||
def create_pareto_plot(df_plot, x_col, y_col, x_label, y_label, title, filename,
                       minimize_x=True, minimize_y=True, color_by=None, color_label=None):
    """Create a publication-quality Pareto front plot.

    Args:
        df_plot: DataFrame of candidate designs.
        x_col, y_col: column names plotted on the x / y axes.
        x_label, y_label: axis labels.
        title: figure title.
        filename: output image path.
        minimize_x, minimize_y: optimization direction per axis (True = lower is better).
        color_by: optional column name used to color the scatter points.
        color_label: colorbar label (falls back to color_by).

    Returns:
        (pareto_sorted, pareto_idx): Pareto points sorted by x and their row
        indices into the filtered data, or None when fewer than two valid rows exist.
    """
    # Keep only rows where both plotted columns are present
    mask = df_plot[x_col].notna() & df_plot[y_col].notna()
    df_clean = df_plot[mask].copy()

    if len(df_clean) < 2:
        print(f"Not enough data for {title}")
        return None

    x = df_clean[x_col].values
    y = df_clean[y_col].values

    # Compute Pareto front
    pareto_idx = compute_pareto_front(x, y, minimize_x, minimize_y)

    # Sort Pareto points by x for line drawing
    pareto_points = np.column_stack([x[pareto_idx], y[pareto_idx]])
    sort_idx = np.argsort(pareto_points[:, 0])
    pareto_sorted = pareto_points[sort_idx]

    # Create figure with professional styling
    fig, ax = plt.subplots(figsize=(12, 8))
    fig.patch.set_facecolor('white')

    # Professional color palette
    bg_color = '#f8f9fa'
    grid_color = '#dee2e6'
    point_color = '#6c757d'
    pareto_color = '#dc3545'
    pareto_fill = '#ffc107'

    ax.set_facecolor(bg_color)

    # Color scheme - color by an extra column (e.g. mass) only when enough
    # rows actually carry that value
    if color_by is not None and color_by in df_clean.columns and df_clean[color_by].notna().sum() > 10:
        color_mask = df_clean[color_by].notna()
        colors = df_clean.loc[color_mask, color_by].values

        # Plot non-colored points in gray
        ax.scatter(x[~color_mask.values], y[~color_mask.values],
                   c=point_color, alpha=0.3, s=40,
                   edgecolors='white', linewidth=0.3, zorder=2)

        # Plot colored points
        scatter = ax.scatter(x[color_mask.values], y[color_mask.values],
                             c=colors, cmap='plasma', alpha=0.7, s=60,
                             edgecolors='white', linewidth=0.5, zorder=2)
        cbar = plt.colorbar(scatter, ax=ax, pad=0.02, shrink=0.8)
        cbar.set_label(color_label or color_by, fontsize=12, fontweight='bold')
        cbar.ax.tick_params(labelsize=10)
    else:
        ax.scatter(x, y, c=point_color, alpha=0.4, s=50,
                   edgecolors='white', linewidth=0.3, zorder=2, label='Design candidates')

    # Draw the Pareto front line (visual emphasis)
    if len(pareto_sorted) > 1:
        # Local import: scipy only needed when a front line is drawn
        from scipy.interpolate import interp1d
        if len(pareto_sorted) >= 4:
            # Use cubic interpolation for a smooth curve
            try:
                f = interp1d(pareto_sorted[:, 0], pareto_sorted[:, 1], kind='cubic')
                x_smooth = np.linspace(pareto_sorted[:, 0].min(), pareto_sorted[:, 0].max(), 100)
                y_smooth = f(x_smooth)
                ax.plot(x_smooth, y_smooth, color=pareto_color, linewidth=3, alpha=0.9, zorder=3)
            except Exception:
                # Cubic fit can fail (e.g. duplicate x values); deliberately
                # fall back to a straight polyline through the Pareto points.
                ax.plot(pareto_sorted[:, 0], pareto_sorted[:, 1], color=pareto_color,
                        linewidth=3, alpha=0.9, zorder=3)
        else:
            ax.plot(pareto_sorted[:, 0], pareto_sorted[:, 1], color=pareto_color,
                    linewidth=3, alpha=0.9, zorder=3)

    # Plot Pareto front points with emphasis
    ax.scatter(x[pareto_idx], y[pareto_idx], c=pareto_fill, s=180,
               edgecolors=pareto_color, linewidth=2.5, zorder=5,
               label=f'Pareto optimal ({len(pareto_idx)} designs)')

    # Styling
    ax.set_xlabel(x_label, fontsize=14, fontweight='bold', labelpad=12)
    ax.set_ylabel(y_label, fontsize=14, fontweight='bold', labelpad=12)
    ax.set_title(title, fontsize=18, fontweight='bold', pad=20, color='#212529')

    # Refined grid
    ax.grid(True, alpha=0.5, linestyle='-', linewidth=0.5, color=grid_color)
    ax.set_axisbelow(True)

    # Add minor grid
    ax.minorticks_on()
    ax.grid(True, which='minor', alpha=0.2, linestyle=':', linewidth=0.3, color=grid_color)

    # Legend with professional styling (return value unused, so not kept)
    ax.legend(loc='upper right', fontsize=11, framealpha=0.95,
              edgecolor=grid_color, fancybox=True, shadow=True)

    # Locate the best point along the optimized y objective
    if minimize_y:
        best_idx = pareto_idx[np.argmin(y[pareto_idx])]
    else:
        best_idx = pareto_idx[np.argmax(y[pareto_idx])]

    # Position the annotation box in the quadrant away from the best point
    x_range = x.max() - x.min()
    y_range = y.max() - y.min()
    x_mid = x.min() + x_range / 2
    y_mid = y.min() + y_range / 2

    if x[best_idx] < x_mid:
        x_offset = 50
    else:
        x_offset = -120
    if y[best_idx] < y_mid:
        y_offset = 50
    else:
        y_offset = -60

    ax.annotate(f'Best WFE: {y[best_idx]:.2f}\n{x_label.split()[0]}: {x[best_idx]:.1f}',
                xy=(x[best_idx], y[best_idx]),
                xytext=(x_offset, y_offset), textcoords='offset points',
                fontsize=11, fontweight='bold',
                bbox=dict(boxstyle='round,pad=0.6', facecolor='white',
                          edgecolor=pareto_color, linewidth=2, alpha=0.95),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.2',
                                color=pareto_color, lw=2))

    # Statistics box in bottom left
    stats_text = f'Total designs explored: {len(df_clean):,}\nPareto optimal: {len(pareto_idx)}'
    ax.text(0.02, 0.02, stats_text, transform=ax.transAxes, fontsize=10,
            verticalalignment='bottom',
            bbox=dict(boxstyle='round,pad=0.5', facecolor='white',
                      edgecolor=grid_color, alpha=0.9))

    # Adjust spines
    for spine in ax.spines.values():
        spine.set_color(grid_color)
        spine.set_linewidth(1.5)

    plt.tight_layout()
    plt.savefig(filename, dpi=200, bbox_inches='tight', facecolor='white',
                edgecolor='none', pad_inches=0.2)
    plt.close()
    # BUGFIX: report the actual output path (the message previously contained
    # no placeholder at all)
    print(f"Saved: {filename}")

    return pareto_sorted, pareto_idx
|
||||
|
||||
|
||||
# Create plots
output_dir = r'c:\Users\antoi\Atomizer\studies'

# 1. Blank Thickness vs 60/20 WFE
print("\n--- Creating Blank Thickness vs WFE plot ---")
if df_valid[thickness_col].notna().any():
    result = create_pareto_plot(
        df_valid,
        x_col=thickness_col,
        y_col=wfe_col,
        x_label='Blank Thickness (mm)',
        y_label='60/20 WFE (Relative RMS)',
        title='M1 Mirror Optimization\nBlank Thickness vs Wavefront Error',
        filename=f'{output_dir}\\pareto_thickness_vs_wfe.png',
        minimize_x=False,  # Thinner may be desirable
        minimize_y=True,  # Lower WFE is better
        color_by='mass_kg' if 'mass_kg' in df_valid.columns else None,
        color_label='Mass (kg)'
    )
else:
    print("No thickness data available")

# 2. Blank Backface Angle vs 60/20 WFE
print("\n--- Creating Backface Angle vs WFE plot ---")
if df_valid[angle_col].notna().any():
    result = create_pareto_plot(
        df_valid,
        x_col=angle_col,
        y_col=wfe_col,
        x_label='Blank Backface Angle (degrees)',
        y_label='60/20 WFE (Relative RMS)',
        title='M1 Mirror Optimization\nBackface Angle vs Wavefront Error',
        filename=f'{output_dir}\\pareto_angle_vs_wfe.png',
        minimize_x=False,
        minimize_y=True,
        color_by='mass_kg' if 'mass_kg' in df_valid.columns else None,
        color_label='Mass (kg)'
    )
else:
    print("No backface angle data available")

# 3. Combined 2D design-space plot: thickness vs angle, colored by WFE
print("\n--- Creating Design Space plot ---")
if df_valid[thickness_col].notna().any() and df_valid[angle_col].notna().any():
    mask = df_valid[thickness_col].notna() & df_valid[angle_col].notna()
    df_both = df_valid[mask].copy()

    if len(df_both) > 0:
        fig, ax = plt.subplots(figsize=(12, 9))
        fig.patch.set_facecolor('white')

        bg_color = '#f8f9fa'
        grid_color = '#dee2e6'
        ax.set_facecolor(bg_color)

        # Use a perceptually uniform colormap; clip color range to the
        # 5th-95th percentile so outliers don't wash out the scale
        scatter = ax.scatter(
            df_both[thickness_col],
            df_both[angle_col],
            c=df_both[wfe_col],
            cmap='RdYlGn_r',  # Red=bad (high WFE), Green=good (low WFE)
            s=100,
            alpha=0.8,
            edgecolors='white',
            linewidth=0.5,
            vmin=df_both[wfe_col].quantile(0.05),
            vmax=df_both[wfe_col].quantile(0.95)
        )

        cbar = plt.colorbar(scatter, ax=ax, pad=0.02, shrink=0.85)
        cbar.set_label('60/20 WFE (Relative RMS)\nLower = Better Performance',
                       fontsize=12, fontweight='bold')
        cbar.ax.tick_params(labelsize=10)

        ax.set_xlabel('Blank Thickness (mm)', fontsize=14, fontweight='bold', labelpad=12)
        ax.set_ylabel('Blank Backface Angle (degrees)', fontsize=14, fontweight='bold', labelpad=12)
        ax.set_title('M1 Mirror Design Space Exploration\nGeometric Parameters vs Optical Performance',
                     fontsize=18, fontweight='bold', pad=20)

        ax.grid(True, alpha=0.5, color=grid_color)
        ax.minorticks_on()
        ax.grid(True, which='minor', alpha=0.2, linestyle=':', color=grid_color)

        # Mark best point (lowest WFE) with a star
        best_idx = df_both[wfe_col].idxmin()
        best_row = df_both.loc[best_idx]
        ax.scatter(best_row[thickness_col], best_row[angle_col],
                   c='#ffc107', s=400, marker='*', edgecolors='#dc3545', linewidth=3,
                   zorder=5, label=f'Best Design (WFE={best_row[wfe_col]:.2f})')

        # Add annotation for best point - position in upper left to avoid overlap
        ax.annotate(f'Best Design\nThickness: {best_row[thickness_col]:.1f}mm\nAngle: {best_row[angle_col]:.2f}°\nWFE: {best_row[wfe_col]:.2f}',
                    xy=(best_row[thickness_col], best_row[angle_col]),
                    xytext=(-100, 60), textcoords='offset points',
                    fontsize=10, fontweight='bold',
                    bbox=dict(boxstyle='round,pad=0.6', facecolor='white',
                              edgecolor='#dc3545', linewidth=2, alpha=0.95),
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.3',
                                    color='#dc3545', lw=2))

        ax.legend(loc='upper right', fontsize=11, framealpha=0.95, fancybox=True, shadow=True)

        # Stats box in the bottom-left corner
        stats_text = f'Designs evaluated: {len(df_both):,}'
        ax.text(0.02, 0.02, stats_text, transform=ax.transAxes, fontsize=10,
                verticalalignment='bottom',
                bbox=dict(boxstyle='round,pad=0.5', facecolor='white',
                          edgecolor=grid_color, alpha=0.9))

        for spine in ax.spines.values():
            spine.set_color(grid_color)
            spine.set_linewidth(1.5)

        plt.tight_layout()
        plt.savefig(f'{output_dir}\\design_space_wfe.png',
                    dpi=200, bbox_inches='tight', facecolor='white', pad_inches=0.2)
        plt.close()
        print(f"Saved: design_space_wfe.png")
    else:
        print("Not enough data for combined design space plot")

# Final summary banner
print("\n" + "="*60)
print("PARETO VISUALIZATION COMPLETE")
print("="*60)
print(f"\nOutput files saved to: {output_dir}")
print("\nFiles created:")
print(" 1. pareto_thickness_vs_wfe.png - Thickness vs WFE Pareto front")
print(" 2. pareto_angle_vs_wfe.png - Backface Angle vs WFE Pareto front")
print(" 3. design_space_wfe.png - Combined design space heatmap")
|
||||
192
tools/extract_all_mirror_data.py
Normal file
192
tools/extract_all_mirror_data.py
Normal file
@@ -0,0 +1,192 @@
|
||||
#!/usr/bin/env python
"""
Extract all M1 mirror optimization trial data from Optuna study databases.
Outputs a consolidated CSV file with all parameters and objectives.
"""

import sqlite3
import json
import csv
from pathlib import Path
from collections import defaultdict  # NOTE(review): currently unused — confirm before removing

# Studies to extract (in order)
STUDIES = [
    "m1_mirror_zernike_optimization",
    "m1_mirror_adaptive_V11",
    "m1_mirror_adaptive_V13",
    "m1_mirror_adaptive_V14",
    "m1_mirror_adaptive_V15",
    "m1_mirror_cost_reduction",
    "m1_mirror_cost_reduction_V2",
]

# All possible design variables (superset across all studies; studies that
# lack a variable get an empty cell in the exported CSV)
DESIGN_VARS = [
    "lateral_inner_angle",
    "lateral_outer_angle",
    "lateral_outer_pivot",
    "lateral_inner_pivot",
    "lateral_middle_pivot",
    "lateral_closeness",
    "whiffle_min",
    "whiffle_outer_to_vertical",
    "whiffle_triangle_closeness",
    "blank_backface_angle",
    "inner_circular_rib_dia",
    "center_thickness",
]

# All objectives (read from trial user attributes, see extract_trials_from_db)
OBJECTIVES = [
    "rel_filtered_rms_40_vs_20",
    "rel_filtered_rms_60_vs_20",
    "mfg_90_optician_workload",
    "mass_kg",
]
|
||||
|
||||
|
||||
def get_db_path(study_name: str) -> Path:
    """Return the path to a study's Optuna SQLite database, or None.

    Searches the topic-based layout (studies/M1_Mirror/<study>/...) before
    the legacy flat layout (studies/<study>/...); within each layout,
    3_results is preferred over 2_results.

    Args:
        study_name: Name of the study folder.

    Returns:
        Path to the first existing study.db, or None if none is found.
    """
    # NOTE(review): Path(__file__).parent points at this script's own folder
    # (tools/), so "studies" is expected *beside* the script. A sibling tool
    # uses parent.parent to reach the project root — confirm which is right.
    studies_root = Path(__file__).parent / "studies"

    # Deduplicated search: new topic-based structure first, then the flat
    # structure kept for backwards compatibility.
    candidate_bases = (
        studies_root / "M1_Mirror" / study_name,
        studies_root / study_name,
    )
    for base in candidate_bases:
        for subdir in ("3_results", "2_results"):
            db_path = base / subdir / "study.db"
            if db_path.exists():
                return db_path
    return None
|
||||
|
||||
|
||||
def get_config_path(study_name: str) -> Path:
    """Return the optimization_config.json path for a study.

    Prefers the topic-based layout (studies/M1_Mirror/<study>/...) when that
    file exists on disk; otherwise returns the legacy flat-layout path
    (which may or may not exist — callers check existence themselves).
    """
    studies_root = Path(__file__).parent / "studies"
    config_rel = Path("1_setup") / "optimization_config.json"

    topic_config = studies_root / "M1_Mirror" / study_name / config_rel
    if topic_config.exists():
        return topic_config

    # Fallback to flat structure
    return studies_root / study_name / config_rel
|
||||
|
||||
|
||||
def load_objective_mapping(config_path: Path) -> dict:
    """Build an objective_id -> objective-name mapping from a study config.

    Objective ids 0, 1, 2, ... correspond to the entries of the config's
    "objectives" list in order; a missing list yields an empty mapping.
    """
    config = json.loads(Path(config_path).read_text())

    mapping = {}
    for objective_id, objective in enumerate(config.get("objectives", [])):
        mapping[objective_id] = objective["name"]
    return mapping
|
||||
|
||||
|
||||
def extract_trials_from_db(db_path: Path, obj_mapping: dict) -> list:
    """Extract all completed trials from an Optuna study database.

    Args:
        db_path: Path to the study's SQLite database.
        obj_mapping: objective_id -> objective-name mapping. Not consumed by
            the user-attribute extraction below; kept for interface
            compatibility with callers.

    Returns:
        One dict per COMPLETE trial containing 'trial_id', every sampled
        parameter, and every user attribute (numeric values coerced to float,
        quoted JSON strings unquoted).
    """
    conn = sqlite3.connect(str(db_path))
    try:
        cursor = conn.cursor()

        # Get all completed trials
        cursor.execute("""
        SELECT trial_id FROM trials WHERE state = 'COMPLETE'
        """)
        trial_ids = [row[0] for row in cursor.fetchall()]

        trials = []
        for trial_id in trial_ids:
            trial_data = {"trial_id": trial_id}

            # Get parameters
            cursor.execute("""
            SELECT param_name, param_value FROM trial_params WHERE trial_id = ?
            """, (trial_id,))
            for param_name, param_value in cursor.fetchall():
                trial_data[param_name] = param_value

            # Get individual objective values from user attributes
            # (Atomizer stores individual objectives here, weighted_sum in trial_values)
            cursor.execute("""
            SELECT key, value_json FROM trial_user_attributes WHERE trial_id = ?
            """, (trial_id,))
            for key, value in cursor.fetchall():
                # The value is JSON-encoded (quoted for strings, plain for numbers)
                try:
                    # Try to parse as float first
                    trial_data[key] = float(value)
                except (ValueError, TypeError):
                    # BUGFIX: TypeError covers NULL values, which float() and
                    # .strip() would both previously have crashed on.
                    # Keep strings (e.g., a source tag) with JSON quotes removed.
                    trial_data[key] = value.strip('"') if isinstance(value, str) else value

            trials.append(trial_data)
    finally:
        # BUGFIX: always release the connection, even if a query raises.
        conn.close()
    return trials
|
||||
|
||||
|
||||
def main():
    """Export every completed trial from all configured studies to one CSV.

    Iterates STUDIES in order, skipping any study whose database or config
    is missing, and writes studies/m1_mirror_all_trials_export.csv with one
    row per trial (blank cells for variables a study does not define).
    """
    studies_dir = Path(__file__).parent / "studies"
    output_path = studies_dir / "m1_mirror_all_trials_export.csv"

    # CSV header: identification columns, then the variable/objective superset
    header = ["study", "trial"] + DESIGN_VARS + OBJECTIVES

    all_rows = []
    stats = {}  # study name -> number of exported trials (0 when skipped)

    for study_name in STUDIES:
        db_path = get_db_path(study_name)
        config_path = get_config_path(study_name)

        if not db_path or not db_path.exists():
            print(f"[SKIP] {study_name}: No database found")
            stats[study_name] = 0
            continue

        if not config_path.exists():
            print(f"[SKIP] {study_name}: No config found")
            stats[study_name] = 0
            continue

        print(f"[LOAD] {study_name}...")

        # Load objective mapping from config
        obj_mapping = load_objective_mapping(config_path)

        # Extract trials
        trials = extract_trials_from_db(db_path, obj_mapping)
        stats[study_name] = len(trials)

        # Convert each trial dict to a flat CSV row; missing values stay ""
        for trial in trials:
            row = {
                "study": study_name,
                "trial": trial["trial_id"],
            }
            # Add design variables
            for var in DESIGN_VARS:
                row[var] = trial.get(var, "")
            # Add objectives
            for obj in OBJECTIVES:
                row[obj] = trial.get(obj, "")

            all_rows.append(row)

    # Write CSV (newline="" per the csv module's file-opening convention)
    with open(output_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=header)
        writer.writeheader()
        writer.writerows(all_rows)

    print(f"\n{'='*60}")
    print(f"EXPORT COMPLETE: {output_path}")
    print(f"{'='*60}")
    print(f"\nTotal trials exported: {len(all_rows)}")
    print(f"\nTrials per study:")
    for study, count in stats.items():
        print(f" {study}: {count}")


if __name__ == "__main__":
    main()
|
||||
294
tools/extract_mirror_optical_specs.py
Normal file
294
tools/extract_mirror_optical_specs.py
Normal file
@@ -0,0 +1,294 @@
|
||||
#!/usr/bin/env python
"""
Extract Mirror Optical Specifications from FEA Mesh Geometry

This tool analyzes mirror mesh geometry to estimate optical specifications
including focal length, aperture diameter, f-number, and radius of curvature.

Usage:
    # From study directory containing OP2 files
    python -m optimization_engine.tools.extract_mirror_optical_specs .

    # From specific OP2 file
    python -m optimization_engine.tools.extract_mirror_optical_specs path/to/results.op2

    # Save to study README
    python -m optimization_engine.tools.extract_mirror_optical_specs . --update-readme

Output:
    - Console: Optical specifications summary
    - Optional: Updates parent README.md with validated specs

Author: Atomizer Framework
"""

from pathlib import Path
import argparse
import sys

# Add project root to path (tools/ is at project root, so parent is Atomizer/)
sys.path.insert(0, str(Path(__file__).parent.parent))

import numpy as np
|
||||
|
||||
|
||||
def find_op2_file(path: Path) -> Path:
    """Locate an OP2 results file given either a file path or a directory.

    A direct path to a .op2 file is returned as-is. For a directory, common
    result locations are searched most-specific-first, and within the first
    matching pattern the newest file (by modification time) wins.

    Raises:
        FileNotFoundError: when no OP2 file can be located.
    """
    target = Path(path)

    # Direct hit: an existing .op2 file needs no searching.
    if target.is_file() and target.suffix.lower() == '.op2':
        return target

    if target.is_dir():
        # Common result locations, ordered from most to least specific.
        patterns = (
            '**/2_iterations/**/*.op2',
            '**/*.op2',
            '2_iterations/**/*.op2',
            '1_setup/model/*.op2',
        )
        for pattern in patterns:
            matches = list(target.glob(pattern))
            if matches:
                # Newest file by modification time.
                return max(matches, key=lambda f: f.stat().st_mtime)

    raise FileNotFoundError(f"No OP2 file found in {target}")
|
||||
|
||||
|
||||
def extract_optical_specs(op2_path: Path, verbose: bool = True) -> dict:
    """
    Extract optical specifications from mirror mesh geometry.

    Fits a rotationally symmetric parabola z = a*r^2 + b to the mesh nodes
    and derives aperture, focal length, f-number, radius of curvature, sag
    and an RMS fit residual that grades how parabolic the surface really is.

    Args:
        op2_path: Path to OP2 file
        verbose: Print detailed output

    Returns:
        dict with optical specifications (all lengths in mm)
    """
    # Project-local import, deferred so this module can be imported even when
    # optimization_engine is not on sys.path until runtime.
    from optimization_engine.extractors.extract_zernike_opd import (
        ZernikeOPDExtractor,
        estimate_focal_length_from_geometry
    )

    if verbose:
        print(f"Analyzing: {op2_path}")
        print("=" * 60)

    extractor = ZernikeOPDExtractor(op2_path)

    # Get geometry: node_id -> (x, y, z) positions from the extractor
    geo = extractor.node_geometry
    all_pos = np.array(list(geo.values()))
    x, y, z = all_pos[:, 0], all_pos[:, 1], all_pos[:, 2]

    # Radial distance of each node from the optical (z) axis
    r = np.sqrt(x**2 + y**2)

    # Estimate focal length from the mesh (assumes a concave mirror)
    focal = estimate_focal_length_from_geometry(x, y, z, concave=True)

    # Derived quantities
    diameter = 2 * r.max()
    f_number = focal / diameter
    RoC = 2 * focal  # Radius of curvature (parabola vertex: RoC = 2f)
    sag = r.max()**2 / (4 * focal)  # Surface sag at edge (parabola: r^2/4f)
    central_obs = r.min() if r.min() > 1.0 else 0.0  # Central obscuration (hole radius, if any)

    # Parabola fit quality check: least-squares fit of z = a*r^2 + b
    r_sq = x**2 + y**2
    A = np.column_stack([r_sq, np.ones_like(r_sq)])
    coeffs, _, _, _ = np.linalg.lstsq(A, z, rcond=None)
    a, b = coeffs
    z_fit = a * r_sq + b
    rms_error = np.sqrt(np.mean((z - z_fit)**2))

    # Grade the fit residual into a human-readable quality label
    if rms_error < 0.1:
        fit_quality = "Excellent"
        fit_note = "Focal length estimate is reliable"
    elif rms_error < 1.0:
        fit_quality = "Good"
        fit_note = "Focal length estimate is reasonably accurate"
    else:
        fit_quality = "Poor"
        fit_note = "Consider using explicit focal length from optical design"

    specs = {
        'aperture_diameter_mm': diameter,
        'aperture_radius_mm': r.max(),
        'focal_length_mm': focal,
        'f_number': f_number,
        'radius_of_curvature_mm': RoC,
        'surface_sag_mm': sag,
        'central_obscuration_mm': central_obs,
        'node_count': len(geo),
        'x_range_mm': (x.min(), x.max()),
        'y_range_mm': (y.min(), y.max()),
        'z_range_mm': (z.min(), z.max()),
        'parabola_fit_rms_mm': rms_error,
        'fit_quality': fit_quality,
        'fit_note': fit_note,
        'source_file': str(op2_path),
    }

    if verbose:
        print()
        print("MIRROR OPTICAL SPECIFICATIONS (from mesh geometry)")
        print("=" * 60)
        print()
        print(f"Aperture Diameter: {diameter:.1f} mm ({diameter/1000:.3f} m)")
        print(f"Aperture Radius: {r.max():.1f} mm")
        if central_obs > 0:
            print(f"Central Obscuration: {central_obs:.1f} mm")
        print()
        print(f"Estimated Focal Length: {focal:.1f} mm ({focal/1000:.3f} m)")
        print(f"Radius of Curvature: {RoC:.1f} mm ({RoC/1000:.3f} m)")
        print(f"f-number (f/D): f/{f_number:.2f}")
        print()
        print(f"Surface Sag at Edge: {sag:.2f} mm")
        print()
        print("--- Mesh Statistics ---")
        print(f"Node count: {len(geo)}")
        print(f"X range: {x.min():.1f} to {x.max():.1f} mm")
        print(f"Y range: {y.min():.1f} to {y.max():.1f} mm")
        print(f"Z range: {z.min():.2f} to {z.max():.2f} mm")
        print()
        print("--- Parabola Fit Quality ---")
        print(f"RMS fit residual: {rms_error:.4f} mm ({rms_error*1000:.2f} µm)")
        print(f"Quality: {fit_quality} - {fit_note}")
        print()
        print("=" * 60)

    return specs
|
||||
|
||||
|
||||
def generate_readme_section(specs: dict) -> str:
    """Render the specs dict as the '## 2. Optical Prescription' markdown section.

    The returned text is spliced verbatim into a study README by
    update_readme(), so the exact formatting below (table layout, headings,
    fenced code block) is intentional.
    """
    return f"""## 2. Optical Prescription

> **Source**: Estimated from mesh geometry. Validate against optical design.

| Parameter | Value | Units | Status |
|-----------|-------|-------|--------|
| Aperture Diameter | {specs['aperture_diameter_mm']:.1f} | mm | Estimated |
| Focal Length | {specs['focal_length_mm']:.1f} | mm | Estimated |
| f-number | f/{specs['f_number']:.2f} | - | Computed |
| Radius of Curvature | {specs['radius_of_curvature_mm']:.1f} | mm | Computed (2×f) |
| Central Obscuration | {specs['central_obscuration_mm']:.1f} | mm | From mesh |
| Surface Type | Parabola | - | Assumed |

**Fit Quality**: {specs['fit_quality']} ({specs['fit_note']})

### 2.1 Usage in OPD Extractor

For rigorous WFE analysis, use explicit focal length:

```python
from optimization_engine.extractors import ZernikeOPDExtractor

extractor = ZernikeOPDExtractor(
    op2_file,
    focal_length={specs['focal_length_mm']:.1f}, # mm - validate against design
    concave=True
)
```
"""
|
||||
|
||||
|
||||
def update_readme(study_dir: Path, specs: dict):
    """Replace the '## 2. Optical Prescription' section of a study README.

    Args:
        study_dir: Directory containing the README.md to update.
        specs: Optical specifications dict (see extract_optical_specs).

    Returns:
        True when the README was rewritten; False when the README or the
        target section is missing.
    """
    readme_path = study_dir / 'README.md'

    if not readme_path.exists():
        print(f"README.md not found at {readme_path}")
        return False

    content = readme_path.read_text(encoding='utf-8')

    # Build the replacement section from the measured specs
    new_section = generate_readme_section(specs)

    # Match everything from the section heading up to (not including) the
    # '## 3.' heading, or to end-of-file when no later section exists
    import re
    pattern = r'## 2\. Optical Prescription.*?(?=## 3\.|$)'

    if re.search(pattern, content, re.DOTALL):
        # BUGFIX: use a callable replacement so any backslashes in the
        # generated text are inserted literally instead of being interpreted
        # as regex group references / escapes by re.sub.
        content = re.sub(pattern, lambda _m: new_section + '\n---\n\n',
                         content, flags=re.DOTALL)
        print(f"Updated optical prescription in {readme_path}")
    else:
        print(f"Could not find '## 2. Optical Prescription' section in {readme_path}")
        print("Please add manually or check section numbering.")
        return False

    readme_path.write_text(content, encoding='utf-8')
    return True
|
||||
|
||||
|
||||
def main():
    """CLI entry point: locate an OP2 file, extract mirror optical specs,
    optionally emit them as JSON and/or publish them into the parent
    study README.

    Exits with status 1 on any error (missing file or extraction failure).
    """
    parser = argparse.ArgumentParser(
        description='Extract mirror optical specifications from FEA mesh',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Analyze current study directory
  python -m optimization_engine.tools.extract_mirror_optical_specs .

  # Analyze specific OP2 file
  python -m optimization_engine.tools.extract_mirror_optical_specs results.op2

  # Update parent README with specs
  python -m optimization_engine.tools.extract_mirror_optical_specs . --update-readme
"""
    )

    parser.add_argument('path', type=str,
                        help='Path to OP2 file or study directory')
    parser.add_argument('--update-readme', action='store_true',
                        help='Update parent README.md with optical specs')
    parser.add_argument('--quiet', '-q', action='store_true',
                        help='Suppress detailed output')
    parser.add_argument('--json', action='store_true',
                        help='Output specs as JSON')

    args = parser.parse_args()

    try:
        path = Path(args.path).resolve()
        op2_path = find_op2_file(path)

        # Verbose console output is suppressed for --quiet and also for
        # --json (keeps stdout machine-parseable in the JSON case).
        specs = extract_optical_specs(op2_path, verbose=not args.quiet and not args.json)

        if args.json:
            import json
            print(json.dumps(specs, indent=2, default=str))

        if args.update_readme:
            # Walk upward from the OP2 location toward the study root:
            # stop at the first ancestor owning a README.md, or bail out
            # at a 'studies' directory / filesystem root.
            study_dir = path if path.is_dir() else path.parent
            while study_dir.name not in ['studies', ''] and not (study_dir / 'README.md').exists():
                if (study_dir.parent / 'README.md').exists():
                    study_dir = study_dir.parent
                    break
                study_dir = study_dir.parent

            if (study_dir / 'README.md').exists():
                update_readme(study_dir, specs)
            else:
                # fixed: was an f-string with no placeholders
                print("Could not find parent README.md to update")

    except FileNotFoundError as e:
        print(f"Error: {e}")
        sys.exit(1)
    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
155
tools/migrate_studies_to_topics.py
Normal file
155
tools/migrate_studies_to_topics.py
Normal file
@@ -0,0 +1,155 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Migration script to reorganize studies into topic-based subfolders.
|
||||
|
||||
Run with --dry-run first to preview changes:
|
||||
python migrate_studies_to_topics.py --dry-run
|
||||
|
||||
Then run without flag to execute:
|
||||
python migrate_studies_to_topics.py
|
||||
"""
|
||||
|
||||
import shutil
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
# Root folder that holds all study directories (sibling of this script).
STUDIES_DIR = Path(__file__).parent / "studies"

# Topic classification based on study name prefixes
# (a study whose name starts with the key is filed under the value folder).
TOPIC_MAPPING = {
    'bracket_': 'Simple_Bracket',
    'drone_gimbal_': 'Drone_Gimbal',
    'm1_mirror_': 'M1_Mirror',
    'uav_arm_': 'UAV_Arm',
    'simple_beam_': 'Simple_Beam',
}

# Files/folders to skip (not studies)
SKIP_ITEMS = {
    'm1_mirror_all_trials_export.csv', # Data export file
    '.gitkeep',
    '__pycache__',
}
|
||||
|
||||
|
||||
def classify_study(study_name: str) -> str:
    """Map a study directory name to its topic folder via known prefixes.

    Falls back to '_Other' when no prefix in TOPIC_MAPPING matches.
    """
    matched_topics = (topic for prefix, topic in TOPIC_MAPPING.items()
                      if study_name.startswith(prefix))
    return next(matched_topics, '_Other')
|
||||
|
||||
|
||||
def get_studies_to_migrate():
    """Collect studies sitting directly under STUDIES_DIR that still need
    moving into a topic subfolder.

    Returns:
        List of dicts with keys 'name', 'source', 'topic', 'target'.
    """
    pending = []

    for entry in STUDIES_DIR.iterdir():
        # Skip non-directories, known non-study items, and hidden entries.
        if not entry.is_dir():
            continue
        if entry.name in SKIP_ITEMS or entry.name.startswith('.'):
            continue

        # A topic folder is recognized by having study subdirectories,
        # i.e. children that themselves contain a 1_setup folder.
        has_study_children = any(
            child.is_dir() and (child / "1_setup").exists()
            for child in entry.iterdir()
        )
        if has_study_children:
            print(f"[SKIP] {entry.name} - already a topic folder")
            continue

        # A study is identified by a 1_setup folder or an optimization config.
        looks_like_study = ((entry / "1_setup").exists()
                            or (entry / "optimization_config.json").exists())
        if not looks_like_study:
            print(f"[SKIP] {entry.name} - not a study (no 1_setup folder)")
            continue

        topic = classify_study(entry.name)
        pending.append({
            'name': entry.name,
            'source': entry,
            'topic': topic,
            'target': STUDIES_DIR / topic / entry.name,
        })

    return pending
|
||||
|
||||
|
||||
def migrate_studies(dry_run: bool = True):
    """Move loose studies into their topic folders.

    With dry_run=True only the migration plan is printed; nothing moves.
    """
    studies = get_studies_to_migrate()

    if not studies:
        print("\nNo studies to migrate. All studies are already organized.")
        return

    # Group by topic so the plan reads as a tree.
    by_topic = {}
    for entry in studies:
        by_topic.setdefault(entry['topic'], []).append(entry)

    banner = "=" * 60
    print("\n" + banner)
    print("MIGRATION PLAN")
    print(banner)

    for topic in sorted(by_topic):
        print(f"\n{topic}/")
        for entry in by_topic[topic]:
            print(f"  +-- {entry['name']}/")

    print(f"\nTotal: {len(studies)} studies to migrate")

    if dry_run:
        print("\n[DRY RUN] No changes made. Run without --dry-run to execute.")
        return

    # Execute migration
    print("\n" + banner)
    print("EXECUTING MIGRATION")
    print(banner)

    # Create each topic folder once, announcing it only the first time.
    created_topics = set()
    for entry in studies:
        topic = entry['topic']
        if topic not in created_topics:
            (STUDIES_DIR / topic).mkdir(exist_ok=True)
            created_topics.add(topic)
            print(f"[CREATE] {topic}/")

    # Move each study; failures are reported but do not abort the run.
    for entry in studies:
        try:
            shutil.move(str(entry['source']), str(entry['target']))
            print(f"[MOVE] {entry['name']} -> {entry['topic']}/{entry['name']}")
        except Exception as move_err:
            print(f"[ERROR] Failed to move {entry['name']}: {move_err}")

    print("\n" + banner)
    print("MIGRATION COMPLETE")
    print(banner)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse the --dry-run flag and run the migration."""
    arg_parser = argparse.ArgumentParser(
        description="Migrate studies to topic folders")
    arg_parser.add_argument('--dry-run', action='store_true',
                            help='Preview changes without executing')
    options = arg_parser.parse_args()

    migrate_studies(dry_run=options.dry_run)


if __name__ == "__main__":
    main()
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/usr/bin/env python3
"""Test script for Zernike extractor import.

Smoke-checks that ZernikeExtractor imports and that its
extract_relative method exposes the 'include_coefficients' parameter.
Exits with status 1 on any failure.
"""
import sys
from pathlib import Path

# Add Atomizer root to path
atomizer_root = Path(__file__).parent.parent
sys.path.insert(0, str(atomizer_root))

print("Testing ZernikeExtractor import...")

try:
    from optimization_engine.extractors import ZernikeExtractor
    print(" Import: OK")

    # Introspect the method signature to confirm the expected API surface.
    import inspect
    sig = inspect.signature(ZernikeExtractor.extract_relative)
    print(f" extract_relative signature: {sig}")

    # Check parameters
    params = list(sig.parameters.keys())
    print(f" Parameters: {params}")

    # The 'include_coefficients' kwarg must be present; fail loudly if not.
    if 'include_coefficients' in params:
        print(" include_coefficients parameter: FOUND")
    else:
        print(" include_coefficients parameter: MISSING!")
        sys.exit(1)

except Exception as e:
    # Any import/introspection failure is fatal; dump traceback for debugging.
    print(f" ERROR: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

print("\nAll tests passed!")
|
||||
Reference in New Issue
Block a user