# Source: Atomizer/optimization_engine/extractors/extract_zernike_opd.py
"""
Analytic (Parabola-Based) Zernike Extractor for Telescope Mirror Optimization
==============================================================================
This extractor computes OPD using an ANALYTICAL parabola formula, accounting for
lateral (X, Y) displacements in addition to axial (Z) displacements.
NOTE: For most robust OPD analysis, prefer ZernikeOPDExtractor from extract_zernike_figure.py
which uses actual mesh geometry as reference (no shape assumptions).
This analytic extractor is useful when:
- You know the optical prescription (focal length)
- The surface is a parabola (or close to it)
- You want to compare against theoretical parabola
WHY LATERAL CORRECTION MATTERS:
-------------------------------
The standard Zernike approach uses original (x, y) coordinates with only Z-displacement.
This is INCORRECT when lateral displacements are significant because:
1. A node at (x₀, y₀) moves to (x₀+Δx, y₀+Δy, z₀+Δz)
2. The parabola surface at the NEW position has a different expected Z
3. The true surface error is: (z₀+Δz) - z_parabola(x₀+Δx, y₀+Δy)
PARABOLA EQUATION:
------------------
For a paraboloid with vertex at origin and optical axis along Z:
z = (x² + y²) / (4 * f)
where f = focal length = R / 2 (R = radius of curvature at vertex)
For a concave mirror (like telescope primary):
z = -(x² + y²) / (4 * f) (negative because surface curves away from +Z)
We support both conventions via the `concave` parameter.
Author: Atomizer Framework
Date: 2024
"""
from pathlib import Path
from typing import Dict, Any, Optional, List, Tuple, Union
import numpy as np
from numpy.linalg import LinAlgError
# Import base Zernike functionality from existing module
from .extract_zernike import (
compute_zernike_coefficients,
compute_aberration_magnitudes,
zernike_noll,
zernike_label,
read_node_geometry,
find_geometry_file,
extract_displacements_by_subcase,
UNIT_TO_NM,
DEFAULT_N_MODES,
DEFAULT_FILTER_ORDERS,
)
try:
from pyNastran.op2.op2 import OP2
from pyNastran.bdf.bdf import BDF
except ImportError:
raise ImportError("pyNastran is required. Install with: pip install pyNastran")
# ============================================================================
# OPD Calculation Functions
# ============================================================================
def compute_parabola_z(
    x: np.ndarray,
    y: np.ndarray,
    focal_length: float,
    concave: bool = True
) -> np.ndarray:
    """
    Compute the Z coordinate on a paraboloid surface.

    For a paraboloid: z = ±(x² + y²) / (4 * f)

    Args:
        x, y: Coordinates on the surface
        focal_length: Focal length of the parabola (f = R_vertex / 2)
        concave: If True, mirror curves toward -Z (typical telescope primary)

    Returns:
        Z coordinates on the parabola surface
    """
    r_squared = x**2 + y**2
    z = r_squared / (4.0 * focal_length)
    if concave:
        z = -z  # Concave mirror curves toward -Z
    return z
def compute_true_opd(
    x0: np.ndarray,
    y0: np.ndarray,
    z0: np.ndarray,
    dx: np.ndarray,
    dy: np.ndarray,
    dz: np.ndarray,
    focal_length: float,
    concave: bool = True
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Compute true Optical Path Difference accounting for lateral displacement.

    The key insight: when a node moves laterally, we must compare its new Z
    position against what the parabola Z SHOULD BE at that new (x, y) location.

    The CORRECT formulation:
        - Original surface: z0 = parabola(x0, y0) + manufacturing_errors
        - Deformed surface: z0 + dz = parabola(x0 + dx, y0 + dy) + total_errors

    The surface error we want is:
        error = (z0 + dz) - parabola(x0 + dx, y0 + dy)

    But since z0 ≈ parabola(x0, y0), we can compute the DIFFERENTIAL:
        error ≈ dz - [parabola(x0 + dx, y0 + dy) - parabola(x0, y0)]
        error ≈ dz - Δz_parabola

    where Δz_parabola is the change in ideal parabola Z due to lateral movement.

    NOTE: z0 is accepted for interface completeness but is not used by this
    differential formulation (only the displacement components matter).

    Args:
        x0, y0, z0: Original node coordinates (undeformed)
        dx, dy, dz: Displacement components from FEA
        focal_length: Parabola focal length
        concave: Whether mirror is concave (typical)

    Returns:
        Tuple of:
        - x_def: Deformed X coordinates (for Zernike fitting)
        - y_def: Deformed Y coordinates (for Zernike fitting)
        - surface_error: True surface error at deformed positions
        - lateral_magnitude: Magnitude of lateral displacement (diagnostic)
    """
    # Deformed coordinates
    x_def = x0 + dx
    y_def = y0 + dy
    # Compute the CHANGE in parabola Z due to lateral displacement.
    # For parabola: z = ±r²/(4f), so:
    #   Δz_parabola = z(x_def, y_def) - z(x0, y0)
    #               = [(x0+dx)² + (y0+dy)² - x0² - y0²] / (4f)
    #               = [2*x0*dx + dx² + 2*y0*dy + dy²] / (4f)
    r0_sq = x0**2 + y0**2
    r_def_sq = x_def**2 + y_def**2
    # Change in r² due to lateral displacement
    delta_r_sq = r_def_sq - r0_sq
    # Change in parabola Z (the Z shift that would keep the node on the ideal parabola)
    delta_z_parabola = delta_r_sq / (4.0 * focal_length)
    if concave:
        delta_z_parabola = -delta_z_parabola
    # TRUE surface error = actual dz - expected dz (to stay on parabola).
    # If a node moves laterally outward (larger r), the ideal parabola Z changes;
    # the error is the actual Z movement minus that expected Z movement.
    surface_error = dz - delta_z_parabola
    # Diagnostic: lateral displacement magnitude
    lateral_magnitude = np.sqrt(dx**2 + dy**2)
    return x_def, y_def, surface_error, lateral_magnitude
def estimate_focal_length_from_geometry(
    x: np.ndarray,
    y: np.ndarray,
    z: np.ndarray,
    concave: bool = True
) -> float:
    """
    Estimate focal length from node geometry by fitting a paraboloid.

    Uses least-squares fit: z = a * (x² + y²) + b
    where focal_length = 1 / (4 * |a|)

    NOTE: `concave` is accepted for interface symmetry but does not affect the
    estimate, since the magnitude |a| is used regardless of sign.

    Args:
        x, y, z: Node coordinates
        concave: Whether mirror is concave

    Returns:
        Estimated focal length

    Raises:
        ValueError: If the surface appears flat (|a| ~ 0) or the fallback path
            has no nodes with r² > 0.
    """
    r_squared = x**2 + y**2
    # Fit z = a * r² + b
    A = np.column_stack([r_squared, np.ones_like(r_squared)])
    try:
        coeffs, _, _, _ = np.linalg.lstsq(A, z, rcond=None)
        a = coeffs[0]
    except LinAlgError:
        # Fallback: use simple ratio (robust median of z/r²)
        mask = r_squared > 0
        if not np.any(mask):
            raise ValueError("Cannot estimate focal length - all nodes at r = 0")
        a = np.median(z[mask] / r_squared[mask])
    # For a concave mirror, a should be negative;
    # focal_length = 1 / (4 * |a|)
    if abs(a) < 1e-12:
        raise ValueError("Cannot estimate focal length - surface appears flat")
    focal_length = 1.0 / (4.0 * abs(a))
    return focal_length
# ============================================================================
# Main Analytic (Parabola-Based) Extractor Class
# ============================================================================
class ZernikeAnalyticExtractor:
    """
    Analytic (parabola-based) Zernike extractor for telescope mirror optimization.

    This extractor accounts for lateral (X, Y) displacements when computing
    wavefront error, using an analytical parabola formula as reference.

    NOTE: For most robust analysis, prefer ZernikeOPDExtractor from
    extract_zernike_figure.py which uses actual mesh geometry (no shape assumptions).

    Use this extractor when:
    - You know the focal length / optical prescription
    - The surface is parabolic (or near-parabolic)
    - You want to compare against theoretical parabola shape

    Example:
        extractor = ZernikeAnalyticExtractor(
            op2_file, bdf_file,
            focal_length=5000.0  # mm
        )
        result = extractor.extract_subcase('20')
        # Metrics available:
        print(f"Max lateral displacement: {result['max_lateral_disp_um']:.2f} µm")
        print(f"RMS lateral displacement: {result['rms_lateral_disp_um']:.2f} µm")
    """

    def __init__(
        self,
        op2_path: Union[str, Path],
        bdf_path: Optional[Union[str, Path]] = None,
        focal_length: Optional[float] = None,
        concave: bool = True,
        displacement_unit: str = 'mm',
        n_modes: int = DEFAULT_N_MODES,
        filter_orders: int = DEFAULT_FILTER_ORDERS,
        auto_estimate_focal: bool = True
    ):
        """
        Initialize the analytic (parabola-based) Zernike extractor.

        Args:
            op2_path: Path to OP2 results file
            bdf_path: Path to BDF/DAT geometry file (auto-detected if None)
            focal_length: Parabola focal length in same units as geometry.
                If None and auto_estimate_focal=True, will estimate from mesh
            concave: Whether mirror is concave (curves toward -Z)
            displacement_unit: Unit of displacement in OP2 ('mm', 'm', 'um', 'nm')
            n_modes: Number of Zernike modes to fit
            filter_orders: Number of low-order modes to filter
            auto_estimate_focal: If True, estimate focal length from geometry
        """
        self.op2_path = Path(op2_path)
        # Geometry file is auto-discovered next to the OP2 when not supplied.
        self.bdf_path = Path(bdf_path) if bdf_path else find_geometry_file(self.op2_path)
        self.focal_length = focal_length
        self.concave = concave
        self.displacement_unit = displacement_unit
        self.n_modes = n_modes
        self.filter_orders = filter_orders
        self.auto_estimate_focal = auto_estimate_focal
        # Unit conversion: model displacement units -> nanometers.
        # Unrecognized unit strings fall back to 1e6 (i.e. mm -> nm).
        self.nm_scale = UNIT_TO_NM.get(displacement_unit.lower(), 1e6)
        self.um_scale = self.nm_scale / 1000.0  # For lateral displacement reporting
        # WFE = 2 * surface error (optical convention for reflection)
        self.wfe_factor = 2.0 * self.nm_scale
        # Lazy-loaded data (populated on first property access)
        self._node_geo = None
        self._displacements = None
        self._estimated_focal = None

    @property
    def node_geometry(self) -> Dict[int, np.ndarray]:
        """Lazy-load node geometry from BDF (node id -> xyz array)."""
        if self._node_geo is None:
            self._node_geo = read_node_geometry(self.bdf_path)
        return self._node_geo

    @property
    def displacements(self) -> Dict[str, Dict[str, np.ndarray]]:
        """Lazy-load per-subcase displacement data from the OP2 file."""
        if self._displacements is None:
            self._displacements = extract_displacements_by_subcase(self.op2_path)
        return self._displacements

    def get_focal_length(self, x: np.ndarray, y: np.ndarray, z: np.ndarray) -> float:
        """
        Get focal length, estimating from geometry if not provided.

        Args:
            x, y, z: Node coordinates used for estimation when needed.

        Returns:
            The explicit focal length if configured, otherwise a cached
            least-squares estimate derived from the supplied geometry.

        Raises:
            ValueError: If no focal length is set and auto-estimation is disabled.
        """
        if self.focal_length is not None:
            return self.focal_length
        # Reuse a previously computed estimate so repeated subcases agree.
        if self._estimated_focal is not None:
            return self._estimated_focal
        if self.auto_estimate_focal:
            self._estimated_focal = estimate_focal_length_from_geometry(
                x, y, z, self.concave
            )
            return self._estimated_focal
        raise ValueError(
            "focal_length must be provided or auto_estimate_focal must be True"
        )

    def _build_opd_data(
        self,
        subcase_label: str
    ) -> Dict[str, np.ndarray]:
        """
        Build OPD data arrays for a subcase using the rigorous method.

        Args:
            subcase_label: Key into self.displacements (e.g., '20').

        Raises:
            ValueError: If the subcase is not present in the OP2 results.

        Returns:
            Dict with:
            - x_original, y_original, z_original: Original coordinates
            - dx, dy, dz: Displacement components
            - x_deformed, y_deformed: Deformed coordinates (for Zernike fitting)
            - surface_error: True surface error (model units)
            - wfe_nm: Wavefront error in nanometers
            - lateral_disp: Lateral displacement magnitude
            - focal_length: Focal length actually used
        """
        if subcase_label not in self.displacements:
            available = list(self.displacements.keys())
            raise ValueError(f"Subcase '{subcase_label}' not found. Available: {available}")
        data = self.displacements[subcase_label]
        node_ids = data['node_ids']
        disp = data['disp']
        # Build coordinate/displacement arrays. Nodes present in the OP2 but
        # missing from the BDF geometry are silently skipped.
        x0, y0, z0 = [], [], []
        dx_arr, dy_arr, dz_arr = [], [], []
        for nid, vec in zip(node_ids, disp):
            geo = self.node_geometry.get(int(nid))
            if geo is None:
                continue
            x0.append(geo[0])
            y0.append(geo[1])
            z0.append(geo[2])
            dx_arr.append(vec[0])
            dy_arr.append(vec[1])
            dz_arr.append(vec[2])
        x0 = np.array(x0)
        y0 = np.array(y0)
        z0 = np.array(z0)
        dx_arr = np.array(dx_arr)
        dy_arr = np.array(dy_arr)
        dz_arr = np.array(dz_arr)
        # Get focal length (explicit, cached, or estimated from this geometry)
        focal = self.get_focal_length(x0, y0, z0)
        # Compute true OPD (accounts for lateral node motion on the parabola)
        x_def, y_def, surface_error, lateral_disp = compute_true_opd(
            x0, y0, z0, dx_arr, dy_arr, dz_arr, focal, self.concave
        )
        # Convert to WFE (nm); wfe_factor includes the x2 reflection convention
        wfe_nm = surface_error * self.wfe_factor
        return {
            'x_original': x0,
            'y_original': y0,
            'z_original': z0,
            'dx': dx_arr,
            'dy': dy_arr,
            'dz': dz_arr,
            'x_deformed': x_def,
            'y_deformed': y_def,
            'surface_error': surface_error,
            'wfe_nm': wfe_nm,
            'lateral_disp': lateral_disp,
            'focal_length': focal
        }

    def extract_subcase(
        self,
        subcase_label: str,
        include_coefficients: bool = False,
        include_diagnostics: bool = True
    ) -> Dict[str, Any]:
        """
        Extract Zernike metrics for a single subcase using the rigorous OPD method.

        Args:
            subcase_label: Subcase identifier (e.g., '20', '90')
            include_coefficients: Whether to include all Zernike coefficients
            include_diagnostics: Whether to include lateral displacement diagnostics

        Returns:
            Dict with RMS metrics, aberrations, and lateral displacement info
        """
        opd_data = self._build_opd_data(subcase_label)
        # Use DEFORMED coordinates for Zernike fitting
        X = opd_data['x_deformed']
        Y = opd_data['y_deformed']
        WFE = opd_data['wfe_nm']
        # Fit Zernike coefficients over the deformed aperture
        coeffs, R_max = compute_zernike_coefficients(X, Y, WFE, self.n_modes)
        # Normalize coordinates to the unit disk, centered on the deformed cloud
        x_c = X - np.mean(X)
        y_c = Y - np.mean(Y)
        r = np.hypot(x_c / R_max, y_c / R_max)
        theta = np.arctan2(y_c, x_c)
        # Build low-order Zernike basis (Noll indices 1..filter_orders)
        Z_low = np.column_stack([
            zernike_noll(j, r, theta) for j in range(1, self.filter_orders + 1)
        ])
        # Filtered WFE: low-order modes removed
        wfe_filtered = WFE - Z_low @ coeffs[:self.filter_orders]
        global_rms = float(np.sqrt(np.mean(WFE**2)))
        filtered_rms = float(np.sqrt(np.mean(wfe_filtered**2)))
        # J1-J3 filtered (optician workload)
        Z_j1to3 = np.column_stack([
            zernike_noll(j, r, theta) for j in range(1, 4)
        ])
        wfe_j1to3 = WFE - Z_j1to3 @ coeffs[:3]
        rms_j1to3 = float(np.sqrt(np.mean(wfe_j1to3**2)))
        # Named aberration magnitudes (astigmatism, coma, etc.)
        aberrations = compute_aberration_magnitudes(coeffs)
        result = {
            'subcase': subcase_label,
            'method': 'opd_rigorous',
            'global_rms_nm': global_rms,
            'filtered_rms_nm': filtered_rms,
            'rms_filter_j1to3_nm': rms_j1to3,
            'n_nodes': len(X),
            'focal_length_used': opd_data['focal_length'],
            **aberrations
        }
        if include_diagnostics:
            lateral = opd_data['lateral_disp']
            result.update({
                'max_lateral_disp_um': float(np.max(lateral) * self.um_scale),
                'rms_lateral_disp_um': float(np.sqrt(np.mean(lateral**2)) * self.um_scale),
                'mean_lateral_disp_um': float(np.mean(lateral) * self.um_scale),
                'p99_lateral_disp_um': float(np.percentile(lateral, 99) * self.um_scale),
            })
        if include_coefficients:
            result['coefficients'] = coeffs.tolist()
            result['coefficient_labels'] = [
                zernike_label(j) for j in range(1, self.n_modes + 1)
            ]
        return result

    def extract_comparison(
        self,
        subcase_label: str
    ) -> Dict[str, Any]:
        """
        Extract metrics using BOTH methods for comparison.

        Useful for understanding the impact of the rigorous method
        vs the standard Z-only approach.

        Args:
            subcase_label: Subcase identifier (e.g., '20', '90')

        Returns:
            Dict with metrics from both methods, the delta, lateral
            displacement stats, and a textual recommendation.
        """
        # Local import avoids a circular import at module load time.
        from .extract_zernike import ZernikeExtractor
        # Rigorous OPD method (diagnostics needed for the recommendation)
        opd_result = self.extract_subcase(subcase_label, include_diagnostics=True)
        # Standard Z-only method, configured identically
        standard_extractor = ZernikeExtractor(
            self.op2_path, self.bdf_path,
            self.displacement_unit, self.n_modes, self.filter_orders
        )
        standard_result = standard_extractor.extract_subcase(subcase_label)
        # Deltas are OPD-method minus standard-method
        delta_global = opd_result['global_rms_nm'] - standard_result['global_rms_nm']
        delta_filtered = opd_result['filtered_rms_nm'] - standard_result['filtered_rms_nm']
        return {
            'subcase': subcase_label,
            'opd_method': {
                'global_rms_nm': opd_result['global_rms_nm'],
                'filtered_rms_nm': opd_result['filtered_rms_nm'],
            },
            'standard_method': {
                'global_rms_nm': standard_result['global_rms_nm'],
                'filtered_rms_nm': standard_result['filtered_rms_nm'],
            },
            'delta': {
                'global_rms_nm': delta_global,
                'filtered_rms_nm': delta_filtered,
                'percent_difference_filtered': 100.0 * delta_filtered / standard_result['filtered_rms_nm'] if standard_result['filtered_rms_nm'] > 0 else 0.0,
            },
            'lateral_displacement': {
                'max_um': opd_result['max_lateral_disp_um'],
                'rms_um': opd_result['rms_lateral_disp_um'],
                'p99_um': opd_result['p99_lateral_disp_um'],
            },
            'recommendation': _get_method_recommendation(opd_result)
        }

    def extract_all_subcases(
        self,
        reference_subcase: Optional[str] = None
    ) -> Dict[str, Dict[str, Any]]:
        """
        Extract metrics for all available subcases.

        Args:
            reference_subcase: Reference for relative calculations (None to skip).
                NOTE(review): currently unused by the implementation — every
                subcase is extracted independently; confirm intended behavior.

        Returns:
            Dict mapping subcase label to metrics dict
        """
        results = {}
        for label in self.displacements.keys():
            results[label] = self.extract_subcase(label)
        return results

    def get_diagnostic_data(
        self,
        subcase_label: str
    ) -> Dict[str, np.ndarray]:
        """
        Get raw data for visualization/debugging.

        Returns arrays suitable for plotting lateral displacement maps,
        comparing methods, etc. (same dict as _build_opd_data).
        """
        return self._build_opd_data(subcase_label)
def _get_method_recommendation(opd_result: Dict[str, Any]) -> str:
"""
Generate recommendation based on lateral displacement magnitude.
"""
max_lateral = opd_result.get('max_lateral_disp_um', 0)
rms_lateral = opd_result.get('rms_lateral_disp_um', 0)
if max_lateral > 10.0: # > 10 µm max lateral
return "CRITICAL: Large lateral displacements detected. OPD method strongly recommended."
elif max_lateral > 1.0: # > 1 µm
return "RECOMMENDED: Significant lateral displacements. OPD method provides more accurate results."
elif max_lateral > 0.1: # > 0.1 µm
return "OPTIONAL: Small lateral displacements. OPD method provides minor improvement."
else:
return "EQUIVALENT: Negligible lateral displacements. Both methods give similar results."
# ============================================================================
# Convenience Functions
# ============================================================================
def extract_zernike_analytic(
    op2_file: Union[str, Path],
    bdf_file: Optional[Union[str, Path]] = None,
    subcase: Union[int, str] = 1,
    focal_length: Optional[float] = None,
    displacement_unit: str = 'mm',
    **kwargs
) -> Dict[str, Any]:
    """
    Convenience wrapper: run the analytic parabola method for one subcase.

    NOTE: For most robust analysis, prefer extract_zernike_opd() from
    extract_zernike_figure.py which uses actual mesh geometry.

    Args:
        op2_file: Path to OP2 results file
        bdf_file: Path to BDF geometry (auto-detected if None)
        subcase: Subcase identifier (int or str; normalized to str)
        focal_length: Parabola focal length (auto-estimated if None)
        displacement_unit: Unit of displacement in OP2
        **kwargs: Forwarded to ZernikeAnalyticExtractor

    Returns:
        Dict with RMS metrics, aberrations, and lateral displacement info
    """
    label = str(subcase)
    worker = ZernikeAnalyticExtractor(
        op2_file,
        bdf_file,
        focal_length=focal_length,
        displacement_unit=displacement_unit,
        **kwargs
    )
    return worker.extract_subcase(label)
def extract_zernike_analytic_filtered_rms(
    op2_file: Union[str, Path],
    subcase: Union[int, str] = 1,
    **kwargs
) -> float:
    """
    Return only the filtered RMS WFE (nm) from the analytic parabola method.

    NOTE: For most robust analysis, prefer extract_zernike_opd_filtered_rms()
    from extract_zernike_figure.py.
    """
    metrics = extract_zernike_analytic(op2_file, subcase=subcase, **kwargs)
    return metrics['filtered_rms_nm']
def compare_zernike_methods(
    op2_file: Union[str, Path],
    bdf_file: Optional[Union[str, Path]] = None,
    subcase: Union[int, str] = 1,
    focal_length: Optional[float] = None,
    **kwargs
) -> Dict[str, Any]:
    """
    Compare standard (Z-only) vs analytic (parabola) Zernike methods.

    Use this to understand how much lateral displacement affects your results.
    """
    comparer = ZernikeAnalyticExtractor(
        op2_file,
        bdf_file,
        focal_length=focal_length,
        **kwargs
    )
    return comparer.extract_comparison(str(subcase))
# Backwards compatibility aliases.
# NOTE(review): the alias name ZernikeOPDExtractor collides with the
# same-named class in extract_zernike_figure.py (see module docstring) —
# prefer the new names to avoid ambiguity.
ZernikeOPDExtractor = ZernikeAnalyticExtractor  # Deprecated: use ZernikeAnalyticExtractor
extract_zernike_opd = extract_zernike_analytic  # Deprecated: use extract_zernike_analytic
extract_zernike_opd_filtered_rms = extract_zernike_analytic_filtered_rms  # Deprecated
# ============================================================================
# Module Exports
# ============================================================================
__all__ = [
    # Main extractor class (new name)
    'ZernikeAnalyticExtractor',
    # Convenience functions (new names)
    'extract_zernike_analytic',
    'extract_zernike_analytic_filtered_rms',
    'compare_zernike_methods',
    # Utility functions
    'compute_true_opd',
    'compute_parabola_z',
    'estimate_focal_length_from_geometry',
    # Backwards compatibility (deprecated)
    'ZernikeOPDExtractor',
    'extract_zernike_opd',
    'extract_zernike_opd_filtered_rms',
]
if __name__ == '__main__':
    # CLI entry point: python extract_zernike_opd.py <op2_file> [focal_length]
    import sys
    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        focal = float(sys.argv[2]) if len(sys.argv) > 2 else None
        print(f"Analyzing: {op2_file}")
        # Use an explicit None check so an explicit focal length of 0.0 is
        # reported as given rather than silently shown as 'auto-estimate'.
        print(f"Focal length: {focal if focal is not None else 'auto-estimate'}")
        print("=" * 60)
        try:
            # Use the current class name (ZernikeOPDExtractor is a deprecated alias)
            extractor = ZernikeAnalyticExtractor(op2_file, focal_length=focal)
            print(f"\nAvailable subcases: {list(extractor.displacements.keys())}")
            for label in extractor.displacements.keys():
                print(f"\n{'=' * 60}")
                print(f"Subcase {label}")
                print('=' * 60)
                # Compare standard Z-only vs rigorous OPD method
                comparison = extractor.extract_comparison(label)
                print(f"\nStandard (Z-only) method:")
                print(f"  Global RMS:   {comparison['standard_method']['global_rms_nm']:.2f} nm")
                print(f"  Filtered RMS: {comparison['standard_method']['filtered_rms_nm']:.2f} nm")
                print(f"\nRigorous OPD method:")
                print(f"  Global RMS:   {comparison['opd_method']['global_rms_nm']:.2f} nm")
                print(f"  Filtered RMS: {comparison['opd_method']['filtered_rms_nm']:.2f} nm")
                print(f"\nDifference (OPD - Standard):")
                print(f"  Filtered RMS: {comparison['delta']['filtered_rms_nm']:+.2f} nm ({comparison['delta']['percent_difference_filtered']:+.1f}%)")
                print(f"\nLateral Displacement:")
                print(f"  Max: {comparison['lateral_displacement']['max_um']:.3f} µm")
                print(f"  RMS: {comparison['lateral_displacement']['rms_um']:.3f} µm")
                print(f"  P99: {comparison['lateral_displacement']['p99_um']:.3f} µm")
                print(f"\n{comparison['recommendation']}")
        except Exception as e:
            # Top-level CLI boundary: report the failure and exit non-zero
            print(f"Error: {e}")
            import traceback
            traceback.print_exc()
            sys.exit(1)
    else:
        print("OPD-Based Zernike Extractor")
        print("=" * 40)
        print("\nUsage: python extract_zernike_opd.py <op2_file> [focal_length]")
        print("\nThis module provides rigorous OPD-based Zernike extraction")
        print("that accounts for lateral (X, Y) displacements.")
        print("\nKey features:")
        print("  - True optical path difference calculation")
        print("  - Accounts for node lateral shift due to pinching/clamping")
        print("  - Lateral displacement diagnostics")
        print("  - Method comparison (standard vs OPD)")