feat: Add Studio UI, intake system, and extractor improvements
Dashboard:
- Add Studio page with drag-drop model upload and Claude chat
- Add intake system for study creation workflow
- Improve session manager and context builder
- Add intake API routes and frontend components

Optimization Engine:
- Add CLI module for command-line operations
- Add intake module for study preprocessing
- Add validation module with gate checks
- Improve Zernike extractor documentation
- Update spec models with better validation
- Enhance solve_simulation robustness

Documentation:
- Add ATOMIZER_STUDIO.md planning doc
- Add ATOMIZER_UX_SYSTEM.md for UX patterns
- Update extractor library docs
- Add study-readme-generator skill

Tools:
- Add test scripts for extraction validation
- Add Zernike recentering test

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
38
tools/test_extraction.py
Normal file
38
tools/test_extraction.py
Normal file
@@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env python3
"""Test extraction pipeline on an existing OP2 file.

Usage:
    python tools/test_extraction.py [op2_path]

When no path is given, falls back to the original hard-coded development
file so existing invocations keep working.
"""

import sys
from pathlib import Path

# Make the repository root importable so optimization_engine resolves
# when this script is run directly from tools/.
sys.path.insert(0, str(Path(__file__).parent.parent))

from optimization_engine.extractors import extract_displacement, extract_solid_stress

# Default kept for backward compatibility; override via the first CLI arg.
DEFAULT_OP2_PATH = r"C:\Users\antoi\Atomizer\studies\_Other\Model_for_dev\support_arm_sim1-solution_1.op2"
op2_path = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_OP2_PATH

print("Testing extractors on existing OP2 file...")
print(f"File: {op2_path}")
print(f"Exists: {Path(op2_path).exists()}")
print()

# Test displacement extraction — errors are reported, not raised, so the
# stress test below still runs.
print("1. Displacement extraction:")
try:
    result = extract_displacement(op2_path)
    print(f"  Max magnitude: {result.get('max_magnitude', 'N/A')} mm")
    print(f"  Full result: {result}")
except Exception as e:
    print(f"  ERROR: {e}")

# Test stress extraction
print()
print("2. Stress extraction (CTETRA):")
try:
    result = extract_solid_stress(op2_path, element_type="ctetra")
    print(f"  Max von Mises: {result.get('max_von_mises', 'N/A')} MPa")
    print(f"  Full result: {result}")
except Exception as e:
    print(f"  ERROR: {e}")

print()
print("Done!")
|
||||
172
tools/test_j1_vs_mean_per_subcase.py
Normal file
172
tools/test_j1_vs_mean_per_subcase.py
Normal file
@@ -0,0 +1,172 @@
|
||||
"""
|
||||
Test: J1 coefficient vs mean(WFE) for each subcase individually.
|
||||
|
||||
Hypothesis: At 90 deg (zenith), gravity is axially symmetric, so J1 should
|
||||
closely match mean(WFE). At other angles (20, 40, 60), lateral gravity
|
||||
components break symmetry, potentially causing J1 != mean.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from optimization_engine.extractors.extract_zernike import (
|
||||
ZernikeExtractor,
|
||||
compute_zernike_coefficients,
|
||||
DEFAULT_N_MODES,
|
||||
)
|
||||
|
||||
|
||||
def test_j1_vs_mean_per_subcase():
    """Test J1 vs mean(WFE) for each subcase in real data.

    For the first OP2 file found under studies/, prints a table comparing
    the J1 (piston) Zernike coefficient with mean(WFE) for every subcase,
    then repeats the comparison on relative WFE against a reference
    subcase (the '90' case when present, otherwise the first subcase).
    Prints results only; returns None.
    """
    print("=" * 70)
    print("J1 vs mean(WFE) PER SUBCASE")
    print("=" * 70)

    # Find OP2 files
    studies_dir = Path(__file__).parent.parent / "studies"

    # Look for M1 mirror studies specifically, fall back to any OP2
    op2_files = list(studies_dir.rglob("**/m1_mirror*/**/*.op2"))
    if not op2_files:
        op2_files = list(studies_dir.rglob("*.op2"))

    if not op2_files:
        print("No OP2 files found!")
        return

    # Use the first one
    op2_file = op2_files[0]
    print(f"\nUsing: {op2_file.relative_to(studies_dir.parent)}")

    def _subcase_sort_key(sc):
        # Sort numerically when possible. str() coercion makes this safe
        # whether the extractor keys subcases by str or by int (the
        # original `sc.isdigit()` would raise AttributeError on ints).
        s = str(sc)
        return int(s) if s.isdigit() else 0

    try:
        extractor = ZernikeExtractor(op2_file)
        subcases = list(extractor.displacements.keys())
        print(f"Available subcases: {subcases}")

        print(f"\n{'Subcase':<10} {'Mean(WFE)':<15} {'J1 Coeff':<15} {'|Diff|':<12} {'Diff %':<10}")
        print("-" * 70)

        results = []
        for sc in sorted(subcases, key=_subcase_sort_key):
            try:
                X, Y, WFE = extractor._build_dataframe(sc)

                # Compute Zernike coefficients
                coeffs, R_max = compute_zernike_coefficients(X, Y, WFE, DEFAULT_N_MODES)

                j1 = coeffs[0]
                wfe_mean = np.mean(WFE)
                diff = abs(j1 - wfe_mean)
                # Guard against a near-zero mean before dividing
                pct_diff = 100 * diff / abs(wfe_mean) if abs(wfe_mean) > 1e-6 else 0

                print(f"{sc:<10} {wfe_mean:<15.4f} {j1:<15.4f} {diff:<12.4f} {pct_diff:<10.4f}")

                results.append({
                    'subcase': sc,
                    'mean_wfe': wfe_mean,
                    'j1': j1,
                    'diff': diff,
                    'pct_diff': pct_diff
                })

            except Exception as e:
                # One bad subcase should not abort the whole table
                print(f"{sc:<10} ERROR: {e}")

        # Also check RELATIVE WFE (e.g., 20 vs 90, 40 vs 90, 60 vs 90)
        print("\n" + "=" * 70)
        print("RELATIVE WFE (vs reference subcase)")
        print("=" * 70)

        # Find 90 or use first subcase as reference. Compare via str() so
        # both int and str subcase keys match the '90' label.
        ref_sc = next((sc for sc in subcases if str(sc) == '90'), subcases[0])
        print(f"Reference subcase: {ref_sc}")

        print(f"\n{'Relative':<15} {'Mean(WFE_rel)':<15} {'J1 Coeff':<15} {'|Diff|':<12} {'Diff %':<10}")
        print("-" * 70)

        ref_data = extractor.displacements[ref_sc]
        ref_node_to_idx = {int(nid): i for i, nid in enumerate(ref_data['node_ids'])}

        for sc in sorted(subcases, key=_subcase_sort_key):
            if sc == ref_sc:
                continue

            try:
                target_data = extractor.displacements[sc]

                X_rel, Y_rel, WFE_rel = [], [], []

                # Keep only nodes present in both subcases with known geometry
                for i, nid in enumerate(target_data['node_ids']):
                    nid = int(nid)
                    if nid not in ref_node_to_idx:
                        continue
                    ref_idx = ref_node_to_idx[nid]
                    geo = extractor.node_geometry.get(nid)
                    if geo is None:
                        continue

                    X_rel.append(geo[0])
                    Y_rel.append(geo[1])

                    # Z-displacement (column 2) scaled to WFE units
                    target_wfe = target_data['disp'][i, 2] * extractor.wfe_factor
                    ref_wfe = ref_data['disp'][ref_idx, 2] * extractor.wfe_factor
                    WFE_rel.append(target_wfe - ref_wfe)

                X_rel = np.array(X_rel)
                Y_rel = np.array(Y_rel)
                WFE_rel = np.array(WFE_rel)

                # Compute Zernike on relative WFE
                coeffs_rel, _ = compute_zernike_coefficients(X_rel, Y_rel, WFE_rel, DEFAULT_N_MODES)

                j1_rel = coeffs_rel[0]
                wfe_rel_mean = np.mean(WFE_rel)
                diff_rel = abs(j1_rel - wfe_rel_mean)
                pct_diff_rel = 100 * diff_rel / abs(wfe_rel_mean) if abs(wfe_rel_mean) > 1e-6 else 0

                label = f"{sc} vs {ref_sc}"
                print(f"{label:<15} {wfe_rel_mean:<15.4f} {j1_rel:<15.4f} {diff_rel:<12.4f} {pct_diff_rel:<10.4f}")

            except Exception as e:
                print(f"{sc} vs {ref_sc}: ERROR: {e}")

    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
|
||||
|
||||
|
||||
def test_symmetry_analysis():
    """Print the theoretical conditions under which J1 equals mean(WFE)."""
    divider = "=" * 70
    print("\n" + divider)
    print("SYMMETRY ANALYSIS")
    print(divider)
    explanation = """
Theory: J1 (piston) should equal mean(WFE) when:
1. The aperture is circular/annular AND
2. The sampling is uniform in angle AND
3. The WFE has no bias correlated with position

At 90 deg (zenith):
- Gravity acts purely in Z direction
- Deformation should be axially symmetric
- J1 should closely match mean(WFE)

At 20/40/60 deg:
- Gravity has lateral (X,Y) components
- Deformation may have asymmetric patterns
- Tip/tilt (J2,J3) will be large
- But J1 vs mean should still be close IF sampling is uniform

The difference J1-mean comes from:
- Non-uniform radial sampling (mesh density varies)
- Correlation between WFE and position (asymmetric loading)
"""
    print(explanation)
|
||||
|
||||
|
||||
# Script entry point: run the per-subcase J1-vs-mean comparison first,
# then print the theoretical symmetry explanation.
if __name__ == "__main__":
    test_j1_vs_mean_per_subcase()
    test_symmetry_analysis()
|
||||
349
tools/test_zernike_recentering.py
Normal file
349
tools/test_zernike_recentering.py
Normal file
@@ -0,0 +1,349 @@
|
||||
"""
|
||||
Test: Is recentering Z per subcase equivalent to removing piston from relative WFE?
|
||||
|
||||
Theory:
|
||||
- Option A: Recenter each subcase, then subtract
|
||||
WFE_rel_A = (dZ_20 - mean(dZ_20)) - (dZ_90 - mean(dZ_90))
|
||||
|
||||
- Option B: Subtract raw, then remove piston via Zernike J1
|
||||
WFE_rel_B = dZ_20 - dZ_90
|
||||
Then filter J1 from Zernike fit
|
||||
|
||||
Mathematically:
|
||||
WFE_rel_A = dZ_20 - dZ_90 - mean(dZ_20) + mean(dZ_90)
|
||||
= (dZ_20 - dZ_90) - (mean(dZ_20) - mean(dZ_90))
|
||||
|
||||
If nodes are identical: mean(dZ_20) - mean(dZ_90) = mean(dZ_20 - dZ_90)
|
||||
So: WFE_rel_A = WFE_rel_B - mean(WFE_rel_B)
|
||||
|
||||
This should equal WFE_rel_B with J1 (piston) removed, BUT only if:
|
||||
1. Piston Zernike Z1 = 1 (constant)
|
||||
2. Sampling is uniform (or Z1 coefficient = mean for non-uniform)
|
||||
|
||||
For annular apertures and non-uniform mesh sampling, this might not be exact!
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
# Add project root to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from optimization_engine.extractors.extract_zernike import (
|
||||
ZernikeExtractor,
|
||||
compute_zernike_coefficients,
|
||||
compute_rms_metrics,
|
||||
zernike_noll,
|
||||
DEFAULT_N_MODES,
|
||||
DEFAULT_FILTER_ORDERS,
|
||||
)
|
||||
|
||||
|
||||
def test_recentering_equivalence_synthetic():
    """Test with synthetic data where we control everything.

    Builds a synthetic annular aperture with two fabricated subcases
    (different piston, power, tilt, and noise), then compares:
      Option A - recenter each subcase's WFE before subtracting, vs.
      Option B - subtract raw WFE and filter J1 (piston) afterwards.

    Returns:
        bool: True when the filtered RMS of the two options agrees to
        within 0.01 nm (i.e. the approaches are equivalent).
    """
    print("=" * 70)
    print("TEST 1: Synthetic Data")
    print("=" * 70)

    # Fixed seed so the synthetic comparison is reproducible run-to-run.
    np.random.seed(42)

    # Create annular aperture (like a telescope mirror)
    n_points = 5000
    r_inner = 0.3  # 30% central obscuration
    r_outer = 1.0

    # Generate random points in annulus. sqrt of a uniform draw in r^2
    # gives area-uniform sampling over the annulus.
    r = np.sqrt(np.random.uniform(r_inner**2, r_outer**2, n_points))
    theta = np.random.uniform(0, 2*np.pi, n_points)
    X = r * np.cos(theta) * 500  # Scale to 500mm radius
    Y = r * np.sin(theta) * 500

    # Simulate Z-displacements for two subcases (in mm, will convert to nm)
    # Subcase 90: Some aberration pattern + piston
    piston_90 = 0.001  # 1 micron mean displacement
    dZ_90 = piston_90 + 0.0005 * (X**2 + Y**2) / 500**2  # Add some power
    dZ_90 += 0.0002 * np.random.randn(n_points)  # Add noise

    # Subcase 20: Different aberration + different piston
    piston_20 = 0.003  # 3 micron mean displacement
    dZ_20 = piston_20 + 0.0008 * (X**2 + Y**2) / 500**2  # More power
    dZ_20 += 0.0003 * X / 500  # Add some tilt
    dZ_20 += 0.0002 * np.random.randn(n_points)  # Add noise

    # Convert to WFE in nm (2x for reflection, 1e6 for mm->nm)
    wfe_factor = 2.0 * 1e6
    WFE_90 = dZ_90 * wfe_factor
    WFE_20 = dZ_20 * wfe_factor

    print(f"\nInput data:")
    print(f"  Points: {n_points} (annular, r_inner={r_inner})")
    print(f"  Mean WFE_90: {np.mean(WFE_90):.2f} nm")
    print(f"  Mean WFE_20: {np.mean(WFE_20):.2f} nm")
    print(f"  Mean difference: {np.mean(WFE_20) - np.mean(WFE_90):.2f} nm")

    # =========================================================================
    # Option A: Recenter each subcase BEFORE subtraction
    # =========================================================================
    WFE_90_centered = WFE_90 - np.mean(WFE_90)
    WFE_20_centered = WFE_20 - np.mean(WFE_20)
    WFE_rel_A = WFE_20_centered - WFE_90_centered

    # Fit Zernike to option A
    coeffs_A, R_max_A = compute_zernike_coefficients(X, Y, WFE_rel_A, DEFAULT_N_MODES)

    # Compute RMS metrics for option A
    rms_A = compute_rms_metrics(X, Y, WFE_rel_A, DEFAULT_N_MODES, DEFAULT_FILTER_ORDERS)

    print(f"\nOption A (recenter before subtraction):")
    print(f"  Mean WFE_rel_A: {np.mean(WFE_rel_A):.6f} nm (should be ~0)")
    print(f"  J1 (piston) coefficient: {coeffs_A[0]:.6f} nm")
    print(f"  Global RMS: {rms_A['global_rms_nm']:.2f} nm")
    print(f"  Filtered RMS (J1-J4 removed): {rms_A['filtered_rms_nm']:.2f} nm")

    # =========================================================================
    # Option B: Subtract raw, then analyze (current implementation)
    # =========================================================================
    WFE_rel_B = WFE_20 - WFE_90  # No recentering

    # Fit Zernike to option B
    coeffs_B, R_max_B = compute_zernike_coefficients(X, Y, WFE_rel_B, DEFAULT_N_MODES)

    # Compute RMS metrics for option B
    rms_B = compute_rms_metrics(X, Y, WFE_rel_B, DEFAULT_N_MODES, DEFAULT_FILTER_ORDERS)

    print(f"\nOption B (current: subtract raw, filter J1-J4 after):")
    print(f"  Mean WFE_rel_B: {np.mean(WFE_rel_B):.2f} nm")
    print(f"  J1 (piston) coefficient: {coeffs_B[0]:.2f} nm")
    print(f"  Global RMS: {rms_B['global_rms_nm']:.2f} nm")
    print(f"  Filtered RMS (J1-J4 removed): {rms_B['filtered_rms_nm']:.2f} nm")

    # =========================================================================
    # Compare
    # =========================================================================
    print(f"\n" + "=" * 70)
    print("COMPARISON")
    print("=" * 70)

    # Check if J1 coefficient in B equals the mean
    j1_vs_mean_diff = abs(coeffs_B[0] - np.mean(WFE_rel_B))
    print(f"\n1. Does J1 = mean(WFE)?")
    print(f"  J1 coefficient: {coeffs_B[0]:.6f} nm")
    print(f"  Mean of WFE: {np.mean(WFE_rel_B):.6f} nm")
    print(f"  Difference: {j1_vs_mean_diff:.6f} nm")
    print(f"  --> {'YES (negligible diff)' if j1_vs_mean_diff < 0.01 else 'NO (significant diff!)'}")

    # Check if filtered RMS is the same
    filtered_rms_diff = abs(rms_A['filtered_rms_nm'] - rms_B['filtered_rms_nm'])
    print(f"\n2. Is filtered RMS the same?")
    print(f"  Option A filtered RMS: {rms_A['filtered_rms_nm']:.6f} nm")
    print(f"  Option B filtered RMS: {rms_B['filtered_rms_nm']:.6f} nm")
    print(f"  Difference: {filtered_rms_diff:.6f} nm")
    print(f"  --> {'EQUIVALENT' if filtered_rms_diff < 0.01 else 'NOT EQUIVALENT!'}")

    # Check all coefficients (J2 onwards should be identical, since A and B
    # differ only by a constant offset which piston alone should absorb)
    coeff_diffs = np.abs(coeffs_A[1:] - coeffs_B[1:])  # Skip J1
    max_coeff_diff = np.max(coeff_diffs)
    print(f"\n3. Are non-piston coefficients (J2+) identical?")
    print(f"  Max difference in J2-J{DEFAULT_N_MODES}: {max_coeff_diff:.6f} nm")
    print(f"  --> {'YES' if max_coeff_diff < 0.01 else 'NO!'}")

    # The key insight: for non-uniform sampling, J1 might not equal mean exactly
    # Let's check how Z1 (piston polynomial) behaves.
    # NOTE(review): this normalization mirrors what the extractor presumably
    # does internally — confirm against extract_zernike if it changes.
    x_c = X - np.mean(X)
    y_c = Y - np.mean(Y)
    R_max = np.max(np.hypot(x_c, y_c))
    r_norm = np.hypot(x_c / R_max, y_c / R_max)
    theta_norm = np.arctan2(y_c, x_c)

    Z1 = zernike_noll(1, r_norm, theta_norm)  # Piston polynomial
    print(f"\n4. Piston polynomial Z1 statistics:")
    print(f"  Z1 should be constant = 1 for all points")
    print(f"  Mean(Z1): {np.mean(Z1):.6f}")
    print(f"  Std(Z1): {np.std(Z1):.6f}")
    print(f"  Min(Z1): {np.min(Z1):.6f}")
    print(f"  Max(Z1): {np.max(Z1):.6f}")

    # Pass criterion: filtered RMS agrees between the two options
    return filtered_rms_diff < 0.01
|
||||
|
||||
|
||||
def test_recentering_with_real_data():
    """Test with real OP2 data if available.

    Finds the first .op2 file under studies/, matches nodes between the
    first two subcases, and compares filtered RMS for Option A (recenter
    before subtraction) vs Option B (subtract raw, filter after).

    Returns:
        bool | None: True/False for equivalent/not-equivalent, or None
        when the test is skipped (no OP2 files, fewer than 2 subcases)
        or an error occurs.
    """
    print("\n" + "=" * 70)
    print("TEST 2: Real Data (if available)")
    print("=" * 70)

    # Look for a real study with OP2 data
    studies_dir = Path(__file__).parent.parent / "studies"
    op2_files = list(studies_dir.rglob("*.op2"))

    if not op2_files:
        print("  No OP2 files found in studies directory. Skipping real data test.")
        return None

    # Use the first one found
    op2_file = op2_files[0]
    print(f"\n  Using: {op2_file.relative_to(studies_dir.parent)}")

    try:
        extractor = ZernikeExtractor(op2_file)
        subcases = list(extractor.displacements.keys())
        print(f"  Available subcases: {subcases}")

        if len(subcases) < 2:
            print("  Need at least 2 subcases for relative comparison. Skipping.")
            return None

        # Pick two subcases
        ref_subcase = subcases[0]
        target_subcase = subcases[1]
        print(f"  Comparing: {target_subcase} vs {ref_subcase}")

        # Get raw data
        # NOTE(review): X_t/Y_t/WFE_t and X_r/Y_r/WFE_r are never used
        # below — node matching is rebuilt by hand instead. Confirm whether
        # these calls are intentional (e.g. for their side effects).
        X_t, Y_t, WFE_t = extractor._build_dataframe(target_subcase)
        X_r, Y_r, WFE_r = extractor._build_dataframe(ref_subcase)

        # Build node matching (same logic as extract_relative)
        target_data = extractor.displacements[target_subcase]
        ref_data = extractor.displacements[ref_subcase]

        ref_node_to_idx = {
            int(nid): i for i, nid in enumerate(ref_data['node_ids'])
        }

        X_common, Y_common = [], []
        WFE_target_common, WFE_ref_common = [], []

        # Keep only nodes present in both subcases with known geometry
        for i, nid in enumerate(target_data['node_ids']):
            nid = int(nid)
            if nid not in ref_node_to_idx:
                continue
            ref_idx = ref_node_to_idx[nid]
            geo = extractor.node_geometry.get(nid)
            if geo is None:
                continue

            X_common.append(geo[0])
            Y_common.append(geo[1])
            # Z-displacement (column 2) scaled to WFE units
            WFE_target_common.append(target_data['disp'][i, 2] * extractor.wfe_factor)
            WFE_ref_common.append(ref_data['disp'][ref_idx, 2] * extractor.wfe_factor)

        X = np.array(X_common)
        Y = np.array(Y_common)
        WFE_target = np.array(WFE_target_common)
        WFE_ref = np.array(WFE_ref_common)

        print(f"  Common nodes: {len(X)}")
        print(f"  Mean WFE target ({target_subcase}): {np.mean(WFE_target):.2f} nm")
        print(f"  Mean WFE ref ({ref_subcase}): {np.mean(WFE_ref):.2f} nm")

        # Option A: Recenter before
        WFE_target_centered = WFE_target - np.mean(WFE_target)
        WFE_ref_centered = WFE_ref - np.mean(WFE_ref)
        WFE_rel_A = WFE_target_centered - WFE_ref_centered

        rms_A = compute_rms_metrics(X, Y, WFE_rel_A, DEFAULT_N_MODES, DEFAULT_FILTER_ORDERS)

        # Option B: Current (no recentering)
        WFE_rel_B = WFE_target - WFE_ref
        rms_B = compute_rms_metrics(X, Y, WFE_rel_B, DEFAULT_N_MODES, DEFAULT_FILTER_ORDERS)

        print(f"\n  Option A (recenter before):")
        print(f"    Filtered RMS: {rms_A['filtered_rms_nm']:.4f} nm")

        print(f"\n  Option B (current, filter after):")
        print(f"    Filtered RMS: {rms_B['filtered_rms_nm']:.4f} nm")

        diff = abs(rms_A['filtered_rms_nm'] - rms_B['filtered_rms_nm'])
        # Guard against zero RMS before computing the percentage
        pct_diff = 100 * diff / rms_B['filtered_rms_nm'] if rms_B['filtered_rms_nm'] > 0 else 0

        print(f"\n  Difference: {diff:.4f} nm ({pct_diff:.4f}%)")
        print(f"  --> {'EQUIVALENT' if pct_diff < 0.1 else 'NOT EQUIVALENT!'}")

        # Pass criterion: filtered RMS within 0.1% between options
        return pct_diff < 0.1

    except Exception as e:
        print(f"  Error: {e}")
        import traceback
        traceback.print_exc()
        return None
|
||||
|
||||
|
||||
def test_annular_zernike_piston():
    """
    Specifically test whether J1 = mean for annular apertures.

    For a filled circular aperture with uniform sampling, J1 = mean exactly.
    For annular apertures or non-uniform sampling, this may not hold!

    Sweeps inner-radius ratios (0.0 = filled disk up to 0.6 obscuration),
    fits Zernike coefficients to noisy constant-piston WFE, and prints how
    far the fitted J1 drifts from mean(WFE). Prints only; returns None.
    """
    print("\n" + "=" * 70)
    print("TEST 3: Annular Aperture - Does J1 = mean?")
    print("=" * 70)

    # Fixed seed so the sweep is reproducible
    np.random.seed(123)

    # Test different inner radii
    for r_inner in [0.0, 0.2, 0.4, 0.6]:
        n_points = 10000
        r_outer = 1.0

        # Area-uniform sampling in the annulus: sqrt of a uniform draw in
        # r^2. The previous r_inner == 0 special case was a dead branch —
        # uniform(0**2, r_outer**2) is the identical draw, so one
        # expression covers the filled disk too.
        r = np.sqrt(np.random.uniform(r_inner**2, r_outer**2, n_points))
        theta = np.random.uniform(0, 2*np.pi, n_points)
        X = r * np.cos(theta) * 500
        Y = r * np.sin(theta) * 500

        # Create WFE with known piston plus Gaussian noise
        true_piston = 1000.0  # nm
        WFE = true_piston + 100 * np.random.randn(n_points)

        # Fit Zernike
        coeffs, _ = compute_zernike_coefficients(X, Y, WFE, DEFAULT_N_MODES)

        j1_coeff = coeffs[0]
        wfe_mean = np.mean(WFE)
        diff = abs(j1_coeff - wfe_mean)

        print(f"\n  r_inner = {r_inner}:")
        print(f"    True piston: {true_piston:.2f} nm")
        print(f"    Mean(WFE): {wfe_mean:.2f} nm")
        print(f"    J1 coefficient: {j1_coeff:.2f} nm")
        print(f"    |J1 - mean|: {diff:.4f} nm")
        print(f"    --> {'J1 ≈ mean' if diff < 1.0 else 'J1 ≠ mean!'}")
|
||||
|
||||
|
||||
# Script entry point: run the synthetic and real-data equivalence tests,
# then the annular-aperture piston sweep, and print a pass/fail summary.
if __name__ == "__main__":
    print("\n" + "=" * 70)
    print("ZERNIKE RECENTERING EQUIVALENCE TEST")
    print("=" * 70)
    print("\nQuestion: Is recentering Z per subcase equivalent to")
    print("          removing piston (J1) from relative WFE?")
    print("=" * 70)

    # Run tests (test2_passed may be None when real data is unavailable)
    test1_passed = test_recentering_equivalence_synthetic()
    test2_passed = test_recentering_with_real_data()
    test_annular_zernike_piston()

    # Summary
    print("\n" + "=" * 70)
    print("SUMMARY")
    print("=" * 70)
    print(f"\nSynthetic data test: {'PASSED' if test1_passed else 'FAILED'}")
    if test2_passed is not None:
        print(f"Real data test: {'PASSED' if test2_passed else 'FAILED'}")
    else:
        print("Real data test: SKIPPED")

    print("\nConclusion:")
    if test1_passed:
        print("  For standard Zernike on circular/annular apertures,")
        print("  recentering Z before subtraction IS equivalent to")
        print("  filtering J1 (piston) after Zernike fit.")
        print("\n  The current implementation is correct!")
    else:
        print("  WARNING: Recentering and J1 filtering are NOT equivalent!")
        print("  Consider updating the extractor to recenter before subtraction.")
|
||||
Reference in New Issue
Block a user