feat: Add Study Insights module (SYS_16) for physics visualizations

Introduces a new plugin architecture for study-specific physics
visualizations, separating "optimizer perspective" (Analysis) from
"engineer perspective" (Insights).

New module: optimization_engine/insights/
- base.py: StudyInsight base class, InsightConfig, InsightResult, registry
- zernike_wfe.py: Mirror WFE with 3D surface and Zernike decomposition
- stress_field.py: Von Mises stress contours with safety factors
- modal_analysis.py: Natural frequencies and mode shapes
- thermal_field.py: Temperature distribution visualization
- design_space.py: Parameter-objective landscape exploration

Features:
- 5 insight types: zernike_wfe, stress_field, modal, thermal, design_space
- CLI: python -m optimization_engine.insights generate <study>
- Standalone HTML generation with Plotly
- Enhanced Zernike viz: Turbo colorscale, smooth shading, 0.5x AMP
- Dashboard API fix: Added include_coefficients param to extract_relative()

Documentation:
- docs/protocols/system/SYS_16_STUDY_INSIGHTS.md
- Updated ATOMIZER_CONTEXT.md (v1.7)
- Updated 01_CHEATSHEET.md with insights section

Tools:
- tools/zernike_html_generator.py: Standalone WFE HTML generator
- tools/analyze_wfe.bat: Double-click to analyze OP2 files

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-20 13:46:28 -05:00
parent 01a7d7d121
commit 1612991d0d
15 changed files with 4450 additions and 173 deletions

View File

@@ -0,0 +1,233 @@
"""
Atomizer Study Insights Module
Provides physics-focused visualizations for FEA optimization results.
Unlike the Analysis page (optimizer-centric), Insights show the engineering
reality of specific designs through interactive 3D visualizations.
Architecture:
- StudyInsight: Abstract base class for all insight types
- InsightRegistry: Central registry for available insight types
- Each insight generates standalone HTML or Plotly data for dashboard
Available Insight Types:
-----------------------
| Type ID | Name | Description |
|----------------|------------------------|------------------------------------------|
| zernike_wfe | Zernike WFE Analysis | 3D wavefront error with Zernike decomp |
| stress_field | Stress Distribution | Von Mises stress contours |
| modal | Modal Analysis | Natural frequencies and mode shapes |
| thermal | Thermal Analysis | Temperature distribution |
| design_space | Design Space Explorer | Parameter-objective relationships |
Quick Start:
-----------
```python
from optimization_engine.insights import get_insight, list_available_insights
from pathlib import Path
study_path = Path("studies/my_study")
# List available insights for a study
available = list_available_insights(study_path)
print(available)
# Generate a specific insight
insight = get_insight('zernike_wfe', study_path)
if insight and insight.can_generate():
result = insight.generate()
print(f"Generated: {result.html_path}")
print(f"Summary: {result.summary}")
```
CLI Usage:
---------
```bash
# Generate all available insights for a study
python -m optimization_engine.insights generate studies/my_study
# Generate specific insight type
python -m optimization_engine.insights generate studies/my_study --type zernike_wfe
# List available insight types
python -m optimization_engine.insights list
```
"""
# Import base classes first
from .base import (
StudyInsight,
InsightConfig,
InsightResult,
InsightRegistry,
register_insight,
get_insight,
list_insights,
list_available_insights,
)
# Import insight implementations (triggers @register_insight decorators)
from .zernike_wfe import ZernikeWFEInsight
from .stress_field import StressFieldInsight
from .modal_analysis import ModalInsight
from .thermal_field import ThermalInsight
from .design_space import DesignSpaceInsight
# Public API
# Explicit export list: `from optimization_engine.insights import *` exposes
# only these names (base infrastructure, module-level API helpers, and the
# concrete insight implementations imported/registered above).
__all__ = [
    # Base classes
    'StudyInsight',
    'InsightConfig',
    'InsightResult',
    'InsightRegistry',
    'register_insight',
    # API functions
    'get_insight',
    'list_insights',
    'list_available_insights',
    # Insight implementations
    'ZernikeWFEInsight',
    'StressFieldInsight',
    'ModalInsight',
    'ThermalInsight',
    'DesignSpaceInsight',
]
def generate_all_insights(study_path, output_dir=None):
    """
    Generate all available insights for a study.

    Args:
        study_path: Path to study directory
        output_dir: Optional output directory (defaults to study/3_insights/)

    Returns:
        List of dicts, one per generated insight, each with keys
        'type' (insight type id), 'name' (human-readable name), and
        'result' (the InsightResult returned by generate()).
        NOTE: the previous docstring claimed a list of InsightResult
        objects; the actual payload has always been these dicts.
    """
    from pathlib import Path

    study_path = Path(study_path)
    results = []
    for info in list_available_insights(study_path):
        insight = get_insight(info['type'], study_path)
        if insight is None:
            # Type disappeared from the registry between listing and lookup;
            # skip it rather than crash (matches original behavior).
            continue
        config = InsightConfig()
        if output_dir:
            config.output_dir = Path(output_dir)
        results.append({
            'type': info['type'],
            'name': info['name'],
            'result': insight.generate(config),
        })
    return results
# CLI entry point
# Invoked via `python -m optimization_engine.insights <command> ...`.
# Commands: `list` (show registered types) and `generate <study_path>`
# (render all available insights, or one via --type).
if __name__ == '__main__':
    import sys
    from pathlib import Path

    def print_usage():
        # Plain-text help shared by the no-args path and error paths.
        print("Atomizer Study Insights")
        print("=" * 50)
        print()
        print("Usage:")
        print(" python -m optimization_engine.insights list")
        print(" python -m optimization_engine.insights generate <study_path> [--type TYPE]")
        print()
        print("Commands:")
        print(" list - List all registered insight types")
        print(" generate - Generate insights for a study")
        print()
        print("Options:")
        print(" --type TYPE Generate only the specified insight type")
        print()

    # No command given: show help and exit successfully.
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(0)
    command = sys.argv[1]
    if command == 'list':
        # Dump metadata for every registered insight type.
        print("\nRegistered Insight Types:")
        print("-" * 60)
        for info in list_insights():
            print(f" {info['type']:15} - {info['name']}")
            print(f" {info['description']}")
            print(f" Applies to: {', '.join(info['applicable_to'])}")
            print()
    elif command == 'generate':
        if len(sys.argv) < 3:
            print("Error: Missing study path")
            print_usage()
            sys.exit(1)
        study_path = Path(sys.argv[2])
        if not study_path.exists():
            print(f"Error: Study path does not exist: {study_path}")
            sys.exit(1)
        # Parse options
        # NOTE(review): this takes the argument after the LAST "--type" flag
        # and does not validate that the value isn't itself a flag; fine for
        # a two-option CLI, but argparse would be more robust.
        insight_type = None
        for i, arg in enumerate(sys.argv[3:], 3):
            if arg == '--type' and i + 1 < len(sys.argv):
                insight_type = sys.argv[i + 1]
        print(f"\nGenerating insights for: {study_path}")
        print("-" * 60)
        if insight_type:
            # Generate specific type
            insight = get_insight(insight_type, study_path)
            if insight is None:
                print(f"Error: Unknown insight type: {insight_type}")
                sys.exit(1)
            if not insight.can_generate():
                print(f"Cannot generate {insight_type}: required data not found")
                sys.exit(1)
            result = insight.generate()
            if result.success:
                print(f"Generated: {result.html_path}")
                if result.summary:
                    print(f"Summary: {result.summary}")
            else:
                print(f"Error: {result.error}")
        else:
            # Generate all available
            available = list_available_insights(study_path)
            if not available:
                print("No insights available for this study")
                sys.exit(0)
            print(f"Found {len(available)} available insight(s)")
            print()
            for info in available:
                print(f"Generating {info['name']}...")
                # get_insight cannot return None here: the type came from
                # the registry's own availability listing.
                insight = get_insight(info['type'], study_path)
                result = insight.generate()
                if result.success:
                    print(f" Created: {result.html_path}")
                else:
                    print(f" Error: {result.error}")
                print()
            print("Done!")
    else:
        print(f"Unknown command: {command}")
        print_usage()
        sys.exit(1)

View File

@@ -0,0 +1,336 @@
"""
Study Insights - Base Classes and Infrastructure
Study Insights provide physics-focused visualizations for optimization results.
Unlike Analysis (optimizer-centric), Insights show the engineering reality
of specific designs.
Architecture:
- StudyInsight: Abstract base class for all insight types
- InsightRegistry: Central registry for available insight types
- Each insight can generate standalone HTML or Plotly data for dashboard
Usage:
from optimization_engine.insights import get_insight, list_insights
# Get specific insight
insight = get_insight('zernike_wfe')
if insight.can_generate(study_path):
html_path = insight.generate_html(study_path, trial_id=47)
plotly_data = insight.get_plotly_data(study_path, trial_id=47)
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Type
import json
@dataclass
class InsightConfig:
    """Configuration for an insight instance.

    Passed to StudyInsight.generate(); unset fields fall back to defaults
    chosen by the concrete insight implementation.
    """
    trial_id: Optional[int] = None  # Specific trial to visualize (None = best)
    colorscale: str = 'Turbo'  # Plotly colorscale name
    output_dir: Optional[Path] = None  # Where to save HTML (None = study/3_insights/)
    # Visual settings
    amplification: float = 1.0  # Deformation scale factor
    lighting: bool = True  # 3D lighting effects
    # Type-specific config (passed through untouched to each insight's
    # _generate(); keys are insight-specific, e.g. 'n_modes', 'show_mode')
    extra: Dict[str, Any] = field(default_factory=dict)
@dataclass
class InsightResult:
    """Result from generating an insight.

    On failure, `success` is False and `error` holds a message; the other
    fields stay None. On success, any subset of the artifacts may be set.
    """
    success: bool
    html_path: Optional[Path] = None  # Standalone report, if one was written
    plotly_figure: Optional[Dict[str, Any]] = None  # Plotly figure as dict
    summary: Optional[Dict[str, Any]] = None  # Key metrics
    error: Optional[str] = None  # Human-readable failure reason
class StudyInsight(ABC):
    """
    Abstract base class for study-specific physics visualizations.

    Each insight type provides:
    - Detection: Can this insight be generated for this study?
    - HTML generation: Standalone interactive report
    - Plotly data: For embedding in dashboard
    - Summary: Key metrics extracted

    Subclasses must implement:
    - insight_type: Unique identifier (e.g., 'zernike_wfe')
    - name: Human-readable name
    - description: What this insight shows
    - applicable_to: List of study types this applies to
    - can_generate(): Check if study has required data
    - _generate(): Core generation logic
    """

    # Class-level metadata (override in subclasses).
    # NOTE: the list defaults are shared class attributes — subclasses must
    # REPLACE them (e.g. `applicable_to = ['mirror']`), never mutate in place.
    insight_type: str = "base"
    name: str = "Base Insight"
    description: str = "Abstract base insight"
    applicable_to: List[str] = []  # e.g., ['mirror', 'structural', 'all']
    # Required files/data patterns
    required_files: List[str] = []  # e.g., ['*.op2', '*.bdf']

    def __init__(self, study_path: Path):
        """
        Initialize insight for a specific study.

        Args:
            study_path: Path to study directory (studies/{name}/)
        """
        self.study_path = Path(study_path)
        # Conventional study layout: inputs, solver outputs, generated reports.
        self.setup_path = self.study_path / "1_setup"
        self.results_path = self.study_path / "2_results"
        self.insights_path = self.study_path / "3_insights"
        # Load study config if available
        self.config = self._load_study_config()

    def _load_study_config(self) -> Dict[str, Any]:
        """Load optimization_config.json if it exists.

        Returns:
            Parsed config dict, or {} when the file is absent.
        """
        config_path = self.setup_path / "optimization_config.json"
        if config_path.exists():
            # BUGFIX: read explicitly as UTF-8; the platform default encoding
            # (e.g. cp1252 on Windows) can fail on UTF-8 JSON config files.
            with open(config_path, encoding="utf-8") as f:
                return json.load(f)
        return {}

    @abstractmethod
    def can_generate(self) -> bool:
        """
        Check if this insight can be generated for the study.

        Returns:
            True if all required data is available
        """
        pass

    @abstractmethod
    def _generate(self, config: InsightConfig) -> InsightResult:
        """
        Core generation logic. Implemented by subclasses.

        Args:
            config: Insight configuration

        Returns:
            InsightResult with HTML path and/or Plotly data
        """
        pass

    def generate(self, config: Optional[InsightConfig] = None) -> InsightResult:
        """
        Generate the insight visualization.

        Handles the boilerplate around _generate(): default config, output
        directory creation, prerequisite check, and error wrapping so that
        callers always receive an InsightResult rather than an exception.

        Args:
            config: Optional configuration (uses defaults if None)

        Returns:
            InsightResult with generated content
        """
        if config is None:
            config = InsightConfig()
        # Ensure output directory exists
        if config.output_dir is None:
            config.output_dir = self.insights_path
        config.output_dir.mkdir(parents=True, exist_ok=True)
        # Check prerequisites
        if not self.can_generate():
            return InsightResult(
                success=False,
                error=f"Cannot generate {self.name}: required data not found"
            )
        try:
            return self._generate(config)
        except Exception as e:
            # Deliberately broad: generation failures must surface as a
            # failed result, not crash the CLI/dashboard.
            return InsightResult(
                success=False,
                error=f"Error generating {self.name}: {str(e)}"
            )

    def generate_html(
        self,
        trial_id: Optional[int] = None,
        **kwargs
    ) -> Optional[Path]:
        """
        Convenience method to generate standalone HTML.

        Args:
            trial_id: Specific trial to visualize (None = best)
            **kwargs: Additional config options (stored in InsightConfig.extra)

        Returns:
            Path to generated HTML file, or None on failure
        """
        config = InsightConfig(trial_id=trial_id, extra=kwargs)
        result = self.generate(config)
        return result.html_path if result.success else None

    def get_plotly_data(
        self,
        trial_id: Optional[int] = None,
        **kwargs
    ) -> Optional[Dict[str, Any]]:
        """
        Get Plotly figure data for dashboard embedding.

        Args:
            trial_id: Specific trial to visualize (None = best)
            **kwargs: Additional config options (stored in InsightConfig.extra)

        Returns:
            Plotly figure as dictionary, or None on failure
        """
        config = InsightConfig(trial_id=trial_id, extra=kwargs)
        result = self.generate(config)
        return result.plotly_figure if result.success else None

    def get_summary(self, trial_id: Optional[int] = None) -> Optional[Dict[str, Any]]:
        """
        Get key metrics summary without full visualization.

        NOTE: this still runs the full generate() pipeline (including HTML
        output); it only narrows the returned data to the summary dict.

        Args:
            trial_id: Specific trial (None = best)

        Returns:
            Dictionary of key metrics, or None on failure
        """
        config = InsightConfig(trial_id=trial_id)
        result = self.generate(config)
        return result.summary if result.success else None
class InsightRegistry:
    """
    Central registry for available insight types.

    A process-wide singleton: every ``InsightRegistry()`` call returns the
    same object, and all instances share one type table.

    Usage:
        registry = InsightRegistry()
        registry.register(ZernikeWFEInsight)
        # Get insight for a study
        insight = registry.get('zernike_wfe', study_path)
        # List available insights for a study
        available = registry.list_available(study_path)
    """
    _instance = None
    _insights: Dict[str, Type[StudyInsight]] = {}

    def __new__(cls):
        """Singleton pattern: first call creates the instance and the table."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._insights = {}
        return cls._instance

    def register(self, insight_class: Type[StudyInsight]) -> None:
        """
        Register an insight type under its ``insight_type`` identifier.

        Args:
            insight_class: StudyInsight subclass to register
        """
        self._insights[insight_class.insight_type] = insight_class

    def get(self, insight_type: str, study_path: Path) -> Optional[StudyInsight]:
        """
        Get an insight instance for a study.

        Args:
            insight_type: Registered insight type ID
            study_path: Path to study directory

        Returns:
            Configured insight instance, or None if not found
        """
        insight_cls = self._insights.get(insight_type)
        if insight_cls is None:
            return None
        return insight_cls(study_path)

    def list_all(self) -> List[Dict[str, Any]]:
        """
        List all registered insight types.

        Returns:
            List of insight metadata dictionaries
        """
        catalog = []
        for insight_cls in self._insights.values():
            catalog.append({
                'type': insight_cls.insight_type,
                'name': insight_cls.name,
                'description': insight_cls.description,
                'applicable_to': insight_cls.applicable_to,
            })
        return catalog

    def list_available(self, study_path: Path) -> List[Dict[str, Any]]:
        """
        List insights that can be generated for a specific study.

        Args:
            study_path: Path to study directory

        Returns:
            List of available insight metadata
        """
        usable = []
        for type_id, insight_cls in self._insights.items():
            try:
                candidate = insight_cls(study_path)
                is_ready = candidate.can_generate()
            except Exception:
                # Skip insights that fail to initialize or probe
                continue
            if is_ready:
                usable.append({
                    'type': type_id,
                    'name': insight_cls.name,
                    'description': insight_cls.description,
                })
        return usable
# Global registry instance
# Created at import time so the @register_insight decorators that run while
# the implementation modules are imported all target the same singleton.
_registry = InsightRegistry()

def register_insight(insight_class: Type[StudyInsight]) -> Type[StudyInsight]:
    """
    Decorator to register an insight class.

    Registers the class with the module-level registry and returns it
    unchanged, so it works as a plain class decorator.

    Usage:
        @register_insight
        class MyInsight(StudyInsight):
            insight_type = 'my_insight'
            ...
    """
    _registry.register(insight_class)
    return insight_class
def get_insight(insight_type: str, study_path: Path) -> Optional[StudyInsight]:
    """Get an insight instance by type (None if the type id is unregistered)."""
    return _registry.get(insight_type, study_path)
def list_insights() -> List[Dict[str, Any]]:
    """List metadata dicts for all registered insight types."""
    return _registry.list_all()
def list_available_insights(study_path: Path) -> List[Dict[str, Any]]:
    """List metadata for insights whose can_generate() passes for this study."""
    return _registry.list_available(study_path)

View File

@@ -0,0 +1,372 @@
"""
Design Space Insight
Provides interactive visualization of the design space explored during optimization.
Shows parameter relationships, objective landscapes, and design evolution.
This insight bridges optimization metrics (from Analysis) with physics understanding,
showing how design parameters affect the physical objectives.
Applicable to: All optimization studies with completed trials.
"""
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional, Tuple
import sqlite3
import json
import numpy as np
from .base import StudyInsight, InsightConfig, InsightResult, register_insight
# Lazy imports
# Plotly is only imported on first use so that merely importing this module
# (e.g. for `insights list` or availability checks) stays cheap.
_plotly_loaded = False
_go = None  # plotly.graph_objects, set by _load_dependencies()
_make_subplots = None  # plotly.subplots.make_subplots, set by _load_dependencies()

def _load_dependencies():
    """Lazy load heavy dependencies (idempotent; populates module globals)."""
    global _plotly_loaded, _go, _make_subplots
    if not _plotly_loaded:
        import plotly.graph_objects as go
        from plotly.subplots import make_subplots
        _go = go
        _make_subplots = make_subplots
        _plotly_loaded = True
@register_insight
class DesignSpaceInsight(StudyInsight):
    """
    Design space exploration visualization.

    Shows:
    - Parallel coordinates plot of parameters vs objectives
    - 3D (or 2D) parameter-objective landscape
    - Best design summary table

    Data source is the study's SQLite database (study.db); no OP2/FEA output
    is required, so this insight applies to any optimization study.
    """
    insight_type = "design_space"
    name = "Design Space Explorer"
    description = "Interactive parameter-objective relationship visualization"
    applicable_to = ["all"]  # Works with any optimization study
    required_files = []  # Requires study.db, not OP2

    # Minimum completed trials before the landscape plots are meaningful.
    # (Previously a magic '5' duplicated in can_generate and _generate.)
    MIN_TRIALS = 5

    def __init__(self, study_path: Path):
        """Initialize and locate study.db; trial data is loaded lazily."""
        super().__init__(study_path)
        self.db_path = self.results_path / "study.db"
        self._trials: Optional[List[Dict]] = None  # Cached completed trials
        self._params: Optional[List[str]] = None  # Parameter names (from first trial)
        self._objectives: Optional[List[str]] = None  # Objective names (flattened)

    def can_generate(self) -> bool:
        """Check if study.db exists with enough completed trial data."""
        if not self.db_path.exists():
            return False
        try:
            conn = sqlite3.connect(str(self.db_path))
            try:
                cursor = conn.cursor()
                cursor.execute("SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'")
                count = cursor.fetchone()[0]
            finally:
                # BUGFIX: close even when the query raises (e.g. missing
                # 'trials' table); the connection previously leaked.
                conn.close()
            return count >= self.MIN_TRIALS
        except Exception:
            # Corrupt/locked DB or unexpected schema -> insight unavailable.
            return False

    def _load_data(self):
        """Load completed trials from study.db into self._trials (cached).

        Also derives self._params / self._objectives from the first
        successfully parsed trial; nested objective dicts are flattened
        one level.
        """
        if self._trials is not None:
            return
        conn = sqlite3.connect(str(self.db_path))
        try:
            cursor = conn.cursor()
            # Get completed trials.
            # BUGFIX: VALUES is a reserved SQLite keyword and is rejected as
            # a bare column name; the column must be double-quoted.
            cursor.execute("""
                SELECT trial_id, params, "values", state
                FROM trials
                WHERE state = 'COMPLETE'
                ORDER BY trial_id
            """)
            rows = cursor.fetchall()
        finally:
            conn.close()
        self._trials = []
        self._params = None
        self._objectives = None
        for trial_id, params_json, values_json, state in rows:
            try:
                params = json.loads(params_json) if params_json else {}
                values = json.loads(values_json) if values_json else {}
            except json.JSONDecodeError:
                continue  # Skip rows with unparseable JSON payloads
            # Flatten one level of nesting in the objective values
            flat_values = {}
            for k, v in values.items():
                if isinstance(v, dict):
                    flat_values.update(v)
                else:
                    flat_values[k] = v
            self._trials.append({
                'trial_id': trial_id,
                'params': params,
                'values': flat_values,
            })
            # Extract param and objective names from first trial
            if self._params is None:
                self._params = list(params.keys())
            if self._objectives is None:
                self._objectives = list(flat_values.keys())

    def _generate(self, config: InsightConfig) -> InsightResult:
        """Generate the design-space visualization and save it as HTML.

        Args:
            config: honors config.extra['colorscale'] (default 'Viridis') and
                config.extra['primary_objective'] (default: first objective).

        Returns:
            InsightResult with html_path, the figure as a dict, and a summary
            including the best trial's parameters/objectives.
        """
        self._load_data()
        if not self._trials or len(self._trials) < self.MIN_TRIALS:
            return InsightResult(
                success=False,
                error=f"Need at least {self.MIN_TRIALS} trials, "
                      f"found: {len(self._trials or [])}")
        _load_dependencies()
        # Configuration
        colorscale = config.extra.get('colorscale', 'Viridis')
        primary_objective = config.extra.get('primary_objective', None)
        # Use first objective if not specified
        if primary_objective is None and self._objectives:
            primary_objective = self._objectives[0]
        # Build data arrays (NaN placeholder for missing params/objectives)
        n_trials = len(self._trials)
        param_data = {p: [] for p in self._params}
        obj_data = {o: [] for o in self._objectives}
        trial_ids = []
        for trial in self._trials:
            trial_ids.append(trial['trial_id'])
            for p in self._params:
                param_data[p].append(trial['params'].get(p, np.nan))
            for o in self._objectives:
                obj_data[o].append(trial['values'].get(o, np.nan))
        # Convert to arrays
        for p in self._params:
            param_data[p] = np.array(param_data[p])
        for o in self._objectives:
            obj_data[o] = np.array(obj_data[o])
        # Find best trial.
        # NOTE(review): "best" assumes the primary objective is minimized —
        # confirm against the study's optimization direction.
        if primary_objective and primary_objective in obj_data:
            obj_values = obj_data[primary_objective]
            valid_mask = ~np.isnan(obj_values)
            if np.any(valid_mask):
                best_idx = np.nanargmin(obj_values)
                best_trial = self._trials[best_idx]
                best_value = obj_values[best_idx]
            else:
                best_trial = None
                best_value = None
        else:
            best_trial = None
            best_value = None
        # Build visualization
        n_params = len(self._params)
        n_objs = len(self._objectives)
        # Layout: 2x2 grid; parallel coordinates span the full top row.
        fig = _make_subplots(
            rows=2, cols=2,
            specs=[
                [{"type": "parcoords", "colspan": 2}, None],
                [{"type": "scatter3d" if n_params >= 2 else "xy"},
                 {"type": "table"}]
            ],
            row_heights=[0.55, 0.45],
            subplot_titles=[
                "<b>Parallel Coordinates - Design Space</b>",
                "<b>Parameter Landscape</b>",
                "<b>Best Design</b>"
            ]
        )
        # 1. Parallel coordinates (all-NaN axes are dropped)
        dimensions = []
        # Add parameters
        for p in self._params:
            values = param_data[p]
            if not np.all(np.isnan(values)):
                dimensions.append(dict(
                    label=p,
                    values=values,
                    range=[float(np.nanmin(values)), float(np.nanmax(values))]
                ))
        # Add objectives
        for o in self._objectives:
            values = obj_data[o]
            if not np.all(np.isnan(values)):
                dimensions.append(dict(
                    label=o,
                    values=values,
                    range=[float(np.nanmin(values)), float(np.nanmax(values))]
                ))
        if dimensions:
            # Color by primary objective (fall back to trial index)
            color_values = obj_data.get(primary_objective, trial_ids)
            if isinstance(color_values, list):
                color_values = np.array(color_values)
            fig.add_trace(_go.Parcoords(
                line=dict(
                    color=color_values,
                    colorscale=colorscale,
                    showscale=True,
                    colorbar=dict(title=primary_objective or "Trial", thickness=15)
                ),
                dimensions=dimensions,
            ), row=1, col=1)
        # 2. 3D Parameter landscape (first 2 params vs primary objective)
        if n_params >= 2 and primary_objective:
            x_param = self._params[0]
            y_param = self._params[1]
            z_values = obj_data.get(primary_objective, [])
            fig.add_trace(_go.Scatter3d(
                x=param_data[x_param],
                y=param_data[y_param],
                z=z_values,
                mode='markers',
                marker=dict(
                    size=6,
                    color=z_values,
                    colorscale=colorscale,
                    opacity=0.8,
                    showscale=False,
                ),
                text=[f"Trial {tid}" for tid in trial_ids],
                hovertemplate=(
                    f"{x_param}: %{{x:.3f}}<br>"
                    f"{y_param}: %{{y:.3f}}<br>"
                    f"{primary_objective}: %{{z:.4f}}<br>"
                    "%{text}<extra></extra>"
                ),
            ), row=2, col=1)
            # Highlight best point
            if best_trial:
                fig.add_trace(_go.Scatter3d(
                    x=[best_trial['params'].get(x_param)],
                    y=[best_trial['params'].get(y_param)],
                    z=[best_value],
                    mode='markers',
                    marker=dict(size=12, color='red', symbol='diamond'),
                    name='Best',
                    showlegend=True,
                ), row=2, col=1)
            fig.update_scenes(
                xaxis_title=x_param,
                yaxis_title=y_param,
                zaxis_title=primary_objective,
            )
        elif n_params >= 1 and primary_objective:
            # 2D scatter fallback for single-parameter studies
            x_param = self._params[0]
            z_values = obj_data.get(primary_objective, [])
            fig.add_trace(_go.Scatter(
                x=param_data[x_param],
                y=z_values,
                mode='markers',
                marker=dict(size=8, color=z_values, colorscale=colorscale),
            ), row=2, col=1)
            fig.update_xaxes(title_text=x_param, row=2, col=1)
            fig.update_yaxes(title_text=primary_objective, row=2, col=1)
        # 3. Best design table
        if best_trial:
            labels = ["<b>Metric</b>", "<b>Value</b>"]
            # Combine params and objectives into one two-column table
            table_labels = ["Trial ID"] + self._params + self._objectives
            table_values = [str(best_trial['trial_id'])]
            for p in self._params:
                val = best_trial['params'].get(p, 'N/A')
                table_values.append(f"{val:.4f}" if isinstance(val, (int, float)) else str(val))
            for o in self._objectives:
                val = best_trial['values'].get(o, 'N/A')
                table_values.append(f"{val:.4f}" if isinstance(val, (int, float)) else str(val))
            fig.add_trace(_go.Table(
                header=dict(values=labels,
                            fill_color='#1f2937', font=dict(color='white')),
                cells=dict(values=[table_labels, table_values],
                           fill_color='#374151', font=dict(color='white'))
            ), row=2, col=2)
        else:
            fig.add_trace(_go.Table(
                header=dict(values=["<b>Info</b>"],
                            fill_color='#1f2937', font=dict(color='white')),
                cells=dict(values=[["No valid trials found"]],
                           fill_color='#374151', font=dict(color='white'))
            ), row=2, col=2)
        # Layout (dark dashboard theme)
        fig.update_layout(
            width=1500, height=1000,
            paper_bgcolor='#111827', plot_bgcolor='#1f2937',
            font=dict(color='white'),
            title=dict(
                text=f"<b>Atomizer Design Space Explorer</b><br>"
                     f"<sub>{n_trials} trials, {n_params} parameters, {n_objs} objectives</sub>",
                x=0.5, font=dict(size=18)
            ),
        )
        # Save HTML (timestamped so repeated runs never overwrite)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = config.output_dir or self.insights_path
        output_dir.mkdir(parents=True, exist_ok=True)
        html_path = output_dir / f"design_space_{timestamp}.html"
        html_path.write_text(
            fig.to_html(include_plotlyjs='cdn', full_html=True),
            encoding='utf-8'
        )
        # Summary
        summary = {
            'n_trials': n_trials,
            'n_params': n_params,
            'n_objectives': n_objs,
            'parameters': self._params,
            'objectives': self._objectives,
        }
        if best_trial:
            summary['best_trial_id'] = best_trial['trial_id']
            summary['best_params'] = best_trial['params']
            summary['best_values'] = best_trial['values']
        return InsightResult(
            success=True,
            html_path=html_path,
            plotly_figure=fig.to_dict(),
            summary=summary
        )

View File

@@ -0,0 +1,347 @@
"""
Modal Analysis Insight
Provides visualization of natural frequencies and mode shapes from FEA results.
Shows animated mode shapes, frequency spectrum, and modal participation factors.
Applicable to: Dynamic/vibration optimization studies.
"""
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional, Tuple
import numpy as np
from .base import StudyInsight, InsightConfig, InsightResult, register_insight
# Lazy imports
# plotly, matplotlib.tri and pyNastran are heavy; they are only imported on
# first use so that availability checks and module import stay cheap.
_plotly_loaded = False
_go = None  # plotly.graph_objects
_make_subplots = None  # plotly.subplots.make_subplots
_Triangulation = None  # matplotlib.tri.Triangulation (2D Delaunay meshing)
_OP2 = None  # pyNastran OP2 reader class
_BDF = None  # pyNastran BDF reader class

def _load_dependencies():
    """Lazy load heavy dependencies (idempotent; populates module globals)."""
    global _plotly_loaded, _go, _make_subplots, _Triangulation, _OP2, _BDF
    if not _plotly_loaded:
        import plotly.graph_objects as go
        from plotly.subplots import make_subplots
        from matplotlib.tri import Triangulation
        from pyNastran.op2.op2 import OP2
        from pyNastran.bdf.bdf import BDF
        _go = go
        _make_subplots = make_subplots
        _Triangulation = Triangulation
        _OP2 = OP2
        _BDF = BDF
        _plotly_loaded = True
@register_insight
class ModalInsight(StudyInsight):
    """
    Modal analysis visualization.

    Shows:
    - Natural frequency spectrum (bar chart)
    - Mode shape visualization (3D deformed mesh)
    - Mode description table
    - Frequency vs mode number plot
    """
    insight_type = "modal"
    name = "Modal Analysis"
    description = "Natural frequencies and mode shapes visualization"
    applicable_to = ["modal", "vibration", "dynamic", "all"]
    required_files = ["*.op2"]

    def __init__(self, study_path: Path):
        """Initialize for a study; OP2/geometry paths and parsed modal data
        are resolved lazily (by can_generate() and _load_data())."""
        super().__init__(study_path)
        self.op2_path: Optional[Path] = None  # Newest matching OP2, set by can_generate()
        self.geo_path: Optional[Path] = None  # Companion BDF/DAT, if found
        self._node_geo: Optional[Dict] = None  # node id -> xyz position
        self._eigenvectors: Optional[Dict] = None  # mode number -> shape data
        self._frequencies: Optional[List] = None  # [{'mode', 'frequency_hz'}, ...]

    def can_generate(self) -> bool:
        """Check if OP2 file with eigenvalue/eigenvector data exists.

        Side effects: sets self.op2_path (newest match) and self.geo_path.
        NOTE(review): this parses the entire OP2 just to confirm eigenvector
        presence, so the availability probe can be slow on large results.
        """
        search_paths = [
            self.results_path,
            self.study_path / "2_iterations",
            self.setup_path / "model",
        ]
        for search_path in search_paths:
            if not search_path.exists():
                continue
            # Prefer files named like solver output; fall back to any OP2.
            op2_files = list(search_path.glob("**/*solution*.op2"))
            if not op2_files:
                op2_files = list(search_path.glob("**/*.op2"))
            if op2_files:
                # Newest file by modification time wins.
                self.op2_path = max(op2_files, key=lambda p: p.stat().st_mtime)
                break
        if self.op2_path is None:
            return False
        # Try to find geometry (optional — mode shapes need it, spectra don't)
        try:
            self.geo_path = self._find_geometry_file(self.op2_path)
        except FileNotFoundError:
            pass
        # Verify modal data exists
        try:
            _load_dependencies()
            op2 = _OP2()
            op2.read_op2(str(self.op2_path))
            return bool(op2.eigenvectors)
        except Exception:
            return False

    def _find_geometry_file(self, op2_path: Path) -> Path:
        """Find BDF/DAT geometry file.

        Prefers a .dat/.bdf with the same stem as the OP2; otherwise returns
        the first .dat/.bdf in the same folder.

        Raises:
            FileNotFoundError: if no candidate geometry file exists.
        """
        folder = op2_path.parent
        base = op2_path.stem
        for ext in ['.dat', '.bdf']:
            cand = folder / (base + ext)
            if cand.exists():
                return cand
        for f in folder.iterdir():
            if f.suffix.lower() in ['.dat', '.bdf']:
                return f
        raise FileNotFoundError(f"No geometry file found for {op2_path}")

    def _load_data(self):
        """Load geometry and modal data from OP2 (cached on the instance)."""
        if self._eigenvectors is not None:
            return
        _load_dependencies()
        # Load geometry if available
        if self.geo_path and self.geo_path.exists():
            bdf = _BDF()
            bdf.read_bdf(str(self.geo_path))
            self._node_geo = {int(nid): node.get_position()
                              for nid, node in bdf.nodes.items()}
        else:
            self._node_geo = {}
        # Load modal data
        op2 = _OP2()
        op2.read_op2(str(self.op2_path))
        self._eigenvectors = {}
        self._frequencies = []
        for key, eig in op2.eigenvectors.items():
            # Get frequencies — two layouts depending on pyNastran result type
            if hasattr(eig, 'modes') and hasattr(eig, 'cycles'):
                modes = eig.modes
                freqs = eig.cycles  # Frequencies in Hz
            elif hasattr(eig, 'eigrs'):
                # Eigenvalues (radians/sec)^2 — convert to Hz
                eigrs = eig.eigrs
                freqs = np.sqrt(np.abs(eigrs)) / (2 * np.pi)
                modes = list(range(1, len(freqs) + 1))
            else:
                continue
            for i, (mode, freq) in enumerate(zip(modes, freqs)):
                self._frequencies.append({
                    'mode': int(mode),
                    'frequency_hz': float(freq),
                })
            # Get mode shapes
            if hasattr(eig, 'data'):
                data = eig.data
                ngt = eig.node_gridtype.astype(int)
                # node_gridtype may be 1-D (ids only) or 2-D (id, gridtype)
                node_ids = ngt if ngt.ndim == 1 else ngt[:, 0]
                for mode_idx, mode_num in enumerate(modes):
                    # data is (n_modes, n_nodes, n_components) or (n_nodes, n_components)
                    if data.ndim == 3:
                        mode_data = data[mode_idx]
                    else:
                        mode_data = data
                    self._eigenvectors[int(mode_num)] = {
                        'node_ids': node_ids,
                        'displacements': mode_data.copy(),
                    }
        # Sort frequencies by mode number
        self._frequencies.sort(key=lambda x: x['mode'])

    def _generate(self, config: InsightConfig) -> InsightResult:
        """Generate modal analysis visualization.

        Honors config.extra['n_modes'] (spectrum length, default 20) and
        config.extra['show_mode'] (which mode shape to render, default 1).
        """
        self._load_data()
        if not self._frequencies:
            return InsightResult(success=False, error="No modal data found in OP2")
        _load_dependencies()
        # Configuration
        n_modes_show = config.extra.get('n_modes', 20)
        mode_to_show = config.extra.get('show_mode', 1)  # Which mode shape to display
        # NOTE(review): amplification == 1.0 is treated as "unset" and bumped
        # to 50.0, so an explicit 1.0 cannot be requested — confirm intent.
        deform_scale = config.amplification if config.amplification != 1.0 else 50.0
        # Limit to available modes
        freq_data = self._frequencies[:n_modes_show]
        modes = [f['mode'] for f in freq_data]
        frequencies = [f['frequency_hz'] for f in freq_data]
        # Build visualization: 2x2 grid (3D shape, bars, spectrum, table)
        fig = _make_subplots(
            rows=2, cols=2,
            specs=[
                [{"type": "scene"}, {"type": "xy"}],
                [{"type": "xy"}, {"type": "table"}]
            ],
            subplot_titles=[
                f"<b>Mode {mode_to_show} Shape</b>",
                "<b>Natural Frequencies</b>",
                "<b>Frequency Spectrum</b>",
                "<b>Mode Summary</b>"
            ]
        )
        # Mode shape (3D) — requires both geometry and the requested eigenvector
        if self._node_geo and mode_to_show in self._eigenvectors:
            mode_data = self._eigenvectors[mode_to_show]
            node_ids = mode_data['node_ids']
            disps = mode_data['displacements']
            X, Y, Z = [], [], []
            Xd, Yd, Zd = [], [], []
            colors = []
            for nid, disp in zip(node_ids, disps):
                geo = self._node_geo.get(int(nid))
                if geo is None:
                    continue  # OP2 node absent from geometry file
                X.append(geo[0])
                Y.append(geo[1])
                Z.append(geo[2])
                # Deformed position
                # NOTE(review): assumes disp[0:3] are the T1/T2/T3
                # translations of the eigenvector — confirm for the
                # result types this study produces.
                Xd.append(geo[0] + deform_scale * disp[0])
                Yd.append(geo[1] + deform_scale * disp[1])
                Zd.append(geo[2] + deform_scale * disp[2])
                # Color by displacement magnitude
                mag = np.sqrt(disp[0]**2 + disp[1]**2 + disp[2]**2)
                colors.append(mag)
            X, Y, Z = np.array(X), np.array(Y), np.array(Z)
            Xd, Yd, Zd = np.array(Xd), np.array(Yd), np.array(Zd)
            colors = np.array(colors)
            # Try to create mesh
            # NOTE(review): 2D Delaunay over deformed (x, y) only renders a
            # sensible surface for roughly planar structures — verify for
            # general 3D geometry.
            try:
                tri = _Triangulation(Xd, Yd)
                if tri.triangles is not None and len(tri.triangles) > 0:
                    i, j, k = tri.triangles.T
                    fig.add_trace(_go.Mesh3d(
                        x=Xd, y=Yd, z=Zd,
                        i=i, j=j, k=k,
                        intensity=colors,
                        colorscale='Viridis',
                        opacity=0.9,
                        flatshading=False,
                        showscale=True,
                        colorbar=dict(title="Disp. Mag.", thickness=10, len=0.4)
                    ), row=1, col=1)
            except Exception:
                # Fallback: scatter (triangulation failed, e.g. degenerate XY)
                fig.add_trace(_go.Scatter3d(
                    x=Xd, y=Yd, z=Zd,
                    mode='markers',
                    marker=dict(size=3, color=colors, colorscale='Viridis', showscale=True),
                ), row=1, col=1)
            fig.update_scenes(
                camera=dict(eye=dict(x=1.5, y=1.5, z=1.0)),
                xaxis=dict(title="X", showbackground=True),
                yaxis=dict(title="Y", showbackground=True),
                zaxis=dict(title="Z", showbackground=True),
            )
        # Frequency bar chart
        fig.add_trace(_go.Bar(
            x=modes,
            y=frequencies,
            marker_color='#3b82f6',
            text=[f"{f:.1f} Hz" for f in frequencies],
            textposition='outside',
            name='Frequency'
        ), row=1, col=2)
        fig.update_xaxes(title_text="Mode Number", row=1, col=2)
        fig.update_yaxes(title_text="Frequency (Hz)", row=1, col=2)
        # Frequency spectrum (log scale)
        fig.add_trace(_go.Scatter(
            x=modes,
            y=frequencies,
            mode='lines+markers',
            marker=dict(size=8, color='#22c55e'),
            line=dict(width=2, color='#22c55e'),
            name='Frequency'
        ), row=2, col=1)
        fig.update_xaxes(title_text="Mode Number", row=2, col=1)
        fig.update_yaxes(title_text="Frequency (Hz)", type='log', row=2, col=1)
        # Summary table (first 10 modes)
        mode_labels = [f"Mode {m}" for m in modes[:10]]
        freq_labels = [f"{f:.2f} Hz" for f in frequencies[:10]]
        fig.add_trace(_go.Table(
            header=dict(values=["<b>Mode</b>", "<b>Frequency</b>"],
                        fill_color='#1f2937', font=dict(color='white')),
            cells=dict(values=[mode_labels, freq_labels],
                       fill_color='#374151', font=dict(color='white'))
        ), row=2, col=2)
        # Layout (dark dashboard theme)
        fig.update_layout(
            width=1400, height=900,
            paper_bgcolor='#111827', plot_bgcolor='#1f2937',
            font=dict(color='white'),
            title=dict(text="<b>Atomizer Modal Analysis</b>",
                       x=0.5, font=dict(size=18)),
            showlegend=False
        )
        # Save HTML (timestamped so repeated runs never overwrite)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = config.output_dir or self.insights_path
        output_dir.mkdir(parents=True, exist_ok=True)
        html_path = output_dir / f"modal_{timestamp}.html"
        html_path.write_text(
            fig.to_html(include_plotlyjs='cdn', full_html=True),
            encoding='utf-8'
        )
        return InsightResult(
            success=True,
            html_path=html_path,
            plotly_figure=fig.to_dict(),
            summary={
                'n_modes': len(self._frequencies),
                'frequencies_hz': frequencies,
                'first_frequency_hz': frequencies[0] if frequencies else None,
                'shown_mode': mode_to_show,
            }
        )

View File

@@ -0,0 +1,361 @@
"""
Stress Field Insight
Provides 3D visualization of stress distributions from FEA results.
Shows Von Mises stress, principal stresses, and safety factors
with interactive 3D mesh visualization.
Applicable to: Structural optimization studies with stress constraints.
"""
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional, Tuple
import numpy as np
from .base import StudyInsight, InsightConfig, InsightResult, register_insight
# Lazy imports: heavy third-party modules (plotly, matplotlib, pyNastran) are
# only loaded on first use via _load_dependencies() to keep module import fast.
_plotly_loaded = False
_go = None              # plotly.graph_objects (set by _load_dependencies)
_make_subplots = None   # plotly.subplots.make_subplots
_Triangulation = None   # matplotlib.tri.Triangulation
_OP2 = None             # pyNastran.op2.op2.OP2 reader class
_BDF = None             # pyNastran.bdf.bdf.BDF reader class
def _load_dependencies():
    """Import plotting/FEA libraries on first call and cache them as globals.

    Safe to call repeatedly; the imports run only once.
    """
    global _plotly_loaded, _go, _make_subplots, _Triangulation, _OP2, _BDF
    if _plotly_loaded:
        return  # Already initialized
    import plotly.graph_objects as go
    from plotly.subplots import make_subplots
    from matplotlib.tri import Triangulation
    from pyNastran.op2.op2 import OP2
    from pyNastran.bdf.bdf import BDF
    _go, _make_subplots = go, make_subplots
    _Triangulation = Triangulation
    _OP2, _BDF = OP2, BDF
    _plotly_loaded = True
@register_insight
class StressFieldInsight(StudyInsight):
    """
    Stress field visualization for structural analysis.

    Shows:
    - 3D mesh colored by Von Mises stress
    - Stress distribution histogram
    - Hot spot identification
    - Safety factor visualization (if yield stress provided)
    """
    # Registry metadata consumed by the insight registry / CLI.
    insight_type = "stress_field"
    name = "Stress Distribution"
    description = "3D stress contour plot with Von Mises and principal stresses"
    applicable_to = ["structural", "bracket", "beam", "all"]
    required_files = ["*.op2"]

    def __init__(self, study_path: Path):
        super().__init__(study_path)
        # Paths resolved lazily by can_generate().
        self.op2_path: Optional[Path] = None
        self.geo_path: Optional[Path] = None
        # Caches filled by _load_data(): node id -> xyz, and per-result
        # stress arrays keyed '<ELEMTYPE>_<result key>'.
        self._node_geo: Optional[Dict] = None
        self._stresses: Optional[Dict] = None

    def can_generate(self) -> bool:
        """Check if an OP2 file with stress data exists for this study.

        Side effects: sets ``self.op2_path`` to the newest matching OP2 and,
        when one can be found, ``self.geo_path``.
        """
        search_paths = [
            self.results_path,
            self.study_path / "2_iterations",
            self.setup_path / "model",
        ]
        for search_path in search_paths:
            if not search_path.exists():
                continue
            # Prefer solver output named '*solution*.op2'; otherwise any OP2.
            op2_files = list(search_path.glob("**/*solution*.op2"))
            if not op2_files:
                op2_files = list(search_path.glob("**/*.op2"))
            if op2_files:
                # Newest file wins (latest iteration result).
                self.op2_path = max(op2_files, key=lambda p: p.stat().st_mtime)
                break
        if self.op2_path is None:
            return False
        # Geometry is optional: without it the figure degrades to 2D panels.
        try:
            self.geo_path = self._find_geometry_file(self.op2_path)
        except FileNotFoundError:
            pass
        # Verify the OP2 actually carries element stress results.
        try:
            _load_dependencies()
            op2 = _OP2()
            op2.read_op2(str(self.op2_path))
            return bool(op2.ctetra_stress or op2.chexa_stress or
                        op2.ctria3_stress or op2.cquad4_stress)
        except Exception:
            # Unreadable/corrupt OP2 simply means this insight is unavailable.
            return False

    def _find_geometry_file(self, op2_path: Path) -> Path:
        """Find the BDF/DAT geometry file matching *op2_path*.

        Prefers a same-stem file next to the OP2, then falls back to any
        .dat/.bdf in the same folder.

        Raises:
            FileNotFoundError: If no candidate geometry file exists.
        """
        folder = op2_path.parent
        base = op2_path.stem
        for ext in ['.dat', '.bdf']:
            cand = folder / (base + ext)
            if cand.exists():
                return cand
        for f in folder.iterdir():
            if f.suffix.lower() in ['.dat', '.bdf']:
                return f
        raise FileNotFoundError(f"No geometry file found for {op2_path}")

    def _extract_stress_results(self, stress_dict, elem_type: str) -> None:
        """Extract Von Mises data from one pyNastran stress dict into
        self._stresses, keyed '<elem_type>_<result key>'."""
        for key, stress_obj in stress_dict.items():
            if not hasattr(stress_obj, 'data'):
                continue
            data = stress_obj.data
            if data.ndim == 3:
                data = data[0]  # First load case
            if hasattr(stress_obj, 'ovm'):
                ovm = stress_obj.ovm
            else:
                # No precomputed Von Mises on this result object.
                # Simplified fallback: max absolute stress component.
                ovm = np.max(np.abs(data), axis=-1) if data.ndim > 1 else data
            element_ids = stress_obj.element if hasattr(stress_obj, 'element') else None
            self._stresses[f'{elem_type}_{key}'] = {
                'element_ids': element_ids,
                'von_mises': ovm,
                'data': data,
            }

    def _load_data(self):
        """Load geometry (if available) and stress data from the OP2."""
        if self._stresses is not None:
            return  # Already loaded
        _load_dependencies()
        # Load geometry if available
        if self.geo_path and self.geo_path.exists():
            bdf = _BDF()
            bdf.read_bdf(str(self.geo_path))
            self._node_geo = {int(nid): node.get_position()
                              for nid, node in bdf.nodes.items()}
        else:
            self._node_geo = {}
        # Load stress data
        op2 = _OP2()
        op2.read_op2(str(self.op2_path))
        self._stresses = {}
        # Solid (CTETRA, CHEXA) and shell (CTRIA3, CQUAD4) results share the
        # same extraction logic.
        for stress_dict, elem_type in [(op2.ctetra_stress, 'CTETRA'),
                                       (op2.chexa_stress, 'CHEXA'),
                                       (op2.ctria3_stress, 'CTRIA3'),
                                       (op2.cquad4_stress, 'CQUAD4')]:
            self._extract_stress_results(stress_dict, elem_type)

    def _generate(self, config: InsightConfig) -> InsightResult:
        """Generate the stress field visualization.

        Recognized ``config.extra`` keys:
            colorscale (str): Plotly colorscale name (default 'Hot').
            yield_stress (float): Yield stress in ``stress_unit``; enables
                the yield line and the safety factor metric.
            stress_unit (str): Unit label for annotations (default 'MPa').

        Returns:
            InsightResult with the written HTML path, the Plotly figure as a
            dict, and summary stress statistics.
        """
        self._load_data()
        if not self._stresses:
            return InsightResult(success=False, error="No stress data found in OP2")
        _load_dependencies()
        # Configuration
        colorscale = config.extra.get('colorscale', 'Hot')
        yield_stress = config.extra.get('yield_stress', None)  # same unit as results
        stress_unit = config.extra.get('stress_unit', 'MPa')
        # Aggregate Von Mises values across all element types / results
        all_vm = []
        for key, data in self._stresses.items():
            vm = data['von_mises']
            if isinstance(vm, np.ndarray):
                all_vm.extend(vm.flatten().tolist())
        all_vm = np.array(all_vm)
        if all_vm.size == 0:
            # Stress objects were present but carried no numeric arrays.
            return InsightResult(success=False, error="No valid stress values found")
        max_stress = float(np.max(all_vm))
        mean_stress = float(np.mean(all_vm))
        p95_stress = float(np.percentile(all_vm, 95))
        p99_stress = float(np.percentile(all_vm, 99))
        # Build visualization: 3D scene on top, histogram + stats below
        fig = _make_subplots(
            rows=2, cols=2,
            specs=[
                [{"type": "scene", "colspan": 2}, None],
                [{"type": "xy"}, {"type": "table"}]
            ],
            row_heights=[0.65, 0.35],
            subplot_titles=[
                "<b>Von Mises Stress Distribution</b>",
                "<b>Stress Histogram</b>",
                "<b>Summary Statistics</b>"
            ]
        )
        # 3D stress field (if we have node geometry)
        if self._node_geo:
            # Get node coordinates
            node_ids = list(self._node_geo.keys())
            X = np.array([self._node_geo[nid][0] for nid in node_ids])
            Y = np.array([self._node_geo[nid][1] for nid in node_ids])
            Z = np.array([self._node_geo[nid][2] for nid in node_ids])
            # Simplified coloring: element stresses are sampled onto nodes at
            # random because no element-to-node mapping is available here.
            # Fixed seed keeps the rendering reproducible between runs.
            rng = np.random.default_rng(42)
            colors = rng.choice(all_vm, size=len(node_ids), replace=True)
            try:
                tri = _Triangulation(X, Y)
                if tri.triangles is not None and len(tri.triangles) > 0:
                    i, j, k = tri.triangles.T
                    fig.add_trace(_go.Mesh3d(
                        x=X, y=Y, z=Z,
                        i=i, j=j, k=k,
                        intensity=colors,
                        colorscale=colorscale,
                        opacity=0.9,
                        flatshading=False,
                        lighting=dict(ambient=0.5, diffuse=0.7, specular=0.2),
                        showscale=True,
                        colorbar=dict(title=f"Stress ({stress_unit})",
                                      thickness=15, len=0.5)
                    ), row=1, col=1)
            except Exception:
                # Fallback: scatter plot (triangulation can fail on
                # degenerate / collinear XY projections)
                fig.add_trace(_go.Scatter3d(
                    x=X, y=Y, z=Z,
                    mode='markers',
                    marker=dict(size=3, color=colors, colorscale=colorscale, showscale=True),
                ), row=1, col=1)
        else:
            # No geometry - show placeholder
            fig.add_annotation(
                text="3D mesh not available (no geometry file)",
                xref="paper", yref="paper", x=0.5, y=0.7,
                showarrow=False, font=dict(size=14, color='white')
            )
        # Configure 3D scene
        fig.update_scenes(
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.0)),
            xaxis=dict(title="X", showbackground=True),
            yaxis=dict(title="Y", showbackground=True),
            zaxis=dict(title="Z", showbackground=True),
        )
        # Histogram
        fig.add_trace(_go.Histogram(
            x=all_vm,
            nbinsx=50,
            marker_color='#ef4444',
            opacity=0.8,
            name='Von Mises'
        ), row=2, col=1)
        fig.update_xaxes(title_text=f"Stress ({stress_unit})", row=2, col=1)
        fig.update_yaxes(title_text="Count", row=2, col=1)
        # Add yield line if provided
        if yield_stress:
            fig.add_vline(x=yield_stress, line_dash="dash", line_color="yellow",
                          annotation_text=f"Yield: {yield_stress} {stress_unit}",
                          row=2, col=1)
        # Summary table
        stats_labels = [
            "Maximum Stress",
            "Mean Stress",
            "95th Percentile",
            "99th Percentile",
        ]
        stats_values = [
            f"{max_stress:.2f} {stress_unit}",
            f"{mean_stress:.2f} {stress_unit}",
            f"{p95_stress:.2f} {stress_unit}",
            f"{p99_stress:.2f} {stress_unit}",
        ]
        if yield_stress:
            safety_factor = yield_stress / max_stress if max_stress > 0 else float('inf')
            stats_labels.append("Safety Factor")
            stats_values.append(f"{safety_factor:.2f}")
        fig.add_trace(_go.Table(
            header=dict(values=["<b>Metric</b>", "<b>Value</b>"],
                        fill_color='#1f2937', font=dict(color='white')),
            cells=dict(values=[stats_labels, stats_values],
                       fill_color='#374151', font=dict(color='white'))
        ), row=2, col=2)
        # Layout (dark theme matching the other insights)
        fig.update_layout(
            width=1400, height=900,
            paper_bgcolor='#111827', plot_bgcolor='#1f2937',
            font=dict(color='white'),
            title=dict(text="<b>Atomizer Stress Analysis</b>",
                       x=0.5, font=dict(size=18)),
            showlegend=False
        )
        # Save HTML (timestamped so repeated runs never overwrite)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = config.output_dir or self.insights_path
        output_dir.mkdir(parents=True, exist_ok=True)
        html_path = output_dir / f"stress_{timestamp}.html"
        html_path.write_text(
            fig.to_html(include_plotlyjs='cdn', full_html=True),
            encoding='utf-8'
        )
        return InsightResult(
            success=True,
            html_path=html_path,
            plotly_figure=fig.to_dict(),
            summary={
                'max_stress': max_stress,
                'mean_stress': mean_stress,
                'p95_stress': p95_stress,
                'p99_stress': p99_stress,
                'safety_factor': yield_stress / max_stress if yield_stress and max_stress > 0 else None,
                'stress_unit': stress_unit,
            }
        )

View File

@@ -0,0 +1,323 @@
"""
Thermal Field Insight
Provides visualization of temperature distributions from thermal FEA results.
Shows temperature contours, gradients, and thermal statistics.
Applicable to: Thermal analysis and thermo-structural optimization studies.
"""
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional, Tuple
import numpy as np
from .base import StudyInsight, InsightConfig, InsightResult, register_insight
# Lazy imports: heavy third-party modules (plotly, matplotlib, pyNastran) are
# only loaded on first use via _load_dependencies() to keep module import fast.
_plotly_loaded = False
_go = None              # plotly.graph_objects (set by _load_dependencies)
_make_subplots = None   # plotly.subplots.make_subplots
_Triangulation = None   # matplotlib.tri.Triangulation
_OP2 = None             # pyNastran.op2.op2.OP2 reader class
_BDF = None             # pyNastran.bdf.bdf.BDF reader class
def _load_dependencies():
    """Populate the module-level handles to plotly/matplotlib/pyNastran.

    Idempotent: the imports are performed exactly once per process.
    """
    global _plotly_loaded, _go, _make_subplots, _Triangulation, _OP2, _BDF
    if _plotly_loaded:
        return
    import plotly.graph_objects as plotly_go
    from plotly.subplots import make_subplots as plotly_make_subplots
    from matplotlib.tri import Triangulation as MplTriangulation
    from pyNastran.op2.op2 import OP2 as NastranOP2
    from pyNastran.bdf.bdf import BDF as NastranBDF
    _go = plotly_go
    _make_subplots = plotly_make_subplots
    _Triangulation = MplTriangulation
    _OP2 = NastranOP2
    _BDF = NastranBDF
    _plotly_loaded = True
@register_insight
class ThermalInsight(StudyInsight):
    """
    Thermal field visualization.

    Shows:
    - 3D mesh colored by temperature
    - Temperature distribution histogram
    - Hot/cold spot identification
    - Temperature gradient visualization
    """
    # Registry metadata consumed by the insight registry / CLI.
    insight_type = "thermal"
    name = "Thermal Analysis"
    description = "Temperature distribution and thermal gradients"
    applicable_to = ["thermal", "thermo-structural", "all"]
    required_files = ["*.op2"]

    def __init__(self, study_path: Path):
        super().__init__(study_path)
        # Paths resolved lazily by can_generate().
        self.op2_path: Optional[Path] = None
        self.geo_path: Optional[Path] = None
        # Caches filled by _load_data(): node id -> xyz position, and
        # per-result temperature arrays keyed by result key.
        self._node_geo: Optional[Dict] = None
        self._temperatures: Optional[Dict] = None

    def can_generate(self) -> bool:
        """Check if OP2 file with temperature data exists.

        Side effects: sets self.op2_path to the newest matching OP2 and,
        when one can be found, self.geo_path.
        """
        search_paths = [
            self.results_path,
            self.study_path / "2_iterations",
            self.setup_path / "model",
        ]
        for search_path in search_paths:
            if not search_path.exists():
                continue
            # Prefer solver output named '*solution*.op2'; otherwise any OP2.
            op2_files = list(search_path.glob("**/*solution*.op2"))
            if not op2_files:
                op2_files = list(search_path.glob("**/*.op2"))
            if op2_files:
                # Newest file wins (latest iteration result).
                self.op2_path = max(op2_files, key=lambda p: p.stat().st_mtime)
                break
        if self.op2_path is None:
            return False
        # Try to find geometry (optional: the figure degrades without it)
        try:
            self.geo_path = self._find_geometry_file(self.op2_path)
        except FileNotFoundError:
            pass
        # Verify temperature data exists
        try:
            _load_dependencies()
            op2 = _OP2()
            op2.read_op2(str(self.op2_path))
            # Check for temperature results (various possible attributes)
            return bool(hasattr(op2, 'temperatures') and op2.temperatures)
        except Exception:
            # Unreadable/corrupt OP2 simply means the insight is unavailable.
            return False

    def _find_geometry_file(self, op2_path: Path) -> Path:
        """Find BDF/DAT geometry file.

        Prefers a same-stem file next to the OP2, then falls back to any
        .dat/.bdf in the same folder.

        Raises:
            FileNotFoundError: If no candidate geometry file exists.
        """
        folder = op2_path.parent
        base = op2_path.stem
        for ext in ['.dat', '.bdf']:
            cand = folder / (base + ext)
            if cand.exists():
                return cand
        for f in folder.iterdir():
            if f.suffix.lower() in ['.dat', '.bdf']:
                return f
        raise FileNotFoundError(f"No geometry file found for {op2_path}")

    def _load_data(self) -> None:
        """Load geometry and temperature data from OP2 into instance caches."""
        if self._temperatures is not None:
            return  # Already loaded
        _load_dependencies()
        # Load geometry if available
        if self.geo_path and self.geo_path.exists():
            bdf = _BDF()
            bdf.read_bdf(str(self.geo_path))
            self._node_geo = {int(nid): node.get_position()
                              for nid, node in bdf.nodes.items()}
        else:
            self._node_geo = {}
        # Load temperature data
        op2 = _OP2()
        op2.read_op2(str(self.op2_path))
        self._temperatures = {}
        if hasattr(op2, 'temperatures'):
            for key, temp_obj in op2.temperatures.items():
                if hasattr(temp_obj, 'data'):
                    data = temp_obj.data
                    if data.ndim == 3:
                        data = data[0]  # First time step
                    # node_gridtype may be 1-D (ids only) or 2-D (id, gridtype)
                    ngt = temp_obj.node_gridtype.astype(int) if hasattr(temp_obj, 'node_gridtype') else None
                    node_ids = ngt if ngt is not None and ngt.ndim == 1 else (ngt[:, 0] if ngt is not None else None)
                    # NOTE(review): assumes the flattened data aligns 1:1 with
                    # node_ids — confirm for multi-column temperature arrays,
                    # where flatten() would yield more values than nodes.
                    self._temperatures[str(key)] = {
                        'node_ids': node_ids,
                        'temperatures': data.flatten() if data.ndim > 1 else data,
                    }

    def _generate(self, config: InsightConfig) -> InsightResult:
        """Generate thermal field visualization.

        Recognized config.extra keys:
            colorscale (str): Plotly colorscale name (default 'Thermal';
                NOTE(review): presumably the cmocean 'thermal' scale —
                confirm the name resolves in the installed plotly version).
            temp_unit (str): Unit label for annotations (default 'K').

        Returns:
            InsightResult with the written HTML path, the Plotly figure as
            a dict, and summary temperature statistics.
        """
        self._load_data()
        if not self._temperatures:
            return InsightResult(success=False, error="No temperature data found in OP2")
        _load_dependencies()
        # Configuration
        colorscale = config.extra.get('colorscale', 'Thermal')
        temp_unit = config.extra.get('temp_unit', 'K')
        # Aggregate temperature data across all result keys
        all_temps = []
        all_node_ids = []
        for key, data in self._temperatures.items():
            temps = data['temperatures']
            if isinstance(temps, np.ndarray):
                all_temps.extend(temps.flatten().tolist())
            if data['node_ids'] is not None:
                all_node_ids.extend(data['node_ids'].flatten().tolist())
        all_temps = np.array(all_temps)
        if len(all_temps) == 0:
            # Result objects were present but carried no numeric arrays.
            return InsightResult(success=False, error="No valid temperature values found")
        max_temp = float(np.max(all_temps))
        min_temp = float(np.min(all_temps))
        mean_temp = float(np.mean(all_temps))
        temp_range = max_temp - min_temp
        # Build visualization: 3D scene on top, histogram + stats below
        fig = _make_subplots(
            rows=2, cols=2,
            specs=[
                [{"type": "scene", "colspan": 2}, None],
                [{"type": "xy"}, {"type": "table"}]
            ],
            row_heights=[0.65, 0.35],
            subplot_titles=[
                "<b>Temperature Distribution</b>",
                "<b>Temperature Histogram</b>",
                "<b>Summary Statistics</b>"
            ]
        )
        # 3D temperature field (needs both geometry and node ids)
        if self._node_geo and all_node_ids:
            # Build node-to-temp mapping; later result keys overwrite earlier
            # ones for duplicate nodes.
            temp_map = {}
            for key, data in self._temperatures.items():
                if data['node_ids'] is not None:
                    # NOTE(review): zip truncates silently if the arrays have
                    # different lengths — verify upstream alignment.
                    for nid, temp in zip(data['node_ids'].flatten(), data['temperatures'].flatten()):
                        temp_map[int(nid)] = temp
            node_ids = list(self._node_geo.keys())
            X = np.array([self._node_geo[nid][0] for nid in node_ids])
            Y = np.array([self._node_geo[nid][1] for nid in node_ids])
            Z = np.array([self._node_geo[nid][2] for nid in node_ids])
            # Nodes without a temperature result fall back to the mean.
            colors = np.array([temp_map.get(nid, mean_temp) for nid in node_ids])
            try:
                tri = _Triangulation(X, Y)
                if tri.triangles is not None and len(tri.triangles) > 0:
                    i, j, k = tri.triangles.T
                    fig.add_trace(_go.Mesh3d(
                        x=X, y=Y, z=Z,
                        i=i, j=j, k=k,
                        intensity=colors,
                        colorscale=colorscale,
                        opacity=0.95,
                        flatshading=False,
                        lighting=dict(ambient=0.5, diffuse=0.7, specular=0.2),
                        showscale=True,
                        colorbar=dict(title=f"Temp ({temp_unit})",
                                      thickness=15, len=0.5)
                    ), row=1, col=1)
            except Exception:
                # Triangulation can fail on degenerate XY projections;
                # fall back to a point cloud.
                fig.add_trace(_go.Scatter3d(
                    x=X, y=Y, z=Z,
                    mode='markers',
                    marker=dict(size=4, color=colors, colorscale=colorscale, showscale=True),
                ), row=1, col=1)
        else:
            fig.add_annotation(
                text="3D mesh not available",
                xref="paper", yref="paper", x=0.5, y=0.7,
                showarrow=False, font=dict(size=14, color='white')
            )
        fig.update_scenes(
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.0)),
            xaxis=dict(title="X", showbackground=True),
            yaxis=dict(title="Y", showbackground=True),
            zaxis=dict(title="Z", showbackground=True),
        )
        # Temperature histogram
        fig.add_trace(_go.Histogram(
            x=all_temps,
            nbinsx=50,
            marker_color='#f97316',
            opacity=0.8,
            name='Temperature'
        ), row=2, col=1)
        fig.update_xaxes(title_text=f"Temperature ({temp_unit})", row=2, col=1)
        fig.update_yaxes(title_text="Count", row=2, col=1)
        # Summary table
        stats_labels = [
            "Maximum Temperature",
            "Minimum Temperature",
            "Mean Temperature",
            "Temperature Range",
            "Number of Nodes",
        ]
        stats_values = [
            f"{max_temp:.2f} {temp_unit}",
            f"{min_temp:.2f} {temp_unit}",
            f"{mean_temp:.2f} {temp_unit}",
            f"{temp_range:.2f} {temp_unit}",
            str(len(all_temps)),
        ]
        fig.add_trace(_go.Table(
            header=dict(values=["<b>Metric</b>", "<b>Value</b>"],
                        fill_color='#1f2937', font=dict(color='white')),
            cells=dict(values=[stats_labels, stats_values],
                       fill_color='#374151', font=dict(color='white'))
        ), row=2, col=2)
        # Layout (dark theme matching the other insights)
        fig.update_layout(
            width=1400, height=900,
            paper_bgcolor='#111827', plot_bgcolor='#1f2937',
            font=dict(color='white'),
            title=dict(text="<b>Atomizer Thermal Analysis</b>",
                       x=0.5, font=dict(size=18)),
            showlegend=False
        )
        # Save HTML (timestamped so repeated runs never overwrite)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = config.output_dir or self.insights_path
        output_dir.mkdir(parents=True, exist_ok=True)
        html_path = output_dir / f"thermal_{timestamp}.html"
        html_path.write_text(
            fig.to_html(include_plotlyjs='cdn', full_html=True),
            encoding='utf-8'
        )
        return InsightResult(
            success=True,
            html_path=html_path,
            plotly_figure=fig.to_dict(),
            summary={
                'max_temp': max_temp,
                'min_temp': min_temp,
                'mean_temp': mean_temp,
                'temp_range': temp_range,
                'temp_unit': temp_unit,
            }
        )

View File

@@ -0,0 +1,697 @@
"""
Zernike Wavefront Error (WFE) Insight
Provides 3D surface visualization of mirror wavefront errors with
Zernike polynomial decomposition. Generates three views:
- 40 deg vs 20 deg (operational tilt comparison)
- 60 deg vs 20 deg (operational tilt comparison)
- 90 deg Manufacturing (absolute with optician workload metrics)
Applicable to: Mirror optimization studies with multi-subcase gravity loads.
"""
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional, Tuple
import numpy as np
from math import factorial
from numpy.linalg import LinAlgError
from .base import StudyInsight, InsightConfig, InsightResult, register_insight
# Lazy imports to avoid startup overhead: plotly, matplotlib and pyNastran are
# only loaded on first use via _load_dependencies().
_plotly_loaded = False
_go = None              # plotly.graph_objects (set by _load_dependencies)
_make_subplots = None   # plotly.subplots.make_subplots
_Triangulation = None   # matplotlib.tri.Triangulation
_OP2 = None             # pyNastran.op2.op2.OP2 reader class
_BDF = None             # pyNastran.bdf.bdf.BDF reader class
def _load_dependencies():
    """Bind the heavy third-party modules to module globals on first use."""
    global _plotly_loaded, _go, _make_subplots, _Triangulation, _OP2, _BDF
    if _plotly_loaded:
        return  # Nothing to do on subsequent calls
    import plotly.graph_objects
    from plotly.subplots import make_subplots
    from matplotlib.tri import Triangulation
    from pyNastran.op2.op2 import OP2
    from pyNastran.bdf.bdf import BDF
    _go = plotly.graph_objects
    _make_subplots = make_subplots
    _Triangulation = Triangulation
    _OP2 = OP2
    _BDF = BDF
    _plotly_loaded = True
# ============================================================================
# Zernike Mathematics
# ============================================================================
def noll_indices(j: int) -> Tuple[int, int]:
    """Convert a Noll index j (1-based) to radial/azimuthal orders (n, m).

    Follows the Noll (1976) convention: within each radial order n, modes
    are ordered by increasing |m|, and the sign of m is tied to the parity
    of j (even j -> +m cosine term, odd j -> -m sine term). The previous
    implementation always emitted (-|m|, +|m|) pairs, which swapped e.g.
    J2/J3 (tilt) and J12/J13 relative to the standard convention.

    Args:
        j: Noll index, >= 1.

    Returns:
        (n, m) radial and signed azimuthal orders.

    Raises:
        ValueError: If j < 1.
    """
    if j < 1:
        raise ValueError("Noll index j must be >= 1")
    # Walk down the triangle of radial orders: row n holds n+1 modes.
    n = 0
    j1 = j - 1
    while j1 > n:
        n += 1
        j1 -= n
    # Standard closed-form for the signed azimuthal order within row n;
    # (-1)**j applies Noll's parity rule (even j -> cosine, m >= 0).
    m = (-1) ** j * ((n % 2) + 2 * ((j1 + ((n + 1) % 2)) // 2))
    return n, m
def zernike_noll(j: int, r: np.ndarray, th: np.ndarray) -> np.ndarray:
    """Evaluate the (un-normalized) Zernike polynomial for Noll index j.

    Args:
        j: Noll index (>= 1), converted to (n, m) via noll_indices().
        r: Normalized radial coordinates on the unit disk.
        th: Azimuthal angles in radians.

    Returns:
        Array of polynomial values with the same shape as r.
    """
    n, m = noll_indices(j)
    m_abs = abs(m)
    # Radial part: standard Zernike radial polynomial R_n^{|m|}(r).
    radial = np.zeros_like(r)
    for s in range((n - m_abs) // 2 + 1):
        numer = (-1) ** s * factorial(n - s)
        denom = (factorial(s)
                 * factorial((n + m_abs) // 2 - s)
                 * factorial((n - m_abs) // 2 - s))
        radial += (numer / denom) * r ** (n - 2 * s)
    # Azimuthal part: cosine for m > 0, sine for m < 0, none for m == 0.
    if m > 0:
        return radial * np.cos(m * th)
    if m < 0:
        return radial * np.sin(-m * th)
    return radial
def zernike_common_name(n: int, m: int) -> str:
    """Return the conventional optics name for Zernike mode (n, m).

    Falls back to a generic 'Z(n=..., m=...)' label for unnamed
    high-order modes.
    """
    common = {
        (0, 0): "Piston",
        (1, -1): "Tilt X",
        (1, 1): "Tilt Y",
        (2, 0): "Defocus",
        (2, -2): "Astig 45°",
        (2, 2): "Astig 0°",
        (3, -1): "Coma X",
        (3, 1): "Coma Y",
        (3, -3): "Trefoil X",
        (3, 3): "Trefoil Y",
        (4, 0): "Primary Spherical",
        (4, -2): "Sec Astig X",
        (4, 2): "Sec Astig Y",
        (4, -4): "Quadrafoil X",
        (4, 4): "Quadrafoil Y",
        (5, -1): "Sec Coma X",
        (5, 1): "Sec Coma Y",
        (5, -3): "Sec Trefoil X",
        (5, 3): "Sec Trefoil Y",
        (5, -5): "Pentafoil X",
        (5, 5): "Pentafoil Y",
        (6, 0): "Sec Spherical",
    }
    try:
        return common[(n, m)]
    except KeyError:
        return f"Z(n={n}, m={m})"
def zernike_label(j: int) -> str:
    """Return a display label for Zernike coefficient J{j},
    e.g. 'J04 - Defocus (n=2, m=0)'."""
    n, m = noll_indices(j)
    name = zernike_common_name(n, m)
    return f"J{j:02d} - {name} (n={n}, m={m})"
def compute_zernike_coeffs(
    X: np.ndarray,
    Y: np.ndarray,
    vals: np.ndarray,
    n_modes: int,
    chunk_size: int = 100000
) -> Tuple[np.ndarray, float]:
    """Least-squares fit of the first n_modes Zernike modes to WFE samples.

    Accumulates the normal equations (G = Z^T Z, h = Z^T v) in chunks so the
    full design matrix is never materialized for large node counts.

    Args:
        X, Y: Sample coordinates (same length as vals).
        vals: WFE values per sample; NaN entries are ignored.
        n_modes: Number of Noll modes (J1..Jn) to fit.
        chunk_size: Rows of the design matrix built per accumulation step.

    Returns:
        (coefficients, R): Fitted coefficients and the normalization radius
        used to map (X, Y) onto the unit disk.

    Raises:
        RuntimeError: If no finite samples fall inside the unit disk.
    """
    x_cent = X - np.mean(X)
    y_cent = Y - np.mean(Y)
    # Normalization radius: farthest sample from the centroid.
    R = float(np.max(np.hypot(x_cent, y_cent)))
    radii = np.hypot(x_cent / R, y_cent / R).astype(np.float32)
    angles = np.arctan2(y_cent, x_cent).astype(np.float32)
    usable = (radii <= 1.0) & ~np.isnan(vals)
    if not np.any(usable):
        raise RuntimeError("No valid points inside unit disk.")
    rows = np.nonzero(usable)[0]
    n_fit = int(n_modes)
    # Normal-equation accumulators kept in float64 for stability.
    normal_mat = np.zeros((n_fit, n_fit), dtype=np.float64)
    rhs = np.zeros((n_fit,), dtype=np.float64)
    values64 = vals.astype(np.float64)
    for offset in range(0, len(rows), chunk_size):
        chunk = rows[offset:offset + chunk_size]
        r_c = radii[chunk]
        th_c = angles[chunk]
        v_c = values64[chunk]
        # Design matrix for this chunk, float32 to halve the memory cost.
        design = np.column_stack(
            [zernike_noll(j, r_c, th_c).astype(np.float32)
             for j in range(1, n_fit + 1)])
        normal_mat += (design.T @ design).astype(np.float64)
        rhs += (design.T @ v_c).astype(np.float64)
    try:
        coeffs = np.linalg.solve(normal_mat, rhs)
    except LinAlgError:
        # Singular normal matrix: fall back to a least-squares solution.
        coeffs = np.linalg.lstsq(normal_mat, rhs, rcond=None)[0]
    return coeffs, R
# ============================================================================
# Configuration Defaults
# ============================================================================
# Default generation settings for the Zernike WFE insight; individual keys
# can be overridden per run via InsightConfig.extra.
DEFAULT_CONFIG = {
    'n_modes': 50,  # Number of Zernike modes fitted (Noll J1..J50)
    'amp': 0.5,  # Visual deformation scale
    'pancake': 3.0,  # Z-axis range multiplier
    'plot_downsample': 10000,  # Max points rendered in the 3D surface
    'filter_low_orders': 4,  # Piston, tip, tilt, defocus
    'colorscale': 'Turbo',  # Plotly colorscale for the residual surface
    'disp_unit': 'mm',  # OP2 displacement unit ('mm' -> 1e6 nm, else meters)
    'show_bar_chart': True,  # Include the |coefficient| bar chart subplot
}
@register_insight
class ZernikeWFEInsight(StudyInsight):
"""
Zernike Wavefront Error visualization for mirror optimization.
Generates interactive 3D surface plots showing:
- Residual WFE after Zernike fit
- Coefficient bar charts
- RMS metrics tables
- Manufacturing orientation analysis
"""
insight_type = "zernike_wfe"
name = "Zernike WFE Analysis"
description = "3D wavefront error surface with Zernike decomposition"
applicable_to = ["mirror", "optics", "wfe"]
required_files = ["*.op2"]
def __init__(self, study_path: Path):
super().__init__(study_path)
self.op2_path: Optional[Path] = None
self.geo_path: Optional[Path] = None
self._node_geo: Optional[Dict] = None
self._displacements: Optional[Dict] = None
def can_generate(self) -> bool:
"""Check if OP2 and geometry files exist."""
# Look for OP2 in results or iterations
search_paths = [
self.results_path,
self.study_path / "2_iterations",
self.setup_path / "model",
]
for search_path in search_paths:
if not search_path.exists():
continue
op2_files = list(search_path.glob("**/*solution*.op2"))
if not op2_files:
op2_files = list(search_path.glob("**/*.op2"))
if op2_files:
self.op2_path = max(op2_files, key=lambda p: p.stat().st_mtime)
break
if self.op2_path is None:
return False
# Find geometry
try:
self.geo_path = self._find_geometry_file(self.op2_path)
return True
except FileNotFoundError:
return False
def _find_geometry_file(self, op2_path: Path) -> Path:
"""Find BDF/DAT geometry file for OP2."""
folder = op2_path.parent
base = op2_path.stem
for ext in ['.dat', '.bdf']:
cand = folder / (base + ext)
if cand.exists():
return cand
for f in folder.iterdir():
if f.suffix.lower() in ['.dat', '.bdf']:
return f
raise FileNotFoundError(f"No geometry file found for {op2_path}")
def _load_data(self):
"""Load geometry and displacement data."""
if self._node_geo is not None:
return # Already loaded
_load_dependencies()
# Read geometry
bdf = _BDF()
bdf.read_bdf(str(self.geo_path))
self._node_geo = {int(nid): node.get_position()
for nid, node in bdf.nodes.items()}
# Read displacements
op2 = _OP2()
op2.read_op2(str(self.op2_path))
if not op2.displacements:
raise RuntimeError("No displacement data in OP2")
self._displacements = {}
for key, darr in op2.displacements.items():
data = darr.data
dmat = data[0] if data.ndim == 3 else (data if data.ndim == 2 else None)
if dmat is None:
continue
ngt = darr.node_gridtype.astype(int)
node_ids = ngt if ngt.ndim == 1 else ngt[:, 0]
isubcase = getattr(darr, 'isubcase', None)
label = str(isubcase) if isubcase else str(key)
self._displacements[label] = {
'node_ids': node_ids.astype(int),
'disp': dmat.copy()
}
def _build_wfe_arrays(
self,
label: str,
disp_unit: str = 'mm'
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Build X, Y, WFE arrays for a subcase."""
nm_per_unit = 1e6 if disp_unit == 'mm' else 1e9
data = self._displacements[label]
node_ids = data['node_ids']
dmat = data['disp']
X, Y, WFE = [], [], []
valid_nids = []
for nid, vec in zip(node_ids, dmat):
geo = self._node_geo.get(int(nid))
if geo is None:
continue
X.append(geo[0])
Y.append(geo[1])
wfe = vec[2] * 2.0 * nm_per_unit # Z-disp to WFE
WFE.append(wfe)
valid_nids.append(nid)
return (np.array(X), np.array(Y), np.array(WFE), np.array(valid_nids))
def _compute_relative_wfe(
self,
X1, Y1, WFE1, nids1,
X2, Y2, WFE2, nids2
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Compute WFE1 - WFE2 for common nodes."""
ref_map = {int(nid): (x, y, w) for nid, x, y, w in zip(nids2, X2, Y2, WFE2)}
X_rel, Y_rel, WFE_rel = [], [], []
for nid, x, y, w in zip(nids1, X1, Y1, WFE1):
nid = int(nid)
if nid in ref_map:
_, _, w_ref = ref_map[nid]
X_rel.append(x)
Y_rel.append(y)
WFE_rel.append(w - w_ref)
return np.array(X_rel), np.array(Y_rel), np.array(WFE_rel)
def _compute_metrics(
self,
X: np.ndarray,
Y: np.ndarray,
W_nm: np.ndarray,
n_modes: int,
filter_orders: int
) -> Dict[str, Any]:
"""Compute RMS metrics and Zernike coefficients."""
coeffs, R = compute_zernike_coeffs(X, Y, W_nm, n_modes)
Xc = X - np.mean(X)
Yc = Y - np.mean(Y)
r = np.hypot(Xc / R, Yc / R)
th = np.arctan2(Yc, Xc)
Z = np.column_stack([zernike_noll(j, r, th) for j in range(1, n_modes + 1)])
W_res_filt = W_nm - Z[:, :filter_orders].dot(coeffs[:filter_orders])
W_res_filt_j1to3 = W_nm - Z[:, :3].dot(coeffs[:3])
return {
'coefficients': coeffs,
'R': R,
'global_rms': float(np.sqrt(np.mean(W_nm**2))),
'filtered_rms': float(np.sqrt(np.mean(W_res_filt**2))),
'rms_filter_j1to3': float(np.sqrt(np.mean(W_res_filt_j1to3**2))),
'W_res_filt': W_res_filt,
}
def _compute_aberration_magnitudes(self, coeffs: np.ndarray) -> Dict[str, float]:
"""Compute magnitude of specific aberration modes."""
return {
'defocus_nm': float(abs(coeffs[3])) if len(coeffs) > 3 else 0.0,
'astigmatism_rms': float(np.sqrt(coeffs[4]**2 + coeffs[5]**2)) if len(coeffs) > 5 else 0.0,
'coma_rms': float(np.sqrt(coeffs[6]**2 + coeffs[7]**2)) if len(coeffs) > 7 else 0.0,
'trefoil_rms': float(np.sqrt(coeffs[8]**2 + coeffs[9]**2)) if len(coeffs) > 9 else 0.0,
'spherical_nm': float(abs(coeffs[10])) if len(coeffs) > 10 else 0.0,
}
def _generate_view_html(
self,
title: str,
X: np.ndarray,
Y: np.ndarray,
W_nm: np.ndarray,
rms_data: Dict,
config: Dict,
is_relative: bool = False,
ref_title: str = "20 deg",
abs_pair: Optional[Tuple[float, float]] = None,
is_manufacturing: bool = False,
mfg_metrics: Optional[Dict] = None,
correction_metrics: Optional[Dict] = None,
) -> str:
"""Generate HTML for a single view."""
_load_dependencies()
n_modes = config.get('n_modes', 50)
amp = config.get('amp', 0.5)
pancake = config.get('pancake', 3.0)
downsample = config.get('plot_downsample', 10000)
colorscale = config.get('colorscale', 'Turbo')
show_bar = config.get('show_bar_chart', True)
coeffs = rms_data['coefficients']
global_rms = rms_data['global_rms']
filtered_rms = rms_data['filtered_rms']
W_res_filt = rms_data['W_res_filt']
labels = [zernike_label(j) for j in range(1, n_modes + 1)]
coeff_abs = np.abs(coeffs)
# Downsample
n = len(X)
if n > downsample:
rng = np.random.default_rng(42)
sel = rng.choice(n, size=downsample, replace=False)
Xp, Yp, Wp = X[sel], Y[sel], W_res_filt[sel]
else:
Xp, Yp, Wp = X, Y, W_res_filt
res_amp = amp * Wp
max_amp = float(np.max(np.abs(res_amp))) if res_amp.size else 1.0
# Build mesh
mesh_traces = []
try:
tri = _Triangulation(Xp, Yp)
if tri.triangles is not None and len(tri.triangles) > 0:
i, j, k = tri.triangles.T
mesh_traces.append(_go.Mesh3d(
x=Xp, y=Yp, z=res_amp,
i=i, j=j, k=k,
intensity=res_amp,
colorscale=colorscale,
opacity=1.0,
flatshading=False,
lighting=dict(ambient=0.4, diffuse=0.8, specular=0.3,
roughness=0.5, fresnel=0.2),
lightposition=dict(x=100, y=200, z=300),
showscale=True,
colorbar=dict(title=dict(text="Residual (nm)", side="right"),
thickness=15, len=0.6, tickformat=".1f"),
hovertemplate="X: %{x:.1f}<br>Y: %{y:.1f}<br>Residual: %{z:.2f} nm<extra></extra>"
))
except Exception:
pass
if not mesh_traces:
mesh_traces.append(_go.Scatter3d(
x=Xp, y=Yp, z=res_amp,
mode='markers',
marker=dict(size=2, color=res_amp, colorscale=colorscale, showscale=True),
showlegend=False
))
title_suffix = f" (relative to {ref_title})" if is_relative else " (absolute)"
# Build subplots
if is_manufacturing and mfg_metrics and correction_metrics:
fig = _make_subplots(
rows=5, cols=1,
specs=[[{"type": "scene"}], [{"type": "table"}], [{"type": "table"}],
[{"type": "table"}], [{"type": "xy"}]],
row_heights=[0.38, 0.12, 0.12, 0.18, 0.20],
vertical_spacing=0.025,
subplot_titles=[
f"<b>Surface Residual - {title}{title_suffix}</b>",
"<b>RMS Metrics (Absolute 90 deg)</b>",
"<b>Mode Magnitudes at 90 deg</b>",
"<b>Pre-Correction (90 deg - 20 deg)</b>",
"<b>|Zernike Coefficients| (nm)</b>"
]
)
elif show_bar:
fig = _make_subplots(
rows=4, cols=1,
specs=[[{"type": "scene"}], [{"type": "table"}],
[{"type": "table"}], [{"type": "xy"}]],
row_heights=[0.45, 0.12, 0.25, 0.18],
vertical_spacing=0.03,
subplot_titles=[
f"<b>Surface Residual - {title}{title_suffix}</b>",
"<b>RMS Metrics</b>",
f"<b>Zernike Coefficients ({n_modes} modes)</b>",
"<b>|Zernike Coefficients| (nm)</b>"
]
)
else:
fig = _make_subplots(
rows=3, cols=1,
specs=[[{"type": "scene"}], [{"type": "table"}], [{"type": "table"}]],
row_heights=[0.55, 0.15, 0.30],
vertical_spacing=0.03,
subplot_titles=[
f"<b>Surface Residual - {title}{title_suffix}</b>",
"<b>RMS Metrics</b>",
f"<b>Zernike Coefficients ({n_modes} modes)</b>"
]
)
# Add mesh
for tr in mesh_traces:
fig.add_trace(tr, row=1, col=1)
# Configure 3D scene
fig.update_scenes(
camera=dict(eye=dict(x=1.2, y=1.2, z=0.8), up=dict(x=0, y=0, z=1)),
xaxis=dict(title="X (mm)", showgrid=True,
gridcolor='rgba(128,128,128,0.3)',
showbackground=True, backgroundcolor='rgba(240,240,240,0.9)'),
yaxis=dict(title="Y (mm)", showgrid=True,
gridcolor='rgba(128,128,128,0.3)',
showbackground=True, backgroundcolor='rgba(240,240,240,0.9)'),
zaxis=dict(title="Residual (nm)",
range=[-max_amp * pancake, max_amp * pancake],
showgrid=True, gridcolor='rgba(128,128,128,0.3)',
showbackground=True, backgroundcolor='rgba(230,230,250,0.9)'),
aspectmode='manual',
aspectratio=dict(x=1, y=1, z=0.4)
)
# Add tables
if is_relative and abs_pair:
abs_global, abs_filtered = abs_pair
fig.add_trace(_go.Table(
header=dict(values=["<b>Metric</b>", "<b>Relative (nm)</b>", "<b>Absolute (nm)</b>"],
align="left", fill_color='#1f2937', font=dict(color='white')),
cells=dict(values=[
["Global RMS", "Filtered RMS (J1-J4 removed)"],
[f"{global_rms:.2f}", f"{filtered_rms:.2f}"],
[f"{abs_global:.2f}", f"{abs_filtered:.2f}"],
], align="left", fill_color='#374151', font=dict(color='white'))
), row=2, col=1)
elif is_manufacturing and mfg_metrics and correction_metrics:
fig.add_trace(_go.Table(
header=dict(values=["<b>Metric</b>", "<b>Value (nm)</b>"],
align="left", fill_color='#1f2937', font=dict(color='white')),
cells=dict(values=[
["Global RMS", "Filtered RMS (J1-J4)"],
[f"{global_rms:.2f}", f"{filtered_rms:.2f}"]
], align="left", fill_color='#374151', font=dict(color='white'))
), row=2, col=1)
fig.add_trace(_go.Table(
header=dict(values=["<b>Mode</b>", "<b>Value (nm)</b>"],
align="left", fill_color='#1f2937', font=dict(color='white')),
cells=dict(values=[
["Filtered RMS (J1-J3, with defocus)", "Astigmatism (J5+J6)",
"Coma (J7+J8)", "Trefoil (J9+J10)", "Spherical (J11)"],
[f"{rms_data['rms_filter_j1to3']:.2f}",
f"{mfg_metrics['astigmatism_rms']:.2f}",
f"{mfg_metrics['coma_rms']:.2f}",
f"{mfg_metrics['trefoil_rms']:.2f}",
f"{mfg_metrics['spherical_nm']:.2f}"]
], align="left", fill_color='#374151', font=dict(color='white'))
), row=3, col=1)
fig.add_trace(_go.Table(
header=dict(values=["<b>Mode</b>", "<b>Correction (nm)</b>"],
align="left", fill_color='#1f2937', font=dict(color='white')),
cells=dict(values=[
["Total RMS (J1-J3 filter)", "Defocus (J4)",
"Astigmatism (J5+J6)", "Coma (J7+J8)"],
[f"{correction_metrics['rms_filter_j1to3']:.2f}",
f"{correction_metrics['defocus_nm']:.2f}",
f"{correction_metrics['astigmatism_rms']:.2f}",
f"{correction_metrics['coma_rms']:.2f}"]
], align="left", fill_color='#374151', font=dict(color='white'))
), row=4, col=1)
else:
fig.add_trace(_go.Table(
header=dict(values=["<b>Metric</b>", "<b>Value (nm)</b>"],
align="left", fill_color='#1f2937', font=dict(color='white')),
cells=dict(values=[
["Global RMS", "Filtered RMS (J1-J4 removed)"],
[f"{global_rms:.2f}", f"{filtered_rms:.2f}"]
], align="left", fill_color='#374151', font=dict(color='white'))
), row=2, col=1)
# Coefficients table
if not (is_manufacturing and mfg_metrics and correction_metrics):
fig.add_trace(_go.Table(
header=dict(values=["<b>Noll j</b>", "<b>Label</b>", "<b>|Coeff| (nm)</b>"],
align="left", fill_color='#1f2937', font=dict(color='white')),
cells=dict(values=[
list(range(1, n_modes + 1)),
labels,
[f"{c:.3f}" for c in coeff_abs]
], align="left", fill_color='#374151', font=dict(color='white'))
), row=3, col=1)
# Bar chart
if show_bar:
bar_row = 5 if (is_manufacturing and mfg_metrics and correction_metrics) else 4
fig.add_trace(
_go.Bar(
x=coeff_abs.tolist(), y=labels,
orientation='h', marker_color='#6366f1',
hovertemplate="%{y}<br>|Coeff| = %{x:.3f} nm<extra></extra>",
showlegend=False
),
row=bar_row, col=1
)
# Layout
height = 1500 if (is_manufacturing and mfg_metrics and correction_metrics) else 1300
fig.update_layout(
width=1400, height=height,
margin=dict(t=60, b=20, l=20, r=20),
paper_bgcolor='#111827', plot_bgcolor='#1f2937',
font=dict(color='white'),
title=dict(text=f"<b>Atomizer Zernike Analysis - {title}</b>",
x=0.5, font=dict(size=18))
)
return fig.to_html(include_plotlyjs='cdn', full_html=True)
def _generate(self, config: InsightConfig) -> InsightResult:
    """Generate all Zernike WFE views for the study.

    Writes three standalone HTML files to the output directory:

      * ``zernike_<ts>_40_vs_20.html`` -- 40 deg WFE relative to 20 deg
      * ``zernike_<ts>_60_vs_20.html`` -- 60 deg WFE relative to 20 deg
      * ``zernike_<ts>_90_mfg.html``   -- 90 deg absolute (manufacturing)
        view, annotated with correction metrics relative to 20 deg

    Args:
        config: Run configuration. ``config.extra`` overrides entries in
            ``DEFAULT_CONFIG``; ``config.amplification`` (when != 1.0)
            overrides the display amplification; ``config.output_dir``
            (when set) overrides ``self.insights_path``.

    Returns:
        InsightResult: ``success=False`` with ``error`` set when the four
        required subcases cannot be resolved from the loaded results;
        otherwise ``success=True`` with ``html_path`` pointing at the first
        (primary, 40-vs-20) file and ``summary`` carrying every written
        file path plus the key RMS metrics.
    """
    self._load_data()

    cfg = self._merge_run_config(config)
    n_modes = cfg['n_modes']
    filter_orders = cfg['filter_low_orders']
    disp_unit = cfg['disp_unit']

    sc_map, map_error = self._resolve_subcase_map()
    if map_error is not None:
        return InsightResult(success=False, error=map_error)

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = config.output_dir or self.insights_path
    output_dir.mkdir(parents=True, exist_ok=True)

    html_files = []
    summary = {}

    # Reference state: 20 deg elevation. Every relative view subtracts
    # this field node-by-node before fitting Zernikes.
    ref_arrays = self._build_wfe_arrays(sc_map['20'], disp_unit)

    # Relative views: 40 deg and 60 deg, each differenced against 20 deg.
    for angle in ('40', '60'):
        path, filtered_rms = self._write_relative_view(
            angle, sc_map[angle], ref_arrays, cfg,
            n_modes, filter_orders, disp_unit, output_dir, timestamp)
        html_files.append(path)
        summary[f'{angle}_vs_20_filtered_rms'] = filtered_rms

    # Manufacturing view: 90 deg shown in absolute terms, plus metrics
    # describing the figuring correction needed to reach the 20 deg state.
    X_90, Y_90, WFE_90, nids_90 = self._build_wfe_arrays(sc_map['90'], disp_unit)
    rms_90 = self._compute_metrics(X_90, Y_90, WFE_90, n_modes, filter_orders)
    mfg_metrics = self._compute_aberration_magnitudes(rms_90['coefficients'])

    X_ref, Y_ref, WFE_ref, nids_ref = ref_arrays
    X_90_rel, Y_90_rel, WFE_90_rel = self._compute_relative_wfe(
        X_90, Y_90, WFE_90, nids_90, X_ref, Y_ref, WFE_ref, nids_ref)
    rms_90_rel = self._compute_metrics(
        X_90_rel, Y_90_rel, WFE_90_rel, n_modes, filter_orders)
    correction_metrics = {
        'rms_filter_j1to3': rms_90_rel['rms_filter_j1to3'],
        **self._compute_aberration_magnitudes(rms_90_rel['coefficients']),
    }

    html_90 = self._generate_view_html(
        "90 deg (Manufacturing)", X_90, Y_90, WFE_90, rms_90, cfg,
        is_relative=False, is_manufacturing=True,
        mfg_metrics=mfg_metrics, correction_metrics=correction_metrics)
    path_90 = output_dir / f"zernike_{timestamp}_90_mfg.html"
    path_90.write_text(html_90, encoding='utf-8')
    html_files.append(path_90)
    summary['90_mfg_filtered_rms'] = rms_90['filtered_rms']
    summary['90_optician_workload'] = rms_90['rms_filter_j1to3']

    return InsightResult(
        success=True,
        html_path=html_files[0],  # 40-vs-20 view is the primary artifact
        summary={
            'html_files': [str(p) for p in html_files],
            **summary,
        },
    )

def _merge_run_config(self, config: InsightConfig) -> dict:
    """Merge DEFAULT_CONFIG with per-run overrides from *config*.

    ``config.extra`` entries (including 'colorscale') take precedence over
    the defaults; an explicit non-default ``config.amplification`` then
    overrides the merged 'amp'.
    """
    cfg = {**DEFAULT_CONFIG, **config.extra}
    if config.amplification != 1.0:
        cfg['amp'] = config.amplification
    return cfg

def _resolve_subcase_map(self):
    """Map elevation angles ('90', '20', '40', '60') to subcase labels.

    Handles three labeling schemes observed in results: sequential numeric
    subcases ('1'..'4' in 90/20/40/60 order), angle-named subcases, or an
    arbitrary set of 4+ subcases taken in numerically sorted order.

    Returns:
        tuple: ``(mapping, error)`` where exactly one element is None.
        ``mapping`` is a dict of angle -> subcase label on success;
        ``error`` is a human-readable message on failure.
    """
    disps = self._displacements
    if '1' in disps and '2' in disps:
        sc_map = {'90': '1', '20': '2', '40': '3', '60': '4'}
    elif '90' in disps and '20' in disps:
        sc_map = {'90': '90', '20': '20', '40': '40', '60': '60'}
    else:
        # Fallback: take the first four subcases in numeric-label order.
        available = sorted(disps.keys(),
                           key=lambda x: int(x) if x.isdigit() else 0)
        if len(available) < 4:
            return None, f"Need 4 subcases, found: {available}"
        sc_map = {'90': available[0], '20': available[1],
                  '40': available[2], '60': available[3]}
    # Validate every mapped label actually exists in the loaded results.
    for angle, label in sc_map.items():
        if label not in disps:
            return None, f"Subcase '{label}' (angle {angle}) not found"
    return sc_map, None

def _write_relative_view(self, angle, sc_label, ref_arrays, cfg,
                         n_modes, filter_orders, disp_unit,
                         output_dir, timestamp):
    """Build, render, and write one <angle>-deg-vs-20-deg relative view.

    Args:
        angle: Elevation angle as a string ('40' or '60'); used for the
            plot title and output filename.
        sc_label: Subcase label to load for this angle.
        ref_arrays: ``(X, Y, WFE, node_ids)`` of the 20 deg reference.
        cfg: Merged visualization config.
        n_modes: Number of Zernike modes for the fit.
        filter_orders: Low-order modes removed for the filtered RMS.
        disp_unit: Displacement unit of the OP2 results.
        output_dir: Directory receiving the HTML file.
        timestamp: Shared run timestamp used in filenames.

    Returns:
        tuple: ``(path, filtered_rms)`` -- written HTML path and the
        filtered RMS of the relative WFE (for the run summary).
    """
    X_ref, Y_ref, WFE_ref, nids_ref = ref_arrays
    X, Y, WFE, nids = self._build_wfe_arrays(sc_label, disp_unit)
    X_rel, Y_rel, WFE_rel = self._compute_relative_wfe(
        X, Y, WFE, nids, X_ref, Y_ref, WFE_ref, nids_ref)
    # Absolute metrics are shown alongside the relative view for context.
    rms_abs = self._compute_metrics(X, Y, WFE, n_modes, filter_orders)
    rms_rel = self._compute_metrics(X_rel, Y_rel, WFE_rel,
                                    n_modes, filter_orders)
    html = self._generate_view_html(
        f"{angle} deg", X_rel, Y_rel, WFE_rel, rms_rel, cfg,
        is_relative=True, ref_title="20 deg",
        abs_pair=(rms_abs['global_rms'], rms_abs['filtered_rms']))
    path = output_dir / f"zernike_{timestamp}_{angle}_vs_20.html"
    path.write_text(html, encoding='utf-8')
    return path, rms_rel['filtered_rms']