feat: Add Zernike GNN surrogate module and M1 mirror V12/V13 studies
This commit introduces the GNN-based surrogate for Zernike mirror optimization and the M1 mirror study progression from V12 (GNN validation) to V13 (pure NSGA-II). ## GNN Surrogate Module (optimization_engine/gnn/) New module for Graph Neural Network surrogate prediction of mirror deformations: - `polar_graph.py`: PolarMirrorGraph - fixed 3000-node polar grid structure - `zernike_gnn.py`: ZernikeGNN with design-conditioned message passing - `differentiable_zernike.py`: GPU-accelerated Zernike fitting and objectives - `train_zernike_gnn.py`: ZernikeGNNTrainer with multi-task loss - `gnn_optimizer.py`: ZernikeGNNOptimizer for turbo mode (~900k trials/hour) - `extract_displacement_field.py`: OP2 to HDF5 field extraction - `backfill_field_data.py`: Extract fields from existing FEA trials Key innovation: Design-conditioned convolutions that modulate message passing based on structural design parameters, enabling accurate field prediction. ## M1 Mirror Studies ### V12: GNN Field Prediction + FEA Validation - Zernike GNN trained on V10/V11 FEA data (238 samples) - Turbo mode: 5000 GNN predictions → top candidates → FEA validation - Calibration workflow for GNN-to-FEA error correction - Scripts: run_gnn_turbo.py, validate_gnn_best.py, compute_full_calibration.py ### V13: Pure NSGA-II FEA (Ground Truth) - Seeds 217 FEA trials from V11+V12 - Pure multi-objective NSGA-II without any surrogate - Establishes ground-truth Pareto front for GNN accuracy evaluation - Narrowed blank_backface_angle range to [4.0, 5.0] ## Documentation Updates - SYS_14: Added Zernike GNN section with architecture diagrams - CLAUDE.md: Added GNN module reference and quick start - V13 README: Study documentation with seeding strategy 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
220
studies/m1_mirror_adaptive_V12/1_setup/optimization_config.json
Normal file
220
studies/m1_mirror_adaptive_V12/1_setup/optimization_config.json
Normal file
@@ -0,0 +1,220 @@
|
||||
{
|
||||
"$schema": "Atomizer M1 Mirror Adaptive Surrogate Optimization V12",
|
||||
"study_name": "m1_mirror_adaptive_V12",
|
||||
"description": "V12 - Adaptive optimization with tuned hyperparameters, ensemble surrogate, and mass constraint (<99kg).",
|
||||
|
||||
"source_study": {
|
||||
"path": "../m1_mirror_adaptive_V11",
|
||||
"database": "../m1_mirror_adaptive_V11/3_results/study.db",
|
||||
"model_dir": "../m1_mirror_adaptive_V11/1_setup/model",
|
||||
"description": "V11 FEA data (107 samples) used for initial surrogate training"
|
||||
},
|
||||
|
||||
"source_model_dir": "C:\\Users\\Antoine\\CADTOMASTE\\Atomizer\\M1-Gigabit\\Latest",
|
||||
|
||||
"design_variables": [
|
||||
{
|
||||
"name": "lateral_inner_angle",
|
||||
"expression_name": "lateral_inner_angle",
|
||||
"min": 25.0,
|
||||
"max": 28.5,
|
||||
"baseline": 26.79,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_outer_angle",
|
||||
"expression_name": "lateral_outer_angle",
|
||||
"min": 13.0,
|
||||
"max": 17.0,
|
||||
"baseline": 14.64,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_outer_pivot",
|
||||
"expression_name": "lateral_outer_pivot",
|
||||
"min": 9.0,
|
||||
"max": 12.0,
|
||||
"baseline": 10.40,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_inner_pivot",
|
||||
"expression_name": "lateral_inner_pivot",
|
||||
"min": 9.0,
|
||||
"max": 12.0,
|
||||
"baseline": 10.07,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_middle_pivot",
|
||||
"expression_name": "lateral_middle_pivot",
|
||||
"min": 18.0,
|
||||
"max": 23.0,
|
||||
"baseline": 20.73,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_closeness",
|
||||
"expression_name": "lateral_closeness",
|
||||
"min": 9.5,
|
||||
"max": 12.5,
|
||||
"baseline": 11.02,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_min",
|
||||
"expression_name": "whiffle_min",
|
||||
"min": 35.0,
|
||||
"max": 55.0,
|
||||
"baseline": 40.55,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_outer_to_vertical",
|
||||
"expression_name": "whiffle_outer_to_vertical",
|
||||
"min": 68.0,
|
||||
"max": 80.0,
|
||||
"baseline": 75.67,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_triangle_closeness",
|
||||
"expression_name": "whiffle_triangle_closeness",
|
||||
"min": 50.0,
|
||||
"max": 65.0,
|
||||
"baseline": 60.00,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "blank_backface_angle",
|
||||
"expression_name": "blank_backface_angle",
|
||||
"min": 4.0,
|
||||
"max": 5.0,
|
||||
"baseline": 4.23,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "inner_circular_rib_dia",
|
||||
"expression_name": "inner_circular_rib_dia",
|
||||
"min": 480.0,
|
||||
"max": 620.0,
|
||||
"baseline": 534.00,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
}
|
||||
],
|
||||
|
||||
"objectives": [
|
||||
{
|
||||
"name": "rel_filtered_rms_40_vs_20",
|
||||
"description": "Filtered RMS WFE at 40 deg relative to 20 deg reference (operational tracking)",
|
||||
"direction": "minimize",
|
||||
"weight": 5.0,
|
||||
"target": 4.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "3",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_filtered_rms_nm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "rel_filtered_rms_60_vs_20",
|
||||
"description": "Filtered RMS WFE at 60 deg relative to 20 deg reference (operational tracking)",
|
||||
"direction": "minimize",
|
||||
"weight": 5.0,
|
||||
"target": 10.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "4",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_filtered_rms_nm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "mfg_90_optician_workload",
|
||||
"description": "Manufacturing deformation at 90 deg polishing (J1-J3 filtered RMS)",
|
||||
"direction": "minimize",
|
||||
"weight": 1.0,
|
||||
"target": 20.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "1",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_rms_filter_j1to3"
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
"zernike_settings": {
|
||||
"n_modes": 50,
|
||||
"filter_low_orders": 4,
|
||||
"displacement_unit": "mm",
|
||||
"subcases": ["1", "2", "3", "4"],
|
||||
"subcase_labels": {"1": "90deg", "2": "20deg", "3": "40deg", "4": "60deg"},
|
||||
"reference_subcase": "2"
|
||||
},
|
||||
|
||||
"constraints": [
|
||||
{
|
||||
"name": "mass_limit",
|
||||
"type": "upper_bound",
|
||||
"expression_name": "p173",
|
||||
"max_value": 99.0,
|
||||
"units": "kg",
|
||||
"description": "Mirror assembly mass must be under 99kg",
|
||||
"penalty_weight": 100.0,
|
||||
"influenced_by": ["blank_backface_angle"]
|
||||
}
|
||||
],
|
||||
|
||||
"adaptive_settings": {
|
||||
"max_iterations": 100,
|
||||
"surrogate_trials_per_iter": 1000,
|
||||
"fea_batch_size": 5,
|
||||
"strategy": "hybrid",
|
||||
"exploration_ratio": 0.3,
|
||||
"convergence_threshold_nm": 0.3,
|
||||
"patience": 5,
|
||||
"min_training_samples": 30,
|
||||
"retrain_epochs": 300
|
||||
},
|
||||
|
||||
"surrogate_settings": {
|
||||
"model_type": "ZernikeSurrogate",
|
||||
"hidden_dims": [128, 256, 256, 128, 64],
|
||||
"dropout": 0.1,
|
||||
"learning_rate": 0.001,
|
||||
"batch_size": 16,
|
||||
"mc_dropout_samples": 30
|
||||
},
|
||||
|
||||
"nx_settings": {
|
||||
"nx_install_path": "C:\\Program Files\\Siemens\\NX2506",
|
||||
"sim_file": "ASSY_M1_assyfem1_sim1.sim",
|
||||
"solution_name": "Solution 1",
|
||||
"op2_pattern": "*-solution_1.op2",
|
||||
"simulation_timeout_s": 900,
|
||||
"journal_timeout_s": 120,
|
||||
"op2_timeout_s": 1800,
|
||||
"auto_start_nx": true
|
||||
},
|
||||
|
||||
"dashboard_settings": {
|
||||
"trial_source_tag": true,
|
||||
"fea_marker": "circle",
|
||||
"nn_marker": "cross",
|
||||
"fea_color": "#2196F3",
|
||||
"nn_color": "#FF9800"
|
||||
}
|
||||
}
|
||||
529
studies/m1_mirror_adaptive_V12/README.md
Normal file
529
studies/m1_mirror_adaptive_V12/README.md
Normal file
@@ -0,0 +1,529 @@
|
||||
# M1 Mirror Adaptive Surrogate Optimization V12
|
||||
|
||||
Adaptive neural-accelerated optimization of telescope primary mirror (M1) support structure using Zernike wavefront error decomposition with **auto-tuned hyperparameters**, **ensemble surrogates**, and **mass constraints**.
|
||||
|
||||
**Created**: 2024-12-04
|
||||
**Protocol**: Protocol 12 (Adaptive Hybrid FEA/Neural with Hyperparameter Tuning)
|
||||
**Status**: Running
|
||||
**Source Data**: V11 (107 FEA samples)
|
||||
|
||||
---
|
||||
|
||||
## 1. Engineering Problem
|
||||
|
||||
### 1.1 Objective
|
||||
|
||||
Optimize the telescope primary mirror (M1) whiffle tree and lateral support structure to minimize wavefront error (WFE) across different gravity orientations while maintaining mass under 99 kg.
|
||||
|
||||
### 1.2 Physical System
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| **Component** | M1 primary mirror assembly with whiffle tree support |
|
||||
| **Material** | Borosilicate glass (mirror blank), steel (support structure) |
|
||||
| **Loading** | Gravity at multiple zenith angles (90°, 20°, 40°, 60°) |
|
||||
| **Boundary Conditions** | Whiffle tree kinematic mount with lateral supports |
|
||||
| **Analysis Type** | Linear static multi-subcase (Nastran SOL 101) |
|
||||
| **Subcases** | 4 orientations with different gravity vectors |
|
||||
| **Output** | Surface deformation → Zernike polynomial decomposition |
|
||||
|
||||
### 1.3 Key Improvements in V12
|
||||
|
||||
| Feature | V11 | V12 |
|
||||
|---------|-----|-----|
|
||||
| Hyperparameter Tuning | Fixed architecture | Optuna auto-tuning |
|
||||
| Model Architecture | Single network | Ensemble of 3 models |
|
||||
| Validation | Train/test split | K-fold cross-validation |
|
||||
| Mass Constraint | Post-hoc check | Integrated penalty |
|
||||
| Convergence | Fixed iterations | Early stopping with patience |
|
||||
|
||||
---
|
||||
|
||||
## 2. Mathematical Formulation
|
||||
|
||||
### 2.1 Objectives
|
||||
|
||||
| Objective | Goal | Weight | Formula | Units | Target |
|
||||
|-----------|------|--------|---------|-------|--------|
|
||||
| `rel_filtered_rms_40_vs_20` | minimize | 5.0 | $\sigma_{40/20} = \sqrt{\sum_{j=5}^{50} (Z_j^{40} - Z_j^{20})^2}$ | nm | 4 nm |
|
||||
| `rel_filtered_rms_60_vs_20` | minimize | 5.0 | $\sigma_{60/20} = \sqrt{\sum_{j=5}^{50} (Z_j^{60} - Z_j^{20})^2}$ | nm | 10 nm |
|
||||
| `mfg_90_optician_workload` | minimize | 1.0 | $\sigma_{90}^{J4+} = \sqrt{\sum_{j=4}^{50} (Z_j^{90} - Z_j^{20})^2}$ | nm | 20 nm |
|
||||
|
||||
**Weighted Sum Objective**:
|
||||
$$J(\mathbf{x}) = \sum_{i=1}^{3} w_i \cdot \frac{f_i(\mathbf{x})}{t_i} + P_{mass}(\mathbf{x})$$
|
||||
|
||||
Where:
|
||||
- $w_i$ = weight for objective $i$
|
||||
- $f_i(\mathbf{x})$ = objective value
|
||||
- $t_i$ = target value (normalization)
|
||||
- $P_{mass}$ = mass constraint penalty
|
||||
|
||||
### 2.2 Zernike Decomposition
|
||||
|
||||
The wavefront error $W(r,\theta)$ is decomposed into Noll-indexed Zernike polynomials:
|
||||
|
||||
$$W(r,\theta) = \sum_{j=1}^{50} Z_j \cdot P_j(r,\theta)$$
|
||||
|
||||
**WFE from Displacement** (reflection factor of 2):
|
||||
$$W_{nm} = 2 \cdot \delta_z \cdot 10^6$$
|
||||
|
||||
Where $\delta_z$ is the Z-displacement in mm.
|
||||
|
||||
**Filtered RMS** (excluding alignable terms J1-J4):
|
||||
$$\sigma_{filtered} = \sqrt{\sum_{j=5}^{50} Z_j^2}$$
|
||||
|
||||
**Manufacturing RMS** (excluding J1-J3, keeping defocus J4):
|
||||
$$\sigma_{mfg} = \sqrt{\sum_{j=4}^{50} Z_j^2}$$
|
||||
|
||||
### 2.3 Design Variables (11 Parameters)
|
||||
|
||||
| Parameter | Symbol | Bounds | Baseline | Units | Description |
|
||||
|-----------|--------|--------|----------|-------|-------------|
|
||||
| `lateral_inner_angle` | $\alpha_{in}$ | [25, 28.5] | 26.79 | deg | Inner lateral support angle |
|
||||
| `lateral_outer_angle` | $\alpha_{out}$ | [13, 17] | 14.64 | deg | Outer lateral support angle |
|
||||
| `lateral_outer_pivot` | $p_{out}$ | [9, 12] | 10.40 | mm | Outer pivot offset |
|
||||
| `lateral_inner_pivot` | $p_{in}$ | [9, 12] | 10.07 | mm | Inner pivot offset |
|
||||
| `lateral_middle_pivot` | $p_{mid}$ | [18, 23] | 20.73 | mm | Middle pivot offset |
|
||||
| `lateral_closeness` | $c_{lat}$ | [9.5, 12.5] | 11.02 | mm | Lateral support spacing |
|
||||
| `whiffle_min` | $w_{min}$ | [35, 55] | 40.55 | mm | Whiffle tree minimum |
|
||||
| `whiffle_outer_to_vertical` | $\theta_w$ | [68, 80] | 75.67 | deg | Outer whiffle angle |
|
||||
| `whiffle_triangle_closeness` | $c_w$ | [50, 65] | 60.00 | mm | Whiffle triangle spacing |
|
||||
| `blank_backface_angle` | $\beta$ | [4.0, 5.0] | 4.23 | deg | Mirror backface angle (mass driver) |
|
||||
| `inner_circular_rib_dia` | $D_{rib}$ | [480, 620] | 534.00 | mm | Inner rib diameter |
|
||||
|
||||
**Design Space**:
|
||||
$$\mathbf{x} = [\alpha_{in}, \alpha_{out}, p_{out}, p_{in}, p_{mid}, c_{lat}, w_{min}, \theta_w, c_w, \beta, D_{rib}]^T \in \mathbb{R}^{11}$$
|
||||
|
||||
### 2.4 Mass Constraint
|
||||
|
||||
| Constraint | Type | Formula | Threshold | Handling |
|
||||
|------------|------|---------|-----------|----------|
|
||||
| `mass_limit` | upper_bound | $m(\mathbf{x}) \leq m_{max}$ | 99 kg | Penalty in objective |
|
||||
|
||||
**Penalty Function**:
|
||||
$$P_{mass}(\mathbf{x}) = \begin{cases}
|
||||
0 & \text{if } m \leq 99 \\
|
||||
100 \cdot (m - 99) & \text{if } m > 99
|
||||
\end{cases}$$
|
||||
|
||||
**Mass Estimation** (from `blank_backface_angle`):
|
||||
$$\hat{m}(\beta) = 105 - 15 \cdot (\beta - 4.0) \text{ kg}$$
|
||||
|
||||
---
|
||||
|
||||
## 3. Optimization Algorithm
|
||||
|
||||
### 3.1 Adaptive Hybrid Strategy
|
||||
|
||||
| Parameter | Value | Description |
|
||||
|-----------|-------|-------------|
|
||||
| Algorithm | Adaptive Hybrid | FEA + Neural Surrogate |
|
||||
| Surrogate | Tuned Ensemble (3 models) | Auto-tuned architecture |
|
||||
| Sampler | TPE | Tree-structured Parzen Estimator |
|
||||
| Max Iterations | 100 | Adaptive loop iterations |
|
||||
| FEA Batch Size | 5 | Real FEA evaluations per iteration |
|
||||
| NN Trials | 1000 | Surrogate evaluations per iteration |
|
||||
| Patience | 5 | Early stopping threshold |
|
||||
| Convergence | 0.3 nm | Objective improvement threshold |
|
||||
|
||||
### 3.2 Hyperparameter Tuning
|
||||
|
||||
| Setting | Value |
|
||||
|---------|-------|
|
||||
| Tuning Trials | 30 |
|
||||
| Cross-Validation | 5-fold |
|
||||
| Search Space | Hidden dims, dropout, learning rate |
|
||||
| Ensemble Size | 3 models |
|
||||
| MC Dropout Samples | 30 |
|
||||
|
||||
**Tuned Architecture**:
|
||||
```
|
||||
Input(11) → Linear(128) → ReLU → Dropout →
|
||||
Linear(256) → ReLU → Dropout →
|
||||
Linear(256) → ReLU → Dropout →
|
||||
Linear(128) → ReLU → Dropout →
|
||||
Linear(64) → ReLU → Linear(4)
|
||||
```
|
||||
|
||||
### 3.3 Adaptive Loop Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ ADAPTIVE ITERATION k │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ 1. SURROGATE EXPLORATION (1000 trials) │
|
||||
│ ├── Sample 11 design variables via TPE │
|
||||
│ ├── Predict objectives with ensemble (mean + uncertainty) │
|
||||
│ └── Select top candidates (exploitation) + diverse (exploration) │
|
||||
│ │
|
||||
│ 2. FEA VALIDATION (5 trials) │
|
||||
│ ├── Run NX Nastran SOL 101 (4 subcases) │
|
||||
│ ├── Extract Zernike coefficients from OP2 │
|
||||
│ ├── Compute relative filtered RMS │
|
||||
│ └── Store in Optuna database │
|
||||
│ │
|
||||
│ 3. SURROGATE RETRAINING │
|
||||
│ ├── Load all FEA data from database │
|
||||
│ ├── Retrain ensemble with new data │
|
||||
│ └── Update uncertainty estimates │
|
||||
│ │
|
||||
│ 4. CONVERGENCE CHECK │
|
||||
│ ├── Δbest < 0.3 nm for patience=5 iterations? │
|
||||
│ └── If converged → STOP, else → next iteration │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Simulation Pipeline
|
||||
|
||||
### 4.1 FEA Trial Execution Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ FEA TRIAL n EXECUTION │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ 1. CANDIDATE SELECTION │
|
||||
│ Hybrid strategy: 70% exploitation (best NN predictions) │
|
||||
│ 30% exploration (uncertain regions) │
|
||||
│ │
|
||||
│ 2. NX PARAMETER UPDATE │
|
||||
│ Module: optimization_engine/nx_solver.py │
|
||||
│ Target Part: M1_Blank.prt (and related components) │
|
||||
│ Action: Update 11 expressions with new design values │
|
||||
│ │
|
||||
│ 3. NX SIMULATION (Nastran SOL 101 - 4 Subcases) │
|
||||
│ Module: optimization_engine/solve_simulation.py │
|
||||
│ Input: ASSY_M1_assyfem1_sim1.sim │
|
||||
│ Subcases: │
|
||||
│ 1 = 90° zenith (polishing/manufacturing) │
|
||||
│ 2 = 20° zenith (reference) │
|
||||
│ 3 = 40° zenith (operational target 1) │
|
||||
│ 4 = 60° zenith (operational target 2) │
|
||||
│ Output: .dat, .op2, .f06 │
|
||||
│ │
|
||||
│ 4. ZERNIKE EXTRACTION │
|
||||
│ Module: optimization_engine/extractors/extract_zernike.py │
|
||||
│ a. Read node coordinates from BDF/DAT │
|
||||
│ b. Read Z-displacements from OP2 for each subcase │
|
||||
│ c. Compute RELATIVE displacement (target - reference) │
|
||||
│ d. Convert to WFE: W = 2 × Δδz × 10⁶ nm │
|
||||
│ e. Fit 50 Zernike coefficients via least-squares │
|
||||
│ f. Compute filtered RMS (exclude J1-J4) │
|
||||
│ │
|
||||
│ 5. MASS EXTRACTION │
|
||||
│ Module: optimization_engine/extractors/extract_mass_from_expression │
|
||||
│ Expression: p173 (CAD mass property) │
|
||||
│ │
|
||||
│ 6. OBJECTIVE COMPUTATION │
|
||||
│ rel_filtered_rms_40_vs_20 ← Zernike RMS (subcase 3 - 2) │
|
||||
│ rel_filtered_rms_60_vs_20 ← Zernike RMS (subcase 4 - 2) │
|
||||
│ mfg_90_optician_workload ← Zernike RMS J4+ (subcase 1 - 2) │
|
||||
│ │
|
||||
│ 7. WEIGHTED OBJECTIVE + MASS PENALTY │
|
||||
│ J = Σ (weight × objective / target) + mass_penalty │
|
||||
│ │
|
||||
│ 8. STORE IN DATABASE │
|
||||
│ Optuna SQLite: 3_results/study.db │
|
||||
│ User attrs: trial_source='fea', mass_kg, all Zernike coefficients │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 4.2 Subcase Configuration
|
||||
|
||||
| Subcase | Zenith Angle | Gravity Direction | Role |
|
||||
|---------|--------------|-------------------|------|
|
||||
| 1 | 90° | Horizontal | Manufacturing/polishing reference |
|
||||
| 2 | 20° | Near-vertical | Operational reference (baseline) |
|
||||
| 3 | 40° | Mid-elevation | Operational target 1 |
|
||||
| 4 | 60° | Low-elevation | Operational target 2 |
|
||||
|
||||
---
|
||||
|
||||
## 5. Result Extraction Methods
|
||||
|
||||
### 5.1 Zernike WFE Extraction
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| **Extractor** | `ZernikeExtractor` |
|
||||
| **Module** | `optimization_engine.extractors.extract_zernike` |
|
||||
| **Method** | `extract_relative()` |
|
||||
| **Geometry Source** | `.dat` (BDF format, auto-detected) |
|
||||
| **Displacement Source** | `.op2` (OP2 binary) |
|
||||
| **Output** | 50 Zernike coefficients + RMS metrics per subcase pair |
|
||||
|
||||
**Algorithm**:
|
||||
|
||||
1. Load node coordinates $(X_i, Y_i)$ from BDF
|
||||
2. Load Z-displacements $\delta_{z,i}$ from OP2 for each subcase
|
||||
3. Compute relative displacement (node-by-node):
|
||||
$$\Delta\delta_{z,i} = \delta_{z,i}^{target} - \delta_{z,i}^{reference}$$
|
||||
4. Convert to WFE:
|
||||
$$W_i = 2 \cdot \Delta\delta_{z,i} \cdot 10^6 \text{ nm}$$
|
||||
5. Fit Zernike coefficients via least-squares:
|
||||
$$\min_{\mathbf{Z}} \| \mathbf{W} - \mathbf{P} \mathbf{Z} \|^2$$
|
||||
6. Compute filtered RMS:
|
||||
$$\sigma_{filtered} = \sqrt{\sum_{j=5}^{50} Z_j^2}$$
|
||||
|
||||
**Code**:
|
||||
```python
|
||||
from optimization_engine.extractors import ZernikeExtractor
|
||||
|
||||
extractor = ZernikeExtractor(op2_file, bdf_file)
|
||||
result = extractor.extract_relative(
|
||||
target_subcase="3", # 40 deg
|
||||
reference_subcase="2", # 20 deg
|
||||
displacement_unit="mm"
|
||||
)
|
||||
filtered_rms = result['relative_filtered_rms_nm'] # nm
|
||||
```
|
||||
|
||||
### 5.2 Mass Extraction
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| **Extractor** | `extract_mass_from_expression` |
|
||||
| **Module** | `optimization_engine.extractors` |
|
||||
| **Expression** | `p173` (CAD mass property) |
|
||||
| **Output** | kg |
|
||||
|
||||
**Code**:
|
||||
```python
|
||||
from optimization_engine.extractors import extract_mass_from_expression
|
||||
|
||||
mass_kg = extract_mass_from_expression(model_file, expression_name="p173")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6. Neural Acceleration (Tuned Ensemble Surrogate)
|
||||
|
||||
### 6.1 Configuration
|
||||
|
||||
| Setting | Value | Description |
|
||||
|---------|-------|-------------|
|
||||
| `enabled` | `true` | Neural surrogate active |
|
||||
| `model_type` | `TunedEnsembleSurrogate` | Ensemble of tuned networks |
|
||||
| `ensemble_size` | 3 | Number of models in ensemble |
|
||||
| `hidden_dims` | `[128, 256, 256, 128, 64]` | Auto-tuned architecture |
|
||||
| `dropout` | 0.1 | Regularization |
|
||||
| `learning_rate` | 0.001 | Adam optimizer |
|
||||
| `batch_size` | 16 | Mini-batch size |
|
||||
| `mc_dropout_samples` | 30 | Monte Carlo uncertainty |
|
||||
|
||||
### 6.2 Hyperparameter Tuning
|
||||
|
||||
| Parameter | Search Space |
|
||||
|-----------|--------------|
|
||||
| `n_layers` | [3, 4, 5, 6] |
|
||||
| `hidden_dim` | [64, 128, 256, 512] |
|
||||
| `dropout` | [0.0, 0.1, 0.2, 0.3] |
|
||||
| `learning_rate` | [1e-4, 1e-3, 1e-2] |
|
||||
| `batch_size` | [8, 16, 32, 64] |
|
||||
|
||||
**Tuning Objective**:
|
||||
$$\mathcal{L}_{tune} = \frac{1}{K} \sum_{k=1}^{K} MSE_{val}^{(k)}$$
|
||||
|
||||
Using 5-fold cross-validation.
|
||||
|
||||
### 6.3 Surrogate Model
|
||||
|
||||
**Input**: $\mathbf{x} = [11 \text{ design variables}]^T \in \mathbb{R}^{11}$
|
||||
|
||||
**Output**: $\hat{\mathbf{y}} = [4 \text{ objectives/constraints}]^T \in \mathbb{R}^{4}$
|
||||
- `rel_filtered_rms_40_vs_20` (nm)
|
||||
- `rel_filtered_rms_60_vs_20` (nm)
|
||||
- `mfg_90_optician_workload` (nm)
|
||||
- `mass_kg` (kg)
|
||||
|
||||
**Ensemble Prediction**:
|
||||
$$\hat{y} = \frac{1}{M} \sum_{m=1}^{M} f_m(\mathbf{x})$$
|
||||
|
||||
**Uncertainty Quantification** (MC Dropout):
|
||||
$$\sigma_y^2 = \frac{1}{T} \sum_{t=1}^{T} f_{dropout}^{(t)}(\mathbf{x})^2 - \hat{y}^2$$
|
||||
|
||||
### 6.4 Training Data Location
|
||||
|
||||
```
|
||||
m1_mirror_adaptive_V12/
|
||||
├── 2_iterations/
|
||||
│ ├── iter_001/ # Iteration 1 working files
|
||||
│ ├── iter_002/
|
||||
│ └── ...
|
||||
├── 3_results/
|
||||
│ ├── study.db # Optuna database (all trials)
|
||||
│ ├── optimization.log # Detailed log
|
||||
│ ├── surrogate_best.pt # Best tuned model weights
|
||||
│ └── tuning_results.json # Hyperparameter tuning history
|
||||
```
|
||||
|
||||
### 6.5 Expected Performance
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Source Data | V11: 107 FEA samples |
|
||||
| FEA time per trial | 10-15 min |
|
||||
| Neural time per trial | ~5 ms |
|
||||
| Speedup | ~120,000x |
|
||||
| Expected R² | > 0.90 (after tuning) |
|
||||
| Uncertainty Coverage | ~95% (ensemble + MC dropout) |
|
||||
|
||||
---
|
||||
|
||||
## 7. Study File Structure
|
||||
|
||||
```
|
||||
m1_mirror_adaptive_V12/
|
||||
│
|
||||
├── 1_setup/ # INPUT CONFIGURATION
|
||||
│ ├── model/ → symlink to V11 # NX Model Files
|
||||
│ │ ├── ASSY_M1.prt # Top-level assembly
|
||||
│ │ ├── M1_Blank.prt # Mirror blank (expressions)
|
||||
│ │ ├── ASSY_M1_assyfem1.afm # Assembly FEM
|
||||
│ │ ├── ASSY_M1_assyfem1_sim1.sim # Simulation file
|
||||
│ │ └── *-solution_1.op2 # Results (generated)
|
||||
│ │
|
||||
│ └── optimization_config.json # Study configuration
|
||||
│
|
||||
├── 2_iterations/ # WORKING DIRECTORY
|
||||
│ ├── iter_001/ # Iteration 1 model copy
|
||||
│ ├── iter_002/
|
||||
│ └── ...
|
||||
│
|
||||
├── 3_results/ # OUTPUT (auto-generated)
|
||||
│ ├── study.db # Optuna SQLite database
|
||||
│ ├── optimization.log # Structured log
|
||||
│ ├── surrogate_best.pt # Trained ensemble weights
|
||||
│ ├── tuning_results.json # Hyperparameter tuning
|
||||
│ └── convergence.json # Iteration history
|
||||
│
|
||||
├── run_optimization.py # Main entry point
|
||||
├── final_validation.py # FEA validation of best NN trials
|
||||
├── README.md # This blueprint
|
||||
└── STUDY_REPORT.md # Results report (updated during run)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. Results Location
|
||||
|
||||
After optimization, results are stored in `3_results/`:
|
||||
|
||||
| File | Description | Format |
|
||||
|------|-------------|--------|
|
||||
| `study.db` | Optuna database with all trials (FEA + NN) | SQLite |
|
||||
| `optimization.log` | Detailed execution log | Text |
|
||||
| `surrogate_best.pt` | Tuned ensemble model weights | PyTorch |
|
||||
| `tuning_results.json` | Hyperparameter search history | JSON |
|
||||
| `convergence.json` | Best value per iteration | JSON |
|
||||
|
||||
### 8.1 Trial Identification
|
||||
|
||||
Trials are tagged with source:
|
||||
- **FEA trials**: `trial.user_attrs['trial_source'] = 'fea'`
|
||||
- **NN trials**: `trial.user_attrs['trial_source'] = 'nn'`
|
||||
|
||||
**Dashboard Visualization**:
|
||||
- FEA trials: Blue circles
|
||||
- NN trials: Orange crosses
|
||||
|
||||
### 8.2 Results Report
|
||||
|
||||
See [STUDY_REPORT.md](STUDY_REPORT.md) for:
|
||||
- Optimization progress and convergence
|
||||
- Best designs found (FEA-validated)
|
||||
- Surrogate model accuracy (R², MAE)
|
||||
- Pareto trade-off analysis
|
||||
- Engineering recommendations
|
||||
|
||||
---
|
||||
|
||||
## 9. Quick Start
|
||||
|
||||
### Launch Optimization
|
||||
|
||||
```bash
|
||||
cd studies/m1_mirror_adaptive_V12
|
||||
|
||||
# Start with default settings (uses V11 FEA data)
|
||||
python run_optimization.py --start
|
||||
|
||||
# Custom tuning parameters
|
||||
python run_optimization.py --start --tune-trials 30 --ensemble-size 3 --fea-batch 5 --patience 5
|
||||
|
||||
# Tune hyperparameters only (no FEA)
|
||||
python run_optimization.py --tune-only
|
||||
```
|
||||
|
||||
### Command Line Options
|
||||
|
||||
| Option | Default | Description |
|
||||
|--------|---------|-------------|
|
||||
| `--start` | - | Start adaptive optimization |
|
||||
| `--tune-only` | - | Only tune hyperparameters, no optimization |
|
||||
| `--tune-trials` | 30 | Number of hyperparameter tuning trials |
|
||||
| `--ensemble-size` | 3 | Number of models in ensemble |
|
||||
| `--fea-batch` | 5 | FEA evaluations per iteration |
|
||||
| `--patience` | 5 | Early stopping patience |
|
||||
|
||||
### Monitor Progress
|
||||
|
||||
```bash
|
||||
# View log
|
||||
tail -f 3_results/optimization.log
|
||||
|
||||
# Check database
|
||||
sqlite3 3_results/study.db "SELECT COUNT(*) FROM trials WHERE state='COMPLETE'"
|
||||
|
||||
# Launch Optuna dashboard
|
||||
optuna-dashboard sqlite:///3_results/study.db --port 8081
|
||||
```
|
||||
|
||||
### Dashboard Access
|
||||
|
||||
| Dashboard | URL | Purpose |
|
||||
|-----------|-----|---------|
|
||||
| **Atomizer Dashboard** | http://localhost:3000 | Real-time monitoring |
|
||||
| **Optuna Dashboard** | http://localhost:8081 | Trial history |
|
||||
|
||||
---
|
||||
|
||||
## 10. Configuration Reference
|
||||
|
||||
**File**: `1_setup/optimization_config.json`
|
||||
|
||||
| Section | Key | Description |
|
||||
|---------|-----|-------------|
|
||||
| `design_variables[]` | 11 parameters | All lateral/whiffle/blank params |
|
||||
| `objectives[]` | 3 WFE metrics | Relative filtered RMS |
|
||||
| `constraints[]` | mass_limit | Upper bound 99 kg |
|
||||
| `zernike_settings.n_modes` | 50 | Zernike polynomial count |
|
||||
| `zernike_settings.filter_low_orders` | 4 | Exclude J1-J4 |
|
||||
| `zernike_settings.subcases` | [1,2,3,4] | Zenith angles |
|
||||
| `adaptive_settings.max_iterations` | 100 | Loop limit |
|
||||
| `adaptive_settings.surrogate_trials_per_iter` | 1000 | NN trials |
|
||||
| `adaptive_settings.fea_batch_size` | 5 | FEA per iteration |
|
||||
| `adaptive_settings.patience` | 5 | Early stopping |
|
||||
| `surrogate_settings.ensemble_size` | 3 | Model ensemble |
|
||||
| `surrogate_settings.mc_dropout_samples` | 30 | Uncertainty samples |
|
||||
|
||||
---
|
||||
|
||||
## 11. References
|
||||
|
||||
- **Deb, K. et al.** (2002). A fast and elitist multiobjective genetic algorithm: NSGA-II. *IEEE TEC*.
|
||||
- **Noll, R.J.** (1976). Zernike polynomials and atmospheric turbulence. *JOSA*.
|
||||
- **Wilson, R.N.** (2004). *Reflecting Telescope Optics I*. Springer.
|
||||
- **Snoek, J. et al.** (2012). Practical Bayesian optimization of machine learning algorithms. *NeurIPS*.
|
||||
- **Gal, Y. & Ghahramani, Z.** (2016). Dropout as a Bayesian approximation. *ICML*.
|
||||
- **pyNastran Documentation**: BDF/OP2 parsing for FEA post-processing.
|
||||
- **Optuna Documentation**: Hyperparameter optimization framework.
|
||||
|
||||
---
|
||||
|
||||
*Atomizer V12: Where adaptive AI meets precision optics.*
|
||||
214
studies/m1_mirror_adaptive_V12/compute_full_calibration.py
Normal file
214
studies/m1_mirror_adaptive_V12/compute_full_calibration.py
Normal file
@@ -0,0 +1,214 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Compute Calibration Factors from Full FEA Dataset
|
||||
==================================================
|
||||
Uses ALL 153 FEA training samples to compute robust calibration factors.
|
||||
|
||||
This is much better than calibrating only on the GNN's "best" designs,
|
||||
which are clustered in a narrow region of the design space.
|
||||
"""
|
||||
import sys
|
||||
import json
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
import torch
|
||||
from optimization_engine.gnn.gnn_optimizer import ZernikeGNNOptimizer
|
||||
|
||||
# Paths (resolved relative to this study directory so the script can be
# launched from anywhere).
STUDY_DIR = Path(__file__).parent
CONFIG_PATH = STUDY_DIR / "1_setup" / "optimization_config.json"
# NOTE(review): absolute, machine-specific checkpoint path — consider making
# this configurable (CLI arg or env var) before running on another machine.
CHECKPOINT_PATH = Path("C:/Users/Antoine/Atomizer/zernike_gnn_checkpoint.pt")

# Objective names, in the same order as the entries of the "objectives"
# section of optimization_config.json.
OBJECTIVES = [
    'rel_filtered_rms_40_vs_20',
    'rel_filtered_rms_60_vs_20',
    'mfg_90_optician_workload',
]
|
||||
|
||||
|
||||
def _load_from_gnn_data(gnn_data_dir: Path) -> list:
    """Load training samples (design vars + FEA objectives) from gnn_data trial folders.

    Only trials whose metadata.json carries a non-empty 'objectives' entry
    are kept; everything else is silently skipped.
    """
    samples = []
    if not gnn_data_dir.exists():
        return samples

    for trial_dir in sorted(gnn_data_dir.iterdir()):
        if not (trial_dir.is_dir() and trial_dir.name.startswith('trial_')):
            continue
        metadata_path = trial_dir / "metadata.json"
        if not metadata_path.exists():
            continue
        with open(metadata_path) as f:
            metadata = json.load(f)
        if metadata.get('objectives'):
            samples.append({
                'design_vars': metadata['params'],
                'objectives': metadata['objectives'],
            })
    return samples


def _load_from_v11_db(v11_db: Path) -> list:
    """Fallback loader: pull completed FEA trials from the V11 Optuna SQLite DB.

    FEA trials are identified by a 'source' user attribute containing 'FEA'
    (matches both "FEA" and "V10_FEA"); trials without a source tag are old
    FEA runs and are included. NN-surrogate trials are skipped.
    """
    import sqlite3

    samples = []
    if not v11_db.exists():
        return samples

    conn = sqlite3.connect(str(v11_db))
    try:
        cursor = conn.cursor()
        cursor.execute("""
            SELECT t.trial_id, t.number
            FROM trials t
            WHERE t.state = 'COMPLETE'
        """)
        for trial_id, _trial_num in cursor.fetchall():
            cursor.execute("""
                SELECT key, value_json FROM trial_user_attributes
                WHERE trial_id = ?
            """, (trial_id,))
            attrs = {row[0]: json.loads(row[1]) for row in cursor.fetchall()}

            # Default to 'FEA' for old trials that predate the source tag.
            source = attrs.get('source', 'FEA')
            if 'FEA' not in source:
                continue  # Skip NN trials

            cursor.execute("""
                SELECT param_name, param_value FROM trial_params
                WHERE trial_id = ?
            """, (trial_id,))
            params = {row[0]: float(row[1]) for row in cursor.fetchall()}

            # Objectives are stored as individual user attributes.
            if all(obj in attrs for obj in OBJECTIVES):
                samples.append({
                    'design_vars': params,
                    'objectives': {obj: attrs[obj] for obj in OBJECTIVES},
                })
    finally:
        conn.close()
    return samples


def main():
    """Compute per-objective multiplicative calibration factors (FEA / GNN).

    Workflow:
      1. Load the trained GNN and every available FEA training sample
         (gnn_data folder first, V11 study database as fallback).
      2. Predict objectives for each sample with the GNN.
      3. For each objective, compute factor = mean(FEA / GNN) plus
         before/after calibration error statistics.
      4. Save everything to full_calibration.json.

    Returns 0 on success, 1 if no training data could be found.
    """
    print("=" * 60)
    print("FULL DATASET CALIBRATION")
    print("=" * 60)

    # Load GNN optimizer (includes trained model and config)
    print("\nLoading GNN model...")
    optimizer = ZernikeGNNOptimizer.from_checkpoint(CHECKPOINT_PATH, CONFIG_PATH)
    print(f"  Design variables: {len(optimizer.design_names)}")

    # Load training data, preferring the local gnn_data folder.
    print("\nLoading training data from gnn_data folder...")
    training_data = _load_from_gnn_data(STUDY_DIR / "gnn_data")

    if not training_data:
        print("  No gnn_data with objectives found, loading from V11 database...")
        v11_db = STUDY_DIR.parent / "m1_mirror_adaptive_V11" / "3_results" / "study.db"
        training_data = _load_from_v11_db(v11_db)
        print(f"  Found {len(training_data)} FEA trials in V11 database")

    print(f"  Loaded {len(training_data)} training samples")

    if not training_data:
        print("\n  ERROR: No training data found!")
        return 1

    # Compute GNN predictions for all training samples.
    print("\nComputing GNN predictions for all training samples...")
    gnn_predictions = []
    fea_ground_truth = []
    for i, sample in enumerate(training_data):
        gnn_predictions.append(optimizer.predict(sample['design_vars']).objectives)
        fea_ground_truth.append(sample['objectives'])
        if (i + 1) % 25 == 0:
            print(f"  Processed {i + 1}/{len(training_data)} samples")

    print(f"\n  Total: {len(gnn_predictions)} samples")

    print("\n" + "=" * 60)
    print("CALIBRATION RESULTS")
    print("=" * 60)

    calibration = {}

    for obj_name in OBJECTIVES:
        gnn_vals = np.array([p[obj_name] for p in gnn_predictions])
        fea_vals = np.array([f[obj_name] for f in fea_ground_truth])

        # BUGFIX: guard against divide-by-zero — only samples where both the
        # GNN and FEA values are strictly positive contribute to the stats.
        valid = (gnn_vals > 0) & (fea_vals > 0)
        if not np.any(valid):
            print(f"\n{obj_name}: SKIPPED (no samples with positive GNN and FEA values)")
            continue

        # Calibration factor = mean(FEA / GNN): the multiplicative correction.
        ratios = fea_vals[valid] / gnn_vals[valid]
        factor = np.mean(ratios)
        factor_std = np.std(ratios)
        factor_cv = 100 * factor_std / factor  # Coefficient of variation

        # Residual errors after applying the calibration factor.
        calibrated_gnn = gnn_vals[valid] * factor
        pct_errors = 100 * np.abs(calibrated_gnn - fea_vals[valid]) / fea_vals[valid]
        raw_pct_errors = 100 * np.abs(gnn_vals[valid] - fea_vals[valid]) / fea_vals[valid]

        calibration[obj_name] = {
            'factor': float(factor),
            'std': float(factor_std),
            'cv_pct': float(factor_cv),
            'calibrated_mean_error_pct': float(np.mean(pct_errors)),
            'calibrated_max_error_pct': float(np.max(pct_errors)),
            'raw_mean_error_pct': float(np.mean(raw_pct_errors)),
        }

        print(f"\n{obj_name}:")
        print(f"  Calibration factor: {factor:.4f} ± {factor_std:.4f} (CV: {factor_cv:.1f}%)")
        print(f"  Raw GNN error: {calibration[obj_name]['raw_mean_error_pct']:.1f}%")
        print(f"  Calibrated error: {np.mean(pct_errors):.1f}% (max: {np.max(pct_errors):.1f}%)")

    # Summary (iterate only objectives that actually got a factor).
    print("\n" + "=" * 60)
    print("SUMMARY")
    print("=" * 60)
    print(f"\nCalibration factors (multiply GNN predictions by these):")
    for obj_name in calibration:
        print(f"  {obj_name}: {calibration[obj_name]['factor']:.4f}")

    print(f"\nExpected error reduction:")
    for obj_name in calibration:
        raw = calibration[obj_name]['raw_mean_error_pct']
        cal = calibration[obj_name]['calibrated_mean_error_pct']
        print(f"  {obj_name}: {raw:.1f}% → {cal:.1f}%")

    # Persist the calibration for downstream consumers.
    output_path = STUDY_DIR / "full_calibration.json"
    result = {
        'timestamp': str(np.datetime64('now')),
        'n_samples': len(training_data),
        'calibration': calibration,
        'objectives': OBJECTIVES,
    }
    with open(output_path, 'w') as f:
        json.dump(result, f, indent=2)

    print(f"\nCalibration saved to: {output_path}")

    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||
535
studies/m1_mirror_adaptive_V12/run_gnn_turbo.py
Normal file
535
studies/m1_mirror_adaptive_V12/run_gnn_turbo.py
Normal file
@@ -0,0 +1,535 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
M1 Mirror - GNN Turbo Optimization with FEA Validation
|
||||
=======================================================
|
||||
|
||||
Runs fast GNN-based turbo optimization (5000 trials in ~2 min) then
|
||||
validates top candidates with actual FEA (~5 min each).
|
||||
|
||||
Usage:
|
||||
python run_gnn_turbo.py # Full workflow: 5000 GNN + 5 FEA validations
|
||||
python run_gnn_turbo.py --gnn-only # Just GNN turbo, no FEA
|
||||
python run_gnn_turbo.py --validate-top 10 # Validate top 10 instead of 5
|
||||
python run_gnn_turbo.py --trials 10000 # More GNN trials
|
||||
|
||||
Estimated time: ~2 min GNN + ~25 min FEA validation = ~27 min total
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import argparse
|
||||
import logging
|
||||
import re
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
# Add parent directories to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
from optimization_engine.gnn.gnn_optimizer import ZernikeGNNOptimizer
|
||||
from optimization_engine.nx_solver import NXSolver
|
||||
from optimization_engine.utils import ensure_nx_running
|
||||
from optimization_engine.gnn.extract_displacement_field import (
|
||||
extract_displacement_field, save_field_to_hdf5
|
||||
)
|
||||
from optimization_engine.extractors.extract_zernike_surface import extract_surface_zernike
|
||||
|
||||
# ============================================================================
|
||||
# Paths
|
||||
# ============================================================================
|
||||
|
||||
STUDY_DIR = Path(__file__).parent
|
||||
SETUP_DIR = STUDY_DIR / "1_setup"
|
||||
MODEL_DIR = SETUP_DIR / "model"
|
||||
CONFIG_PATH = SETUP_DIR / "optimization_config.json"
|
||||
CHECKPOINT_PATH = Path("C:/Users/Antoine/Atomizer/zernike_gnn_checkpoint.pt")
|
||||
RESULTS_DIR = STUDY_DIR / "gnn_turbo_results"
|
||||
LOG_FILE = STUDY_DIR / "gnn_turbo.log"
|
||||
|
||||
# Ensure directories exist
|
||||
RESULTS_DIR.mkdir(exist_ok=True)
|
||||
|
||||
# ============================================================================
|
||||
# Logging
|
||||
# ============================================================================
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s | %(levelname)-8s | %(message)s',
|
||||
handlers=[
|
||||
logging.StreamHandler(sys.stdout),
|
||||
logging.FileHandler(LOG_FILE, mode='a')
|
||||
]
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# GNN Turbo Runner
|
||||
# ============================================================================
|
||||
|
||||
class GNNTurboRunner:
|
||||
"""
|
||||
Run GNN turbo optimization with optional FEA validation.
|
||||
|
||||
Workflow:
|
||||
1. Load trained GNN model
|
||||
2. Run fast turbo optimization (5000 trials in ~2 min)
|
||||
3. Extract Pareto front and top candidates per objective
|
||||
4. Validate selected candidates with actual FEA
|
||||
5. Report GNN vs FEA accuracy
|
||||
"""
|
||||
|
||||
def __init__(self, config_path: Path, checkpoint_path: Path):
    """Load the study config and the trained GNN; the FEA solver is created lazily."""
    logger.info("=" * 60)
    logger.info("GNN TURBO OPTIMIZER")
    logger.info("=" * 60)

    # Study configuration
    with open(config_path) as fh:
        self.config = json.load(fh)

    # Trained GNN surrogate
    logger.info(f"Loading GNN from {checkpoint_path}")
    self.gnn = ZernikeGNNOptimizer.from_checkpoint(checkpoint_path, config_path)
    logger.info(f"GNN loaded. Design variables: {self.gnn.design_names}")
    logger.info(f"disp_scale: {self.gnn.disp_scale}")

    # Enabled design variables and objective metadata from the config
    enabled_vars = [v for v in self.config['design_variables'] if v.get('enabled', True)]
    self.design_vars = enabled_vars
    self.objectives = self.config['objectives']
    self.objective_names = [entry['name'] for entry in self.objectives]

    # NX solver is only initialized when FEA validation is actually requested
    self.nx_solver = None
|
||||
|
||||
def _init_nx_solver(self):
    """Initialize NX solver only when needed (for FEA validation)."""
    if self.nx_solver is not None:
        return

    settings = self.config.get('nx_settings', {})
    install_dir = settings.get('nx_install_path', 'C:\\Program Files\\Siemens\\NX2506')

    # Infer the Nastran version from the install folder name (e.g. "NX2506").
    match = re.search(r'NX(\d+)', install_dir)
    nastran_version = match.group(1) if match else "2506"

    self.nx_solver = NXSolver(
        master_model_dir=str(MODEL_DIR),
        nx_install_dir=install_dir,
        nastran_version=nastran_version,
        timeout=settings.get('simulation_timeout_s', 600),
        use_iteration_folders=True,
        study_name="m1_mirror_adaptive_V12_gnn_validation"
    )

    # Make sure the NX session is up before any solve is attempted.
    ensure_nx_running(install_dir)
|
||||
|
||||
def run_turbo(self, n_trials: int = 5000) -> dict:
    """Run GNN turbo optimization.

    Returns a dict with:
    - results: raw results object from the optimizer
    - pareto: Pareto-optimal designs
    - best_per_objective: best design for each objective
    - elapsed_time: wall time of the turbo run in seconds
    """
    logger.info(f"\nRunning turbo optimization ({n_trials} trials)...")
    t0 = time.time()
    results = self.gnn.turbo_optimize(n_trials=n_trials, verbose=True)
    elapsed = time.time() - t0
    logger.info(f"Turbo completed in {elapsed:.1f}s ({n_trials/elapsed:.0f} trials/sec)")

    # Pareto-optimal subset of all predictions
    pareto = results.get_pareto_front()
    logger.info(f"Found {len(pareto)} Pareto-optimal designs")

    # Single best design per objective
    best_per_obj = {}
    for obj_name in self.objective_names:
        top = results.get_best(n=1, objective=obj_name)[0]
        best_per_obj[obj_name] = top
        logger.info(f"Best {obj_name}: {top.objectives[obj_name]:.2f} nm")

    return {
        'results': results,
        'pareto': pareto,
        'best_per_objective': best_per_obj,
        'elapsed_time': elapsed,
    }
|
||||
|
||||
def select_validation_candidates(self, turbo_results: dict, n_validate: int = 5) -> list:
    """Select diverse candidates for FEA validation.

    Best-per-objective designs are taken first (most important to check);
    remaining slots are filled from the Pareto front, preferring balanced
    designs (smallest sum of objectives). Duplicate designs are skipped.
    """
    candidates = []
    seen = set()

    def try_add(pred, source):
        # Round to 4 decimals so near-identical designs dedupe.
        key = tuple(round(v, 4) for v in pred.design.values())
        if key in seen:
            return
        seen.add(key)
        candidates.append({
            'design': pred.design,
            'gnn_objectives': pred.objectives,
            'source': source,
        })

    # First, the single best design per objective.
    for obj_name, pred in turbo_results['best_per_objective'].items():
        try_add(pred, f'best_{obj_name}')
        if len(candidates) >= n_validate:
            break

    # Then fill remaining slots from the Pareto front, balanced designs first.
    pareto = turbo_results['pareto']
    if len(candidates) < n_validate and pareto:
        for pred in sorted(pareto, key=lambda p: sum(p.objectives.values())):
            try_add(pred, 'pareto_front')
            if len(candidates) >= n_validate:
                break

    logger.info(f"Selected {len(candidates)} candidates for FEA validation:")
    for i, c in enumerate(candidates):
        logger.info(f"  {i+1}. {c['source']}: 40vs20={c['gnn_objectives']['rel_filtered_rms_40_vs_20']:.2f} nm")

    return candidates
|
||||
|
||||
def run_fea_validation(self, design: dict, trial_num: int) -> dict:
    """Run FEA for a single design and extract Zernike objectives.

    Parameters
    ----------
    design : dict
        Design-variable name -> value mapping to evaluate.
    trial_num : int
        Sequential validation number (used for folder naming).

    Returns
    -------
    dict
        On success: {'success': True, 'design', 'objectives', 'op2_path',
        'elapsed_time'}; on failure: {'success': False, 'error': ...}.
    """
    self._init_nx_solver()

    trial_dir = RESULTS_DIR / f"validation_{trial_num:04d}"
    trial_dir.mkdir(exist_ok=True)

    logger.info(f"Validation {trial_num}: Running FEA...")
    start_time = time.time()

    try:
        # Map design-variable values onto their NX expression names.
        expressions = {var['expression_name']: design[var['name']]
                       for var in self.design_vars}

        # Create iteration folder with the updated expressions.
        iter_folder = self.nx_solver.create_iteration_folder(
            iterations_base_dir=RESULTS_DIR / "iterations",
            iteration_number=trial_num,
            expression_updates=expressions
        )

        # Run the Nastran solve.
        op2_path = self.nx_solver.run_solve(
            sim_file=iter_folder / self.config['nx_settings']['sim_file'],
            solution_name=self.config['nx_settings']['solution_name']
        )

        if op2_path is None or not Path(op2_path).exists():
            logger.error(f"Validation {trial_num}: FEA solve failed - no OP2")
            return {'success': False, 'error': 'No OP2 file'}

        # Locate the BDF that matches this solve.
        bdf_path = iter_folder / "model.bdf"
        if not bdf_path.exists():
            bdf_files = list(iter_folder.glob("*.bdf"))
            bdf_path = bdf_files[0] if bdf_files else None

        # BUGFIX: previously a missing BDF fell through and str(None) == "None"
        # was handed to the extractor; fail fast with a clear error instead.
        if bdf_path is None:
            logger.error(f"Validation {trial_num}: No BDF file found in {iter_folder}")
            return {'success': False, 'error': 'No BDF file'}

        # Extract Zernike objectives using the same extractor as training.
        zernike_result = extract_surface_zernike(
            op2_path=str(op2_path),
            bdf_path=str(bdf_path),
            n_modes=50,
            r_inner=100.0,
            r_outer=650.0,
            n_radial=50,
            n_angular=60
        )

        if not zernike_result.get('success', False):
            logger.error(f"Validation {trial_num}: Zernike extraction failed")
            return {'success': False, 'error': zernike_result.get('error', 'Unknown')}

        # Relative objectives, computed exactly as in the GNN training data.
        objectives = self._compute_relative_objectives(zernike_result)

        elapsed = time.time() - start_time
        logger.info(f"Validation {trial_num}: Completed in {elapsed:.1f}s")
        logger.info(f"  40vs20: {objectives['rel_filtered_rms_40_vs_20']:.2f} nm")
        logger.info(f"  60vs20: {objectives['rel_filtered_rms_60_vs_20']:.2f} nm")
        logger.info(f"  mfg90: {objectives['mfg_90_optician_workload']:.2f} nm")

        # Persist results alongside the validation folder.
        results = {
            'success': True,
            'design': design,
            'objectives': objectives,
            'op2_path': str(op2_path),
            'elapsed_time': elapsed
        }
        with open(trial_dir / 'fea_result.json', 'w') as f:
            json.dump(results, f, indent=2)

        return results

    except Exception as e:
        logger.error(f"Validation {trial_num}: Error - {e}")
        import traceback
        traceback.print_exc()
        return {'success': False, 'error': str(e)}
|
||||
|
||||
def _compute_relative_objectives(self, zernike_result: dict) -> dict:
|
||||
"""
|
||||
Compute relative Zernike objectives from extraction result.
|
||||
|
||||
Matches the exact computation used in GNN training data preparation.
|
||||
"""
|
||||
coeffs = zernike_result['data']['coefficients'] # Dict by subcase
|
||||
|
||||
# Subcase mapping: 1=90deg, 2=20deg(ref), 3=40deg, 4=60deg
|
||||
subcases = ['1', '2', '3', '4']
|
||||
|
||||
# Convert coefficients to arrays
|
||||
coeff_arrays = {}
|
||||
for sc in subcases:
|
||||
if sc in coeffs:
|
||||
coeff_arrays[sc] = np.array(coeffs[sc])
|
||||
|
||||
# Objective 1: rel_filtered_rms_40_vs_20
|
||||
# Relative = subcase 3 (40deg) - subcase 2 (20deg ref)
|
||||
# Filter: remove J1-J4 (first 4 modes)
|
||||
rel_40_vs_20 = coeff_arrays['3'] - coeff_arrays['2']
|
||||
rel_40_vs_20_filtered = rel_40_vs_20[4:] # Skip J1-J4
|
||||
rms_40_vs_20 = np.sqrt(np.sum(rel_40_vs_20_filtered ** 2))
|
||||
|
||||
# Objective 2: rel_filtered_rms_60_vs_20
|
||||
rel_60_vs_20 = coeff_arrays['4'] - coeff_arrays['2']
|
||||
rel_60_vs_20_filtered = rel_60_vs_20[4:] # Skip J1-J4
|
||||
rms_60_vs_20 = np.sqrt(np.sum(rel_60_vs_20_filtered ** 2))
|
||||
|
||||
# Objective 3: mfg_90_optician_workload (J1-J3 filtered, keep J4 defocus)
|
||||
rel_90_vs_20 = coeff_arrays['1'] - coeff_arrays['2']
|
||||
rel_90_vs_20_filtered = rel_90_vs_20[3:] # Skip only J1-J3 (keep J4 defocus)
|
||||
rms_mfg_90 = np.sqrt(np.sum(rel_90_vs_20_filtered ** 2))
|
||||
|
||||
return {
|
||||
'rel_filtered_rms_40_vs_20': float(rms_40_vs_20),
|
||||
'rel_filtered_rms_60_vs_20': float(rms_60_vs_20),
|
||||
'mfg_90_optician_workload': float(rms_mfg_90)
|
||||
}
|
||||
|
||||
def compare_results(self, candidates: list) -> dict:
    """Compare GNN predictions vs FEA results.

    Logs a per-candidate table of GNN vs FEA values and returns
    per-objective mean/max percent-error statistics.
    """
    logger.info("\n" + "=" * 60)
    logger.info("GNN vs FEA COMPARISON")
    logger.info("=" * 60)

    errors = {name: [] for name in self.objective_names}

    # Only candidates with a successful FEA run contribute.
    validated = [c for c in candidates
                 if c.get('fea_success', False) and 'fea_objectives' in c]

    for cand in validated:
        gnn = cand['gnn_objectives']
        fea = cand['fea_objectives']

        logger.info(f"\n{cand['source']}:")
        logger.info(f"  {'Objective':<30} {'GNN':<10} {'FEA':<10} {'Error':<10}")
        logger.info(f"  {'-'*60}")

        for name in self.objective_names:
            gnn_val = gnn[name]
            fea_val = fea[name]
            pct = abs(gnn_val - fea_val) / fea_val * 100 if fea_val > 0 else 0
            logger.info(f"  {name:<30} {gnn_val:<10.2f} {fea_val:<10.2f} {pct:<10.1f}%")
            errors[name].append(pct)

    logger.info("\n" + "-" * 60)
    logger.info("SUMMARY STATISTICS")
    logger.info("-" * 60)

    summary = {}
    for name in self.objective_names:
        if errors[name]:
            mean_err = np.mean(errors[name])
            max_err = np.max(errors[name])
            summary[name] = {'mean_error_pct': mean_err, 'max_error_pct': max_err}
            logger.info(f"{name}: Mean error = {mean_err:.1f}%, Max error = {max_err:.1f}%")

    return summary
|
||||
|
||||
def run_full_workflow(self, n_trials: int = 5000, n_validate: int = 5, gnn_only: bool = False):
    """Run complete workflow: GNN turbo -> select candidates -> FEA validation -> comparison.

    Parameters
    ----------
    n_trials : int
        Number of GNN turbo trials.
    n_validate : int
        Number of top candidates to validate with FEA.
    gnn_only : bool
        If True, stop after the turbo phase and skip FEA entirely.

    Returns the final report dict (also saved as JSON in RESULTS_DIR).
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Phase 1: GNN Turbo
    logger.info("\n" + "=" * 60)
    logger.info("PHASE 1: GNN TURBO OPTIMIZATION")
    logger.info("=" * 60)

    turbo_results = self.run_turbo(n_trials=n_trials)

    # JSON-safe summary of the turbo phase for persistence.
    turbo_summary = {
        'timestamp': timestamp,
        'n_trials': n_trials,
        'n_pareto': len(turbo_results['pareto']),
        'elapsed_time': turbo_results['elapsed_time'],
        'best_per_objective': {
            obj: {'design': pred.design, 'objectives': pred.objectives}
            for obj, pred in turbo_results['best_per_objective'].items()
        },
        'pareto_front': [
            {'design': p.design, 'objectives': p.objectives}
            for p in turbo_results['pareto'][:20]  # Top 20 from Pareto
        ]
    }

    turbo_file = RESULTS_DIR / f'turbo_results_{timestamp}.json'
    with open(turbo_file, 'w') as f:
        json.dump(turbo_summary, f, indent=2)
    logger.info(f"Turbo results saved to {turbo_file}")

    if gnn_only:
        logger.info("\n--gnn-only flag set, skipping FEA validation")
        return {'turbo': turbo_summary}

    # Phase 2: FEA Validation
    logger.info("\n" + "=" * 60)
    logger.info("PHASE 2: FEA VALIDATION")
    logger.info("=" * 60)

    candidates = self.select_validation_candidates(turbo_results, n_validate=n_validate)

    for i, candidate in enumerate(candidates):
        logger.info(f"\n--- Validating candidate {i+1}/{len(candidates)} ---")
        fea_result = self.run_fea_validation(candidate['design'], trial_num=i+1)
        candidate['fea_success'] = fea_result.get('success', False)
        if fea_result.get('success'):
            candidate['fea_objectives'] = fea_result['objectives']
            candidate['fea_time'] = fea_result.get('elapsed_time', 0)

    # Phase 3: Comparison
    logger.info("\n" + "=" * 60)
    logger.info("PHASE 3: RESULTS COMPARISON")
    logger.info("=" * 60)

    comparison = self.compare_results(candidates)

    # Final report, including per-candidate GNN/FEA results.
    final_report = {
        'timestamp': timestamp,
        'turbo_summary': turbo_summary,
        'validation_candidates': [
            {
                'source': c['source'],
                'design': c['design'],
                'gnn_objectives': c['gnn_objectives'],
                'fea_objectives': c.get('fea_objectives'),
                'fea_success': c.get('fea_success', False),
                'fea_time': c.get('fea_time')
            }
            for c in candidates
        ],
        'accuracy_summary': comparison
    }

    report_file = RESULTS_DIR / f'gnn_turbo_report_{timestamp}.json'
    with open(report_file, 'w') as f:
        json.dump(final_report, f, indent=2)
    logger.info(f"\nFinal report saved to {report_file}")

    # Final summary
    logger.info("\n" + "=" * 60)
    logger.info("WORKFLOW COMPLETE")
    logger.info("=" * 60)
    logger.info(f"GNN Turbo: {n_trials} trials in {turbo_results['elapsed_time']:.1f}s")
    logger.info(f"Pareto front: {len(turbo_results['pareto'])} designs")

    successful_validations = sum(1 for c in candidates if c.get('fea_success', False))
    logger.info(f"FEA Validations: {successful_validations}/{len(candidates)} successful")

    if comparison:
        # BUGFIX: the per-objective mean errors were collapsed into a
        # single-element list and averaged twice; average them directly.
        overall_err = np.mean([comparison[obj]['mean_error_pct'] for obj in comparison])
        logger.info(f"Overall GNN accuracy: {100 - overall_err:.1f}%")

    return final_report
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Main
|
||||
# ============================================================================
|
||||
|
||||
def main():
    """CLI entry point: parse arguments and run the turbo + validation workflow."""
    parser = argparse.ArgumentParser(
        description="GNN Turbo Optimization with FEA Validation",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__
    )
    parser.add_argument('--trials', type=int, default=5000,
                        help='Number of GNN turbo trials (default: 5000)')
    parser.add_argument('--validate-top', type=int, default=5,
                        help='Number of top candidates to validate with FEA (default: 5)')
    parser.add_argument('--gnn-only', action='store_true',
                        help='Run only GNN turbo, skip FEA validation')
    parser.add_argument('--checkpoint', type=str, default=str(CHECKPOINT_PATH),
                        help='Path to GNN checkpoint')
    args = parser.parse_args()

    logger.info(f"Starting GNN Turbo Optimization")
    logger.info(f"  Checkpoint: {args.checkpoint}")
    logger.info(f"  GNN trials: {args.trials}")
    logger.info(f"  FEA validations: {args.validate_top if not args.gnn_only else 'SKIP'}")

    runner = GNNTurboRunner(
        config_path=CONFIG_PATH,
        checkpoint_path=Path(args.checkpoint)
    )
    runner.run_full_workflow(
        n_trials=args.trials,
        n_validate=args.validate_top,
        gnn_only=args.gnn_only
    )

    logger.info("\nDone!")
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||
1137
studies/m1_mirror_adaptive_V12/run_optimization.py
Normal file
1137
studies/m1_mirror_adaptive_V12/run_optimization.py
Normal file
File diff suppressed because it is too large
Load Diff
239
studies/m1_mirror_adaptive_V12/validate_gnn_best.py
Normal file
239
studies/m1_mirror_adaptive_V12/validate_gnn_best.py
Normal file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Validate GNN Best Designs with FEA
|
||||
===================================
|
||||
Reads best designs from gnn_turbo_results.json and validates with actual FEA.
|
||||
|
||||
Usage:
|
||||
python validate_gnn_best.py # Full validation (solve + extract)
|
||||
python validate_gnn_best.py --resume # Resume: skip existing OP2, just extract Zernike
|
||||
"""
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
from optimization_engine.gnn.gnn_optimizer import ZernikeGNNOptimizer, GNNPrediction
|
||||
from optimization_engine.extractors import ZernikeExtractor
|
||||
|
||||
# Paths
|
||||
STUDY_DIR = Path(__file__).parent
|
||||
RESULTS_FILE = STUDY_DIR / "gnn_turbo_results.json"
|
||||
CONFIG_PATH = STUDY_DIR / "1_setup" / "optimization_config.json"
|
||||
CHECKPOINT_PATH = Path("C:/Users/Antoine/Atomizer/zernike_gnn_checkpoint.pt")
|
||||
|
||||
|
||||
def extract_from_existing_op2(study_dir: Path, turbo_results: dict, config: dict) -> list:
    """Extract Zernike objectives from existing OP2 files in iter9000-9002.

    For each of the three "best" GNN designs, locate the already-solved OP2
    in 2_iterations/iter900x, extract the relative Zernike objectives, and
    compare them against the stored GNN predictions.

    Returns a list of per-design result dicts whose 'status' is one of
    'success', 'no_op2', 'incomplete_op2', or 'extraction_error'.
    """
    iterations_dir = study_dir / "2_iterations"
    zernike_settings = config.get('zernike_settings', {})

    results = []
    design_keys = ['best_40_vs_20', 'best_60_vs_20', 'best_mfg_90']

    def failure(key, trial_num, status, error=None):
        # Common shape for every non-success record (deduplicates the three
        # copy-pasted dict literals of the original).
        rec = {
            'design': turbo_results[key]['design_vars'],
            'gnn_objectives': turbo_results[key]['objectives'],
            'fea_objectives': None,
            'status': status,
            'trial_num': trial_num,
        }
        if error is not None:
            rec['error'] = error
        return rec

    for i, key in enumerate(design_keys):
        trial_num = 9000 + i
        iter_dir = iterations_dir / f"iter{trial_num}"

        print(f"\n[{i+1}/3] Processing {iter_dir.name} ({key})")

        # Find OP2 file
        op2_files = list(iter_dir.glob("*-solution_1.op2"))
        if not op2_files:
            print(f"  ERROR: No OP2 file found")
            results.append(failure(key, trial_num, 'no_op2'))
            continue

        op2_path = op2_files[0]
        size_mb = op2_path.stat().st_size / 1e6
        print(f"  OP2: {op2_path.name} ({size_mb:.1f} MB)")

        # A complete solve produces a large OP2; anything under 50 MB is a
        # truncated/failed run.
        if size_mb < 50:
            print(f"  ERROR: OP2 too small, likely incomplete")
            results.append(failure(key, trial_num, 'incomplete_op2'))
            continue

        try:
            extractor = ZernikeExtractor(
                str(op2_path),
                bdf_path=None,
                displacement_unit=zernike_settings.get('displacement_unit', 'mm'),
                n_modes=zernike_settings.get('n_modes', 50),
                filter_orders=zernike_settings.get('filter_low_orders', 4)
            )

            ref = zernike_settings.get('reference_subcase', '2')

            # Relative objectives: subcase 3 = 40 deg, 4 = 60 deg, 1 = 90 deg.
            rel_40 = extractor.extract_relative("3", ref)
            rel_60 = extractor.extract_relative("4", ref)
            rel_90 = extractor.extract_relative("1", ref)

            fea_objectives = {
                'rel_filtered_rms_40_vs_20': rel_40['relative_filtered_rms_nm'],
                'rel_filtered_rms_60_vs_20': rel_60['relative_filtered_rms_nm'],
                'mfg_90_optician_workload': rel_90['relative_rms_filter_j1to3'],
            }

            # Per-objective GNN-vs-FEA errors (0.01 floor avoids div-by-zero).
            gnn_obj = turbo_results[key]['objectives']
            errors = {}
            for obj_name in fea_objectives:
                gnn_val = gnn_obj[obj_name]
                fea_val = fea_objectives[obj_name]
                errors[f'{obj_name}_abs_error'] = abs(gnn_val - fea_val)
                errors[f'{obj_name}_pct_error'] = 100 * abs(gnn_val - fea_val) / max(fea_val, 0.01)

            print(f"  FEA: 40vs20={fea_objectives['rel_filtered_rms_40_vs_20']:.2f} nm "
                  f"(GNN: {gnn_obj['rel_filtered_rms_40_vs_20']:.2f}, err: {errors['rel_filtered_rms_40_vs_20_pct_error']:.1f}%)")
            print(f"       60vs20={fea_objectives['rel_filtered_rms_60_vs_20']:.2f} nm "
                  f"(GNN: {gnn_obj['rel_filtered_rms_60_vs_20']:.2f}, err: {errors['rel_filtered_rms_60_vs_20_pct_error']:.1f}%)")
            print(f"       mfg90={fea_objectives['mfg_90_optician_workload']:.2f} nm "
                  f"(GNN: {gnn_obj['mfg_90_optician_workload']:.2f}, err: {errors['mfg_90_optician_workload_pct_error']:.1f}%)")

            results.append({
                'design': turbo_results[key]['design_vars'],
                'gnn_objectives': gnn_obj,
                'fea_objectives': fea_objectives,
                'errors': errors,
                'trial_num': trial_num,
                'status': 'success'
            })

        except Exception as e:
            print(f"  ERROR extracting Zernike: {e}")
            results.append(failure(key, trial_num, 'extraction_error', error=str(e)))

    return results
|
||||
|
||||
|
||||
def main():
    """Validate GNN turbo-mode candidates against full FEA.

    Two modes:
      * default  -- load the three per-objective best GNN candidates, run
        NX/Nastran FEA for each, and compare FEA objectives against the
        GNN predictions.
      * --resume -- skip solving and re-extract Zernike objectives from
        OP2 files left behind by a previous (interrupted) run.

    Writes ``gnn_validation_report.json`` into STUDY_DIR with per-candidate
    errors plus mean/std/max percent-error summary statistics.

    Returns:
        0 on completion (used as the process exit code).
    """
    parser = argparse.ArgumentParser(description='Validate GNN predictions with FEA')
    parser.add_argument('--resume', action='store_true',
                        help='Resume: extract Zernike from existing OP2 files instead of re-solving')
    args = parser.parse_args()

    # Load GNN turbo results
    print("Loading GNN turbo results...")
    with open(RESULTS_FILE) as f:
        turbo_results = json.load(f)

    # Load config
    with open(CONFIG_PATH) as f:
        config = json.load(f)

    # Show candidates: one candidate per objective (the turbo run's best
    # design for each of the three objectives).
    candidates = []
    for key in ['best_40_vs_20', 'best_60_vs_20', 'best_mfg_90']:
        data = turbo_results[key]
        pred = GNNPrediction(
            design_vars=data['design_vars'],
            # JSON may deliver numbers as str/int; normalize to float.
            objectives={k: float(v) for k, v in data['objectives'].items()}
        )
        candidates.append(pred)
        print(f"\n{key}:")
        print(f" 40vs20: {pred.objectives['rel_filtered_rms_40_vs_20']:.2f} nm")
        print(f" 60vs20: {pred.objectives['rel_filtered_rms_60_vs_20']:.2f} nm")
        print(f" mfg90: {pred.objectives['mfg_90_optician_workload']:.2f} nm")

    if args.resume:
        # Resume mode: extract from existing OP2 files
        print("\n" + "="*60)
        print("RESUME MODE: Extracting Zernike from existing OP2 files")
        print("="*60)

        validation_results = extract_from_existing_op2(STUDY_DIR, turbo_results, config)
    else:
        # Full mode: run FEA + extract
        print("\n" + "="*60)
        print("LOADING GNN OPTIMIZER FOR FEA VALIDATION")
        print("="*60)

        optimizer = ZernikeGNNOptimizer.from_checkpoint(CHECKPOINT_PATH, CONFIG_PATH)
        print(f"Design variables: {len(optimizer.design_names)}")

        print("\n" + "="*60)
        print("RUNNING FEA VALIDATION")
        print("="*60)

        # start_trial_num=9000 keeps validation iteration folders clearly
        # separated from regular optimization trial numbering.
        validation_results = optimizer.validate_with_fea(
            candidates=candidates,
            study_dir=STUDY_DIR,
            verbose=True,
            start_trial_num=9000
        )

    # Summary
    import numpy as np
    successful = [r for r in validation_results if r['status'] == 'success']
    print(f"\n{'='*60}")
    print(f"VALIDATION SUMMARY")
    print(f"{'='*60}")
    print(f"Successful: {len(successful)}/{len(validation_results)}")

    if successful:
        # Mean absolute percent error of GNN vs FEA, per objective.
        avg_errors = {}
        for obj in ['rel_filtered_rms_40_vs_20', 'rel_filtered_rms_60_vs_20', 'mfg_90_optician_workload']:
            avg_errors[obj] = np.mean([r['errors'][f'{obj}_pct_error'] for r in successful])

        print(f"\nAverage GNN prediction errors:")
        print(f" 40 vs 20: {avg_errors['rel_filtered_rms_40_vs_20']:.1f}%")
        print(f" 60 vs 20: {avg_errors['rel_filtered_rms_60_vs_20']:.1f}%")
        print(f" mfg 90: {avg_errors['mfg_90_optician_workload']:.1f}%")

    # Save validation report
    from datetime import datetime
    output_path = STUDY_DIR / "gnn_validation_report.json"

    report = {
        'timestamp': datetime.now().isoformat(),
        'mode': 'resume' if args.resume else 'full',
        'n_candidates': len(validation_results),
        'n_successful': len(successful),
        'results': validation_results,
    }

    if successful:
        # Per-objective error statistics (mean/std/max of percent errors),
        # used downstream for GNN-to-FEA calibration.
        report['error_summary'] = {
            obj: {
                'mean_pct': float(np.mean([r['errors'][f'{obj}_pct_error'] for r in successful])),
                'std_pct': float(np.std([r['errors'][f'{obj}_pct_error'] for r in successful])),
                'max_pct': float(np.max([r['errors'][f'{obj}_pct_error'] for r in successful])),
            }
            for obj in ['rel_filtered_rms_40_vs_20', 'rel_filtered_rms_60_vs_20', 'mfg_90_optician_workload']
        }

    with open(output_path, 'w') as f:
        json.dump(report, f, indent=2)

    print(f"\nValidation report saved to: {output_path}")
    print("\nDone!")
    return 0
|
||||
|
||||
|
||||
# Script entry point: the process exit code is main()'s return value.
if __name__ == "__main__":
    sys.exit(main())
|
||||
194
studies/m1_mirror_adaptive_V13/1_setup/optimization_config.json
Normal file
194
studies/m1_mirror_adaptive_V13/1_setup/optimization_config.json
Normal file
@@ -0,0 +1,194 @@
|
||||
{
|
||||
"$schema": "Atomizer M1 Mirror NSGA-II Pure FEA Optimization V13",
|
||||
"study_name": "m1_mirror_adaptive_V13",
|
||||
"description": "V13 - Pure NSGA-II multi-objective optimization with FEA only. No surrogate. Seeds from V11+V12 FEA data.",
|
||||
|
||||
"source_studies": {
|
||||
"v11": {
|
||||
"database": "../m1_mirror_adaptive_V11/3_results/study.db",
|
||||
"description": "V11 FEA trials (107 from V10 + V11)"
|
||||
},
|
||||
"v12": {
|
||||
"database": "../m1_mirror_adaptive_V12/3_results/study.db",
|
||||
"description": "V12 FEA trials from GNN validation"
|
||||
}
|
||||
},
|
||||
|
||||
"design_variables": [
|
||||
{
|
||||
"name": "lateral_inner_angle",
|
||||
"expression_name": "lateral_inner_angle",
|
||||
"min": 25.0,
|
||||
"max": 28.5,
|
||||
"baseline": 26.79,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_outer_angle",
|
||||
"expression_name": "lateral_outer_angle",
|
||||
"min": 13.0,
|
||||
"max": 17.0,
|
||||
"baseline": 14.64,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_outer_pivot",
|
||||
"expression_name": "lateral_outer_pivot",
|
||||
"min": 9.0,
|
||||
"max": 12.0,
|
||||
"baseline": 10.40,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_inner_pivot",
|
||||
"expression_name": "lateral_inner_pivot",
|
||||
"min": 9.0,
|
||||
"max": 12.0,
|
||||
"baseline": 10.07,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_middle_pivot",
|
||||
"expression_name": "lateral_middle_pivot",
|
||||
"min": 18.0,
|
||||
"max": 23.0,
|
||||
"baseline": 20.73,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "lateral_closeness",
|
||||
"expression_name": "lateral_closeness",
|
||||
"min": 9.5,
|
||||
"max": 12.5,
|
||||
"baseline": 11.02,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_min",
|
||||
"expression_name": "whiffle_min",
|
||||
"min": 35.0,
|
||||
"max": 55.0,
|
||||
"baseline": 40.55,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_outer_to_vertical",
|
||||
"expression_name": "whiffle_outer_to_vertical",
|
||||
"min": 68.0,
|
||||
"max": 80.0,
|
||||
"baseline": 75.67,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "whiffle_triangle_closeness",
|
||||
"expression_name": "whiffle_triangle_closeness",
|
||||
"min": 50.0,
|
||||
"max": 65.0,
|
||||
"baseline": 60.00,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "blank_backface_angle",
|
||||
"expression_name": "blank_backface_angle",
|
||||
"min": 4,
|
||||
"max": 5.0,
|
||||
"baseline": 4.23,
|
||||
"units": "degrees",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"name": "inner_circular_rib_dia",
|
||||
"expression_name": "inner_circular_rib_dia",
|
||||
"min": 480.0,
|
||||
"max": 620.0,
|
||||
"baseline": 534.00,
|
||||
"units": "mm",
|
||||
"enabled": true
|
||||
}
|
||||
],
|
||||
|
||||
"objectives": [
|
||||
{
|
||||
"name": "rel_filtered_rms_40_vs_20",
|
||||
"description": "Filtered RMS WFE at 40 deg relative to 20 deg reference (operational tracking)",
|
||||
"direction": "minimize",
|
||||
"weight": 5.0,
|
||||
"target": 4.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "3",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_filtered_rms_nm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "rel_filtered_rms_60_vs_20",
|
||||
"description": "Filtered RMS WFE at 60 deg relative to 20 deg reference (operational tracking)",
|
||||
"direction": "minimize",
|
||||
"weight": 5.0,
|
||||
"target": 10.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "4",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_filtered_rms_nm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "mfg_90_optician_workload",
|
||||
"description": "Manufacturing deformation at 90 deg polishing (J1-J3 filtered RMS)",
|
||||
"direction": "minimize",
|
||||
"weight": 1.0,
|
||||
"target": 20.0,
|
||||
"units": "nm",
|
||||
"extractor_config": {
|
||||
"target_subcase": "1",
|
||||
"reference_subcase": "2",
|
||||
"metric": "relative_rms_filter_j1to3"
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
"zernike_settings": {
|
||||
"n_modes": 50,
|
||||
"filter_low_orders": 4,
|
||||
"displacement_unit": "mm",
|
||||
"subcases": ["1", "2", "3", "4"],
|
||||
"subcase_labels": {"1": "90deg", "2": "20deg", "3": "40deg", "4": "60deg"},
|
||||
"reference_subcase": "2"
|
||||
},
|
||||
|
||||
"nsga2_settings": {
|
||||
"population_size": 20,
|
||||
"n_generations": 50,
|
||||
"crossover_prob": 0.9,
|
||||
"mutation_prob": 0.1,
|
||||
"seed_from_prior": true
|
||||
},
|
||||
|
||||
"nx_settings": {
|
||||
"nx_install_path": "C:\\Program Files\\Siemens\\NX2506",
|
||||
"sim_file": "ASSY_M1_assyfem1_sim1.sim",
|
||||
"solution_name": "Solution 1",
|
||||
"op2_pattern": "*-solution_1.op2",
|
||||
"simulation_timeout_s": 600,
|
||||
"journal_timeout_s": 120,
|
||||
"op2_timeout_s": 1800,
|
||||
"auto_start_nx": true
|
||||
},
|
||||
|
||||
"dashboard_settings": {
|
||||
"trial_source_tag": true,
|
||||
"fea_marker": "circle",
|
||||
"fea_color": "#4CAF50"
|
||||
}
|
||||
}
|
||||
210
studies/m1_mirror_adaptive_V13/README.md
Normal file
210
studies/m1_mirror_adaptive_V13/README.md
Normal file
@@ -0,0 +1,210 @@
|
||||
# M1 Mirror Pure NSGA-II FEA Optimization V13
|
||||
|
||||
Pure multi-objective FEA optimization with NSGA-II sampler for the M1 telescope mirror support system.
|
||||
|
||||
**Created**: 2025-12-09
|
||||
**Protocol**: Pure NSGA-II Multi-Objective (No Neural Surrogate)
|
||||
**Status**: Running
|
||||
|
||||
---
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
V13 runs **pure FEA optimization** without any neural surrogate to establish ground-truth Pareto front. This serves as:
|
||||
|
||||
1. **Baseline** for evaluating GNN/MLP surrogate accuracy
|
||||
2. **Ground truth** Pareto front for comparison
|
||||
3. **Validation data** for future surrogate training
|
||||
|
||||
### Key Difference from V11/V12
|
||||
|
||||
| Aspect | V11 (Adaptive MLP) | V12 (GNN + Validation) | V13 (Pure FEA) |
|
||||
|--------|-------------------|------------------------|----------------|
|
||||
| Surrogate | MLP (4-layer) | Zernike GNN | None |
|
||||
| Sampler | TPE | NSGA-II | NSGA-II |
|
||||
| Trials/hour | ~100 NN + 5 FEA | ~5000 GNN + 5 FEA | 6-7 FEA |
|
||||
| Purpose | Fast exploration | Field prediction | Ground truth |
|
||||
|
||||
---
|
||||
|
||||
## 2. Seeding Strategy
|
||||
|
||||
V13 seeds from **all prior FEA data** in V11 and V12:
|
||||
|
||||
```
|
||||
V11 (107 FEA trials) + V12 (131 FEA trials) = 238 total
|
||||
│
|
||||
┌──────────┴──────────┐
|
||||
│ Parameter Filter │
|
||||
│ (blank_backface │
|
||||
│ 4.0-5.0 range) │
|
||||
└──────────┬──────────┘
|
||||
│
|
||||
217 trials seeded into V13
|
||||
```
|
||||
|
||||
### Why 21 Trials Were Skipped
|
||||
|
||||
V13 config uses `blank_backface_angle: [4.0, 5.0]` (intentionally narrower).
|
||||
Trials from V10/V11 with `blank_backface_angle < 4.0` (range was 3.5-5.0) were rejected by Optuna.
|
||||
|
||||
---
|
||||
|
||||
## 3. Mathematical Formulation
|
||||
|
||||
### 3.1 Objectives (Same as V11/V12)
|
||||
|
||||
| Objective | Goal | Formula | Target | Units |
|
||||
|-----------|------|---------|--------|-------|
|
||||
| `rel_filtered_rms_40_vs_20` | minimize | RMS_filt(Z_40 - Z_20) | 4.0 | nm |
|
||||
| `rel_filtered_rms_60_vs_20` | minimize | RMS_filt(Z_60 - Z_20) | 10.0 | nm |
|
||||
| `mfg_90_optician_workload` | minimize | RMS_J1-J3(Z_90 - Z_20) | 20.0 | nm |
|
||||
|
||||
### 3.2 Design Variables (11)
|
||||
|
||||
| Parameter | Bounds | Units |
|
||||
|-----------|--------|-------|
|
||||
| lateral_inner_angle | [25.0, 28.5] | deg |
|
||||
| lateral_outer_angle | [13.0, 17.0] | deg |
|
||||
| lateral_outer_pivot | [9.0, 12.0] | mm |
|
||||
| lateral_inner_pivot | [9.0, 12.0] | mm |
|
||||
| lateral_middle_pivot | [18.0, 23.0] | mm |
|
||||
| lateral_closeness | [9.5, 12.5] | mm |
|
||||
| whiffle_min | [35.0, 55.0] | mm |
|
||||
| whiffle_outer_to_vertical | [68.0, 80.0] | deg |
|
||||
| whiffle_triangle_closeness | [50.0, 65.0] | mm |
|
||||
| blank_backface_angle | [4.0, 5.0] | deg |
|
||||
| inner_circular_rib_dia | [480.0, 620.0] | mm |
|
||||
|
||||
---
|
||||
|
||||
## 4. NSGA-II Configuration
|
||||
|
||||
```python
|
||||
sampler = NSGAIISampler(
    population_size=20,
    crossover_prob=0.9,
    mutation_prob=0.1,
    seed=42
)
|
||||
```
|
||||
|
||||
NSGA-II performs true multi-objective optimization:
|
||||
- **Non-dominated sorting** for Pareto ranking
|
||||
- **Crowding distance** for diversity preservation
|
||||
- **No scalarization** - preserves full Pareto front
|
||||
|
||||
---
|
||||
|
||||
## 5. Study Structure
|
||||
|
||||
```
|
||||
m1_mirror_adaptive_V13/
|
||||
├── 1_setup/
|
||||
│ ├── model/ # NX model files (from V11)
|
||||
│ └── optimization_config.json # Study config
|
||||
├── 2_iterations/
|
||||
│ └── iter{N}/ # FEA working directories
|
||||
│ ├── *.prt, *.fem, *.sim # NX files
|
||||
│ ├── params.exp # Parameter expressions
|
||||
│ └── *solution_1.op2 # Results
|
||||
├── 3_results/
|
||||
│ └── study.db # Optuna database
|
||||
├── run_optimization.py # Main entry point
|
||||
└── README.md # This file
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6. Usage
|
||||
|
||||
```bash
|
||||
# Start fresh optimization
|
||||
python run_optimization.py --start --trials 55
|
||||
|
||||
# Resume after interruption (Windows update, etc.)
|
||||
python run_optimization.py --start --trials 35 --resume
|
||||
|
||||
# Check status
|
||||
python run_optimization.py --status
|
||||
```
|
||||
|
||||
### Expected Runtime
|
||||
|
||||
- ~8-10 min per FEA trial
|
||||
- 55 trials ≈ 7-8 hours overnight
|
||||
|
||||
---
|
||||
|
||||
## 7. Trial Sources in Database
|
||||
|
||||
| Source Tag | Count | Description |
|
||||
|------------|-------|-------------|
|
||||
| `V11_FEA` | 5 | V11-only FEA trials |
|
||||
| `V11_V10_FEA` | 81 | V11 trials inherited from V10 |
|
||||
| `V12_FEA` | 41 | V12-only FEA trials |
|
||||
| `V12_V10_FEA` | 90 | V12 trials inherited from V10 |
|
||||
| `FEA` | 10+ | New V13 FEA trials |
|
||||
|
||||
Query trial sources:
|
||||
```sql
|
||||
SELECT value_json, COUNT(*)
|
||||
FROM trial_user_attributes
|
||||
WHERE key = 'source'
|
||||
GROUP BY value_json;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. Post-Processing
|
||||
|
||||
### Extract Pareto Front
|
||||
|
||||
```python
|
||||
import optuna
|
||||
|
||||
study = optuna.load_study(
|
||||
study_name="m1_mirror_V13_nsga2",
|
||||
storage="sqlite:///3_results/study.db"
|
||||
)
|
||||
|
||||
# Get Pareto-optimal trials
|
||||
pareto = study.best_trials
|
||||
|
||||
# Print Pareto front
|
||||
for t in pareto:
|
||||
print(f"Trial {t.number}: {t.values}")
|
||||
```
|
||||
|
||||
### Compare to GNN Predictions
|
||||
|
||||
```python
|
||||
# Load V13 FEA Pareto front
|
||||
# Load GNN predictions from V12
|
||||
# Compute error: |GNN - FEA| / FEA
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9. Results (To Be Updated)
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Seeded trials | 217 |
|
||||
| New FEA trials | TBD |
|
||||
| Pareto front size | TBD |
|
||||
| Best rel_rms_40 | TBD |
|
||||
| Best rel_rms_60 | TBD |
|
||||
| Best mfg_90 | TBD |
|
||||
|
||||
---
|
||||
|
||||
## 10. Cross-References
|
||||
|
||||
- **V10**: `../m1_mirror_zernike_optimization_V10/` - Original LHS sampling
|
||||
- **V11**: `../m1_mirror_adaptive_V11/` - MLP adaptive surrogate
|
||||
- **V12**: `../m1_mirror_adaptive_V12/` - GNN field prediction
|
||||
|
||||
---
|
||||
|
||||
*Generated by Atomizer Framework. Pure NSGA-II for ground-truth Pareto optimization.*
|
||||
567
studies/m1_mirror_adaptive_V13/run_optimization.py
Normal file
567
studies/m1_mirror_adaptive_V13/run_optimization.py
Normal file
@@ -0,0 +1,567 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
M1 Mirror Pure NSGA-II FEA Optimization V13
|
||||
=============================================
|
||||
|
||||
Pure multi-objective optimization with NSGA-II sampler and FEA only.
|
||||
No neural surrogate - every trial is a real FEA evaluation.
|
||||
|
||||
Key Features:
|
||||
1. NSGA-II sampler for true multi-objective Pareto optimization
|
||||
2. Seeds from V11 + V12 FEA trials (217 seeded after bounds filtering)
|
||||
3. No surrogate bias - ground truth only
|
||||
4. 3 objectives: rel_rms_40_vs_20, rel_rms_60_vs_20, mfg_90
|
||||
|
||||
Usage:
|
||||
python run_optimization.py --start
|
||||
python run_optimization.py --start --trials 50
|
||||
python run_optimization.py --start --trials 50 --resume
|
||||
|
||||
For 8-hour overnight run (~55 trials at 8-9 min/trial):
|
||||
python run_optimization.py --start --trials 55
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import argparse
|
||||
import logging
|
||||
import sqlite3
|
||||
import shutil
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Optional, Any
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
import numpy as np
|
||||
|
||||
# Add parent directories to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
import optuna
|
||||
from optuna.samplers import NSGAIISampler
|
||||
|
||||
# Atomizer imports
|
||||
from optimization_engine.nx_solver import NXSolver
|
||||
from optimization_engine.utils import ensure_nx_running
|
||||
from optimization_engine.extractors import ZernikeExtractor
|
||||
|
||||
# ============================================================================
# Paths
# ============================================================================

STUDY_DIR = Path(__file__).parent
SETUP_DIR = STUDY_DIR / "1_setup"            # NX master model + optimization config
ITERATIONS_DIR = STUDY_DIR / "2_iterations"  # one iter<N>/ folder per FEA trial
RESULTS_DIR = STUDY_DIR / "3_results"        # Optuna study.db + log file
CONFIG_PATH = SETUP_DIR / "optimization_config.json"

# Source studies for seeding: prior FEA trials are replayed into the V13 study.
V11_DB = STUDY_DIR.parent / "m1_mirror_adaptive_V11" / "3_results" / "study.db"
V12_DB = STUDY_DIR.parent / "m1_mirror_adaptive_V12" / "3_results" / "study.db"

# Ensure directories exist
ITERATIONS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(exist_ok=True)

# Logging: everything goes to stdout AND an append-mode log file, so
# interrupted overnight runs keep their history.
LOG_FILE = RESULTS_DIR / "optimization.log"
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)-8s | %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler(LOG_FILE, mode='a')
    ]
)
logger = logging.getLogger(__name__)


# ============================================================================
# Objective names
# ============================================================================

# Ordering matters: Optuna stores trial values in this order (see run()).
OBJ_NAMES = [
    'rel_filtered_rms_40_vs_20',
    'rel_filtered_rms_60_vs_20',
    'mfg_90_optician_workload'
]

# The 11 enabled design variables; bounds live in optimization_config.json.
DESIGN_VAR_NAMES = [
    'lateral_inner_angle', 'lateral_outer_angle', 'lateral_outer_pivot',
    'lateral_inner_pivot', 'lateral_middle_pivot', 'lateral_closeness',
    'whiffle_min', 'whiffle_outer_to_vertical', 'whiffle_triangle_closeness',
    'blank_backface_angle', 'inner_circular_rib_dia'
]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Prior Data Loader
|
||||
# ============================================================================
|
||||
|
||||
def load_fea_trials_from_db(db_path: Path, label: str) -> List[Dict]:
    """Collect completed FEA trials (params + objectives) from an Optuna DB.

    Reads the Optuna SQLite schema directly (``trials``,
    ``trial_user_attributes``, ``trial_params``).  Only trials whose
    ``source`` attribute mentions 'FEA' are kept -- surrogate (NN) trials
    are discarded.  Returns an empty list when the database is missing.
    """
    if not db_path.exists():
        logger.warning(f"{label} database not found: {db_path}")
        return []

    collected: List[Dict] = []
    conn = sqlite3.connect(str(db_path))

    try:
        cur = conn.cursor()
        cur.execute('''
            SELECT trial_id, number FROM trials
            WHERE state = 'COMPLETE'
        ''')

        for trial_id, trial_num in cur.fetchall():
            # User attributes carry the source tag and (sometimes) objectives.
            cur.execute('''
                SELECT key, value_json FROM trial_user_attributes
                WHERE trial_id = ?
            ''', (trial_id,))
            attrs = {key: json.loads(raw) for key, raw in cur.fetchall()}

            # Keep only FEA-backed trials; skip surrogate predictions.
            source = attrs.get('source', 'FEA')
            if 'FEA' not in source:
                continue

            cur.execute('''
                SELECT param_name, param_value FROM trial_params
                WHERE trial_id = ?
            ''', (trial_id,))
            params = {name: float(raw) for name, raw in cur.fetchall()}
            if not params:
                continue

            # Objectives are stored either as one 'objectives' dict or as
            # individual per-objective attributes.
            if 'objectives' in attrs:
                objectives = attrs['objectives']
            else:
                objectives = {name: attrs[name] for name in OBJ_NAMES if name in attrs}

            # Only trials with all three objectives are usable as seeds.
            if all(name in objectives for name in OBJ_NAMES):
                collected.append({
                    'trial_num': trial_num,
                    'params': params,
                    'objectives': objectives,
                    'source': f'{label}_{source}'
                })

    except Exception as e:
        logger.error(f"Error loading {label} data: {e}")
    finally:
        conn.close()

    logger.info(f"Loaded {len(collected)} FEA trials from {label}")
    return collected
|
||||
|
||||
|
||||
def load_all_prior_fea_data() -> List[Dict]:
    """Aggregate seed-worthy FEA trials from the V11 and V12 studies."""
    combined: List[Dict] = []
    for db_path, label in ((V11_DB, "V11"), (V12_DB, "V12")):
        combined.extend(load_fea_trials_from_db(db_path, label))

    logger.info(f"Total prior FEA trials: {len(combined)}")
    return combined
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# FEA Runner
|
||||
# ============================================================================
|
||||
|
||||
class FEARunner:
    """Runs actual FEA simulations.

    Thin wrapper around the Atomizer NXSolver: lazily boots an NX session,
    creates one iteration folder per trial, solves, and converts the OP2
    result into the three V13 objectives via ZernikeExtractor.
    """

    def __init__(self, config: Dict[str, Any]):
        # Parsed optimization_config.json (design_variables, nx_settings,
        # zernike_settings, ...).
        self.config = config
        self.nx_solver = None   # created lazily by setup()
        self.nx_manager = None  # NX session handle from ensure_nx_running()
        self.master_model_dir = SETUP_DIR / "model"

    def setup(self):
        """Setup NX and solver (called once, lazily, from run_fea)."""
        logger.info("Setting up NX session...")

        study_name = self.config.get('study_name', 'm1_mirror_adaptive_V13')

        try:
            # Reuse a live NX session when one exists; otherwise start one.
            self.nx_manager, nx_was_started = ensure_nx_running(
                session_id=study_name,
                auto_start=True,
                start_timeout=120
            )
            logger.info("NX session ready" + (" (started)" if nx_was_started else " (existing)"))
        except Exception as e:
            logger.error(f"Failed to setup NX: {e}")
            raise

        # Initialize solver.  The Nastran version is inferred from the NX
        # install directory name (e.g. "...\\NX2506" -> "2506").
        nx_settings = self.config.get('nx_settings', {})
        nx_install_dir = nx_settings.get('nx_install_path', 'C:\\Program Files\\Siemens\\NX2506')
        version_match = re.search(r'NX(\d+)', nx_install_dir)
        nastran_version = version_match.group(1) if version_match else "2506"

        self.nx_solver = NXSolver(
            master_model_dir=str(self.master_model_dir),
            nx_install_dir=nx_install_dir,
            nastran_version=nastran_version,
            timeout=nx_settings.get('simulation_timeout_s', 600),
            use_iteration_folders=True,
            study_name="m1_mirror_adaptive_V13"
        )

    def run_fea(self, params: Dict[str, float], trial_num: int) -> Optional[Dict]:
        """Run FEA and extract objectives.

        Args:
            params: Design-variable values keyed by config 'name'.
            trial_num: Iteration number; names the working folder on disk.

        Returns:
            Result dict (trial_num, params, objectives, source='FEA',
            solve_time) on success, or None when the solve or Zernike
            extraction fails -- the caller treats None as a failed trial.
        """
        if self.nx_solver is None:
            self.setup()

        logger.info(f" [FEA {trial_num}] Running simulation...")

        # Map optimizer parameter names to NX expression names.
        expressions = {var['expression_name']: params[var['name']]
                       for var in self.config['design_variables']}

        iter_folder = self.nx_solver.create_iteration_folder(
            iterations_base_dir=ITERATIONS_DIR,
            iteration_number=trial_num,
            expression_updates=expressions
        )

        try:
            nx_settings = self.config.get('nx_settings', {})
            sim_file = iter_folder / nx_settings.get('sim_file', 'ASSY_M1_assyfem1_sim1.sim')

            t_start = time.time()

            # cleanup=False keeps the iteration folder (OP2 etc.) on disk
            # for post-processing and potential surrogate training data.
            result = self.nx_solver.run_simulation(
                sim_file=sim_file,
                working_dir=iter_folder,
                expression_updates=expressions,
                solution_name=nx_settings.get('solution_name', 'Solution 1'),
                cleanup=False
            )

            solve_time = time.time() - t_start

            if not result['success']:
                logger.error(f" [FEA {trial_num}] Solve failed: {result.get('error')}")
                return None

            logger.info(f" [FEA {trial_num}] Solved in {solve_time:.1f}s")

            # Extract objectives
            op2_path = Path(result['op2_file'])
            objectives = self._extract_objectives(op2_path)

            if objectives is None:
                return None

            logger.info(f" [FEA {trial_num}] 40-20: {objectives['rel_filtered_rms_40_vs_20']:.2f} nm")
            logger.info(f" [FEA {trial_num}] 60-20: {objectives['rel_filtered_rms_60_vs_20']:.2f} nm")
            logger.info(f" [FEA {trial_num}] Mfg: {objectives['mfg_90_optician_workload']:.2f} nm")

            return {
                'trial_num': trial_num,
                'params': params,
                'objectives': objectives,
                'source': 'FEA',
                'solve_time': solve_time
            }

        except Exception as e:
            logger.error(f" [FEA {trial_num}] Error: {e}")
            import traceback
            traceback.print_exc()
            return None

    def _extract_objectives(self, op2_path: Path) -> Optional[Dict[str, float]]:
        """Extract objectives using ZernikeExtractor.

        Subcase mapping (see zernike_settings.subcase_labels in the config):
        "1"=90deg, "2"=20deg (reference), "3"=40deg, "4"=60deg.  All three
        objectives are deformations relative to the 20-degree reference.
        """
        try:
            zernike_settings = self.config.get('zernike_settings', {})

            extractor = ZernikeExtractor(
                op2_path,
                bdf_path=None,
                displacement_unit=zernike_settings.get('displacement_unit', 'mm'),
                n_modes=zernike_settings.get('n_modes', 50),
                filter_orders=zernike_settings.get('filter_low_orders', 4)
            )

            ref = zernike_settings.get('reference_subcase', '2')

            rel_40 = extractor.extract_relative("3", ref)
            rel_60 = extractor.extract_relative("4", ref)
            rel_90 = extractor.extract_relative("1", ref)

            return {
                'rel_filtered_rms_40_vs_20': rel_40['relative_filtered_rms_nm'],
                'rel_filtered_rms_60_vs_20': rel_60['relative_filtered_rms_nm'],
                'mfg_90_optician_workload': rel_90['relative_rms_filter_j1to3']
            }

        except Exception as e:
            logger.error(f"Zernike extraction failed: {e}")
            return None

    def cleanup(self):
        """Cleanup NX session (close NX only when this session is allowed to)."""
        if self.nx_manager:
            if self.nx_manager.can_close_nx():
                self.nx_manager.close_nx_if_allowed()
            self.nx_manager.cleanup()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# NSGA-II Optimizer
|
||||
# ============================================================================
|
||||
|
||||
class NSGA2Optimizer:
|
||||
"""Pure FEA multi-objective optimizer with NSGA-II."""
|
||||
|
||||
    def __init__(self, config: Dict[str, Any]):
        """Load prior seed data and open (or create) the V13 Optuna storage.

        Args:
            config: Parsed optimization_config.json for this study.
        """
        self.config = config
        self.fea_runner = FEARunner(config)

        # Load prior data for seeding (completed V11 + V12 FEA trials).
        self.prior_data = load_all_prior_fea_data()

        # Database
        self.db_path = RESULTS_DIR / "study.db"
        self.storage = optuna.storages.RDBStorage(f'sqlite:///{self.db_path}')

        # State
        self.trial_count = 0   # next iteration-folder number; set in run()
        self.best_pareto = []
|
||||
|
||||
def _get_next_trial_number(self) -> int:
|
||||
"""Get the next trial number based on existing iterations."""
|
||||
existing = list(ITERATIONS_DIR.glob("iter*"))
|
||||
if not existing:
|
||||
return 1
|
||||
max_num = max(int(p.name.replace("iter", "")) for p in existing)
|
||||
return max_num + 1
|
||||
|
||||
    def seed_from_prior(self, study: optuna.Study):
        """Seed the study with prior FEA trials.

        Each prior trial is replayed as a COMPLETE frozen trial so NSGA-II
        starts from the accumulated V11/V12 knowledge instead of from
        scratch.  Trials whose parameters fall outside the (narrowed) V13
        bounds -- e.g. blank_backface_angle < 4.0 -- are rejected by Optuna;
        that is logged as a warning and the trial is skipped, not fixed up.
        """
        if not self.prior_data:
            logger.warning("No prior data to seed from")
            return

        logger.info(f"Seeding study with {len(self.prior_data)} prior FEA trials...")

        for i, d in enumerate(self.prior_data):
            try:
                # Create a trial with the prior data.  Distributions must
                # match the V13 search space for the trial to be accepted.
                distributions = {}
                for var in self.config['design_variables']:
                    if var.get('enabled', False):
                        distributions[var['name']] = optuna.distributions.FloatDistribution(
                            var['min'], var['max']
                        )

                # Create frozen trial; objectives are duplicated into
                # user_attrs for dashboard/SQL queries.
                frozen_trial = optuna.trial.create_trial(
                    params=d['params'],
                    distributions=distributions,
                    values=[
                        d['objectives']['rel_filtered_rms_40_vs_20'],
                        d['objectives']['rel_filtered_rms_60_vs_20'],
                        d['objectives']['mfg_90_optician_workload']
                    ],
                    user_attrs={
                        'source': d.get('source', 'prior_FEA'),
                        'rel_filtered_rms_40_vs_20': d['objectives']['rel_filtered_rms_40_vs_20'],
                        'rel_filtered_rms_60_vs_20': d['objectives']['rel_filtered_rms_60_vs_20'],
                        'mfg_90_optician_workload': d['objectives']['mfg_90_optician_workload'],
                    }
                )

                study.add_trial(frozen_trial)

            except Exception as e:
                # Typically ValueError for out-of-bounds params; skip trial.
                logger.warning(f"Failed to seed trial {i}: {e}")

        logger.info(f"Seeded {len(study.trials)} trials")
|
||||
|
||||
    def run(self, n_trials: int = 50, resume: bool = False):
        """Run NSGA-II optimization.

        Args:
            n_trials: Number of NEW FEA trials to execute; seeded prior
                trials do not count against this budget.
            resume: When True, load the existing study (load_if_exists) and
                skip re-seeding if it already contains trials.
        """
        logger.info("\n" + "=" * 70)
        logger.info("M1 MIRROR NSGA-II PURE FEA OPTIMIZATION V13")
        logger.info("=" * 70)
        logger.info(f"Prior FEA trials: {len(self.prior_data)}")
        logger.info(f"New trials to run: {n_trials}")
        logger.info(f"Objectives: {OBJ_NAMES}")

        start_time = time.time()

        # Create or load study.  Fixed seed=42 keeps sampling reproducible.
        sampler = NSGAIISampler(
            population_size=self.config.get('nsga2_settings', {}).get('population_size', 20),
            crossover_prob=self.config.get('nsga2_settings', {}).get('crossover_prob', 0.9),
            mutation_prob=self.config.get('nsga2_settings', {}).get('mutation_prob', 0.1),
            seed=42
        )

        study = optuna.create_study(
            study_name="v13_nsga2",
            storage=self.storage,
            directions=['minimize', 'minimize', 'minimize'],  # 3 objectives
            sampler=sampler,
            load_if_exists=resume
        )

        # Seed with prior data if starting fresh
        if not resume or len(study.trials) == 0:
            self.seed_from_prior(study)

        # Iteration-folder numbering continues from what is on disk, so a
        # resumed run never overwrites earlier FEA working directories.
        self.trial_count = self._get_next_trial_number()
        logger.info(f"Starting from trial {self.trial_count}")

        # Run optimization
        def objective(trial: optuna.Trial) -> Tuple[float, float, float]:
            # Sample parameters for every enabled design variable.
            params = {}
            for var in self.config['design_variables']:
                if var.get('enabled', False):
                    params[var['name']] = trial.suggest_float(var['name'], var['min'], var['max'])

            # Run FEA
            result = self.fea_runner.run_fea(params, self.trial_count)
            self.trial_count += 1

            if result is None:
                # Return worst-case values for failed trials so NSGA-II
                # drives away from failing regions instead of crashing.
                return (1000.0, 1000.0, 1000.0)

            # Store objectives as user attributes (for dashboard / seeding
            # of future studies).
            trial.set_user_attr('source', 'FEA')
            trial.set_user_attr('rel_filtered_rms_40_vs_20', result['objectives']['rel_filtered_rms_40_vs_20'])
            trial.set_user_attr('rel_filtered_rms_60_vs_20', result['objectives']['rel_filtered_rms_60_vs_20'])
            trial.set_user_attr('mfg_90_optician_workload', result['objectives']['mfg_90_optician_workload'])
            trial.set_user_attr('solve_time', result.get('solve_time', 0))

            # Values must match the order of `directions` / OBJ_NAMES.
            return (
                result['objectives']['rel_filtered_rms_40_vs_20'],
                result['objectives']['rel_filtered_rms_60_vs_20'],
                result['objectives']['mfg_90_optician_workload']
            )

        # Run
        try:
            study.optimize(
                objective,
                n_trials=n_trials,
                show_progress_bar=True,
                gc_after_trial=True
            )
        except KeyboardInterrupt:
            logger.info("\nOptimization interrupted by user")
        finally:
            # Always release the NX session, even on Ctrl-C.
            self.fea_runner.cleanup()

        # Print results
        elapsed = time.time() - start_time
        self._print_results(study, elapsed)
|
||||
|
||||
def _print_results(self, study: optuna.Study, elapsed: float):
    """Log a summary of the finished study and persist the Pareto front.

    Writes the Pareto-optimal trials (sorted by the first objective) to
    the log and dumps a JSON summary to RESULTS_DIR/final_results.json.

    Args:
        study: The completed (or interrupted) Optuna study.
        elapsed: Wall-clock optimization time in seconds.
    """
    banner = "=" * 70
    logger.info("\n" + banner)
    logger.info("OPTIMIZATION COMPLETE")
    logger.info(banner)
    logger.info(f"Time: {elapsed/60:.1f} min ({elapsed/3600:.2f} hours)")
    logger.info(f"Total trials: {len(study.trials)}")

    # Pareto front = non-dominated trials across all three objectives.
    front = study.best_trials
    logger.info(f"Pareto-optimal trials: {len(front)}")

    rule = "-" * 70
    logger.info("\nPareto Front:")
    logger.info(rule)
    logger.info(f"{'Trial':>6} {'40-20 (nm)':>12} {'60-20 (nm)':>12} {'Mfg (nm)':>12}")
    logger.info(rule)

    pareto_data = []
    for t in sorted(front, key=lambda tr: tr.values[0]):
        obj_40, obj_60, obj_mfg = t.values[0], t.values[1], t.values[2]
        logger.info(f"{t.number:>6} {obj_40:>12.2f} {obj_60:>12.2f} {obj_mfg:>12.2f}")
        pareto_data.append({
            'trial': t.number,
            'params': t.params,
            'objectives': {
                'rel_filtered_rms_40_vs_20': obj_40,
                'rel_filtered_rms_60_vs_20': obj_60,
                'mfg_90_optician_workload': obj_mfg,
            },
        })

    # Save results
    results = {
        'summary': {
            'total_trials': len(study.trials),
            'pareto_size': len(front),
            'elapsed_hours': elapsed / 3600,
        },
        'pareto_front': pareto_data,
    }

    out_path = RESULTS_DIR / 'final_results.json'
    with open(out_path, 'w') as f:
        json.dump(results, f, indent=2)

    logger.info(f"\nResults saved to {out_path}")
|
||||
|
||||
# ============================================================================
|
||||
# Main
|
||||
# ============================================================================
|
||||
|
||||
def main():
    """CLI entry point for the V13 pure-FEA NSGA-II study.

    Without ``--start`` this only prints a usage banner — a guard against
    accidentally launching a multi-hour FEA run. With ``--start`` it loads
    the JSON config from CONFIG_PATH and runs the optimizer.
    """
    parser = argparse.ArgumentParser(description='M1 Mirror NSGA-II V13')
    parser.add_argument('--start', action='store_true', help='Start optimization')
    parser.add_argument('--trials', type=int, default=50, help='Number of new FEA trials')
    parser.add_argument('--resume', action='store_true', help='Resume from existing study')
    args = parser.parse_args()

    if not args.start:
        print("M1 Mirror NSGA-II Pure FEA Optimization V13")
        print("=" * 50)
        print("\nUsage:")
        print(" python run_optimization.py --start")
        print(" python run_optimization.py --start --trials 55")
        print(" python run_optimization.py --start --trials 55 --resume")
        print("\nFor 8-hour overnight run (~55 trials at 8-9 min/trial):")
        print(" python run_optimization.py --start --trials 55")
        print("\nThis will:")
        # Plain literals here: the originals were f-strings interpolating a
        # bare constant ({107}) or containing no placeholders at all.
        # Printed text is unchanged.
        print(" 1. Load ~107 FEA trials from V11 database")
        print(" 2. Load additional FEA trials from V12 database")
        print(" 3. Seed NSGA-II with all prior FEA data")
        print(" 4. Run pure FEA multi-objective optimization")
        print(" 5. No surrogate - every trial is real FEA")
        return

    with open(CONFIG_PATH, 'r') as f:
        config = json.load(f)

    optimizer = NSGA2Optimizer(config)
    optimizer.run(n_trials=args.trials, resume=args.resume)
|
||||
|
||||
# Script entry point: delegate to main() so the module stays importable
# (e.g. for seeding/analysis helpers) without triggering an FEA run.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user