feat: Add AtomizerField training data export and intelligent model discovery

Major additions:
- Training data export system for AtomizerField neural network training
- Bracket stiffness optimization study with 50+ training samples
- Intelligent NX model discovery (auto-detect solutions, expressions, mesh)
- Result extractors module for displacement, stress, frequency, mass
- User-generated NX journals for advanced workflows
- Archive structure for legacy scripts and test outputs
- Protocol documentation and dashboard launcher

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-11-26 12:01:50 -05:00
parent a0c008a593
commit 2b3573ec42
949 changed files with 1405144 additions and 470 deletions

View File

@@ -0,0 +1,164 @@
"""
Test script for Protocol 10 v2.0 Adaptive Characterization.
This script demonstrates the new adaptive characterization feature that
intelligently determines when enough landscape exploration has been done.
Expected behavior:
- Simple problems: Stop at ~10-15 trials
- Complex problems: Continue to ~20-30 trials
"""
import numpy as np
import optuna
from pathlib import Path
from optimization_engine.adaptive_characterization import CharacterizationStoppingCriterion
from optimization_engine.landscape_analyzer import LandscapeAnalyzer
def simple_smooth_function(trial):
    """Simple smooth quadratic function (should stop early ~10-15 trials).

    A convex bowl with a single minimum at (3, -2): the easiest possible
    landscape for an adaptive stopping criterion to characterize.
    """
    x_val = trial.suggest_float('x', -10, 10)
    y_val = trial.suggest_float('y', -10, 10)
    dx = x_val - 3
    dy = y_val + 2
    return dx ** 2 + dy ** 2
def complex_multimodal_function(trial):
    """Complex multimodal function (should need more trials ~20-30).

    2-D Rastrigin: a regular grid of local minima, which forces the
    characterization to keep exploring before it becomes confident.
    """
    x_val = trial.suggest_float('x', -5, 5)
    y_val = trial.suggest_float('y', -5, 5)
    amplitude = 10
    dims = 2

    def rastrigin_term(v):
        # Per-coordinate contribution: v^2 - A*cos(2*pi*v).
        return v ** 2 - amplitude * np.cos(2 * np.pi * v)

    # Grouping matches the original expression: A*n + (x_term + y_term).
    return amplitude * dims + (rastrigin_term(x_val) + rastrigin_term(y_val))
def test_adaptive_characterization(
    objective_function,
    function_name: str,
    expected_trials_range: tuple
):
    """Run adaptive characterization on one objective and report pass/fail.

    Args:
        objective_function: Optuna-style objective (takes a trial).
        function_name: Human-readable label used in output and paths.
        expected_trials_range: (min, max) trial count considered a PASS.

    Returns:
        Summary dict with actual trial count, in-range flag, stop reason,
        and final confidence from the stopping criterion.
    """
    bar = '=' * 70
    print(f"\n{bar}")
    print(f" TESTING: {function_name}")
    print(f" Expected trials: {expected_trials_range[0]}-{expected_trials_range[1]}")
    print(f"{bar}\n")

    # Per-function tracking directory under test_results/.
    slug = function_name.lower().replace(' ', '_')
    tracking_dir = Path(f"test_results/adaptive_char_{slug}")
    tracking_dir.mkdir(parents=True, exist_ok=True)

    # Landscape analyzer plus the adaptive stopping criterion under test.
    analyzer = LandscapeAnalyzer(min_trials_for_analysis=10)
    stopping_criterion = CharacterizationStoppingCriterion(
        min_trials=10,
        max_trials=30,
        confidence_threshold=0.85,
        check_interval=5,
        verbose=True,
        tracking_dir=tracking_dir
    )

    study = optuna.create_study(
        study_name=f"test_{slug}",
        direction='minimize',
        sampler=optuna.samplers.RandomSampler()
    )

    # Alternate between running a small batch of trials and asking the
    # criterion whether characterization is complete.
    batch_size = 5
    while not stopping_criterion.should_stop(study):
        study.optimize(objective_function, n_trials=batch_size)
        landscape = analyzer.analyze(study)
        # Only feed the criterion once the analyzer has enough data.
        if landscape.get('ready', False):
            done = [t for t in study.trials
                    if t.state == optuna.trial.TrialState.COMPLETE]
            stopping_criterion.update(landscape, len(done))

    done = [t for t in study.trials
            if t.state == optuna.trial.TrialState.COMPLETE]
    actual_trials = len(done)
    print(stopping_criterion.get_summary_report())

    # Did the criterion stop within the expected trial-count window?
    in_range = expected_trials_range[0] <= actual_trials <= expected_trials_range[1]
    status = "PASS" if in_range else "FAIL"
    print(f"\n{bar}")
    print(f" RESULT: {status}")
    print(f" Actual trials: {actual_trials}")
    print(f" Expected range: {expected_trials_range[0]}-{expected_trials_range[1]}")
    print(f" In range: {'YES' if in_range else 'NO'}")
    print(f" Stop reason: {stopping_criterion.stop_reason}")
    print(f" Final confidence: {stopping_criterion.final_confidence:.1%}")
    print(f"{bar}\n")

    return {
        'function': function_name,
        'expected_range': expected_trials_range,
        'actual_trials': actual_trials,
        'in_range': in_range,
        'stop_reason': stopping_criterion.stop_reason,
        'confidence': stopping_criterion.final_confidence
    }
def main():
    """Run all adaptive characterization tests."""
    bar = "=" * 70
    print("\n" + bar)
    print(" PROTOCOL 10 v2.0: ADAPTIVE CHARACTERIZATION TESTS")
    print(bar)

    # (objective, display label, (min expected trials, max expected trials));
    # the smooth case should stop early, the Rastrigin case later.
    cases = [
        (simple_smooth_function, "Simple Smooth Quadratic", (10, 20)),
        (complex_multimodal_function, "Complex Multimodal (Rastrigin)", (15, 30)),
    ]
    results = [
        test_adaptive_characterization(
            objective_function=fn,
            function_name=label,
            expected_trials_range=span
        )
        for fn, label, span in cases
    ]

    # Per-test summary.
    print("\n" + bar)
    print(" TEST SUMMARY")
    print(bar)
    for result in results:
        status = "PASS" if result['in_range'] else "FAIL"
        print(f"\n [{status}] {result['function']}")
        print(f" Expected: {result['expected_range'][0]}-{result['expected_range'][1]} trials")
        print(f" Actual: {result['actual_trials']} trials")
        print(f" Confidence: {result['confidence']:.1%}")

    # Overall verdict across all tests.
    all_passed = all(r['in_range'] for r in results)
    overall_status = "ALL TESTS PASSED" if all_passed else "SOME TESTS FAILED"
    print(f"\n{bar}")
    print(f" {overall_status}")
    print(f"{bar}\n")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,74 @@
# Backend Testing Guide
## 1. Start Backend Server
```bash
cd atomizer-dashboard/backend
python -m uvicorn api.main:app --reload --port 8000
```
## 2. Test REST Endpoints
### Get Study Status
```bash
curl http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/status
```
### Get Pareto Front
```bash
curl http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/pareto-front
```
### Get Trial History
```bash
curl http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/trials
```
### Generate HTML Report
```bash
curl -X POST "http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/generate-report?format=html"
```
### List Studies
```bash
curl http://localhost:8000/api/optimization/studies
```
## 3. Test WebSocket (Browser Console)
Open browser to `http://localhost:8000` and run in console:
```javascript
const ws = new WebSocket('ws://localhost:8000/api/ws/optimization/bracket_stiffness_optimization_V3');
ws.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('Received:', data);
};
ws.onopen = () => console.log('Connected to optimization stream');
ws.onerror = (error) => console.error('WebSocket error:', error);
```
You should see a `connected` message with current trial count.
## 4. Test Mesh Conversion (If Nastran Files Available)
```bash
curl -X POST http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/convert-mesh
```
## 5. Download Generated Report
After generating report, download it:
```bash
curl http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/reports/optimization_report.html -o test_report.html
```
## Expected Results
- **Status endpoint**: Should return study config, trial counts, best values
- **Pareto front**: Should return 48 Pareto-optimal solutions
- **Trials endpoint**: Should return all 100 trial records
- **Report generation**: Should create HTML file in `studies/bracket_stiffness_optimization_V3/2_results/reports/`
- **WebSocket**: Should show connected message with current_trials = 100

View File

@@ -0,0 +1,100 @@
# Frontend Integration Testing Guide
## 1. Start Both Servers
### Terminal 1 - Backend
```bash
cd atomizer-dashboard/backend
python -m uvicorn api.main:app --reload --port 8000
```
### Terminal 2 - Frontend
```bash
cd atomizer-dashboard/frontend
npm run dev
```
Frontend will be at: `http://localhost:3003`
## 2. Test API Integration
The frontend should be able to:
### Fetch Studies List
```typescript
fetch('http://localhost:8000/api/optimization/studies')
.then(r => r.json())
.then(data => console.log('Studies:', data));
```
### Get Study Status
```typescript
fetch('http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/status')
.then(r => r.json())
.then(data => console.log('Status:', data));
```
### Connect WebSocket
```typescript
const ws = new WebSocket('ws://localhost:8000/api/ws/optimization/bracket_stiffness_optimization_V3');
ws.onmessage = (event) => {
const message = JSON.parse(event.data);
console.log('Message type:', message.type);
console.log('Data:', message.data);
};
```
## 3. Frontend Development Tasks
Now your frontend developer can implement:
### Phase 1: Basic Study Viewing
- Studies list page
- Study detail page with current status
- Trial history table
### Phase 2: Real-Time Updates
- WebSocket connection manager
- Live trial updates in UI
- Progress bar updates
- "New Best" notifications
### Phase 3: Pareto Front Visualization
- Scatter plot of Pareto solutions
- Interactive filtering
- Solution comparison
### Phase 4: 3D Visualization
- GLTF model viewer (Three.js / react-three-fiber)
- Load mesh from `/api/optimization/studies/{id}/mesh/model.gltf`
- Color-coded stress/displacement display
### Phase 5: Report Generation
- Report generation buttons
- Download generated reports
- Preview HTML reports in-browser
## 4. Test Data Available
**bracket_stiffness_optimization_V3** has:
- 100 completed trials
- 48 Pareto-optimal solutions
- Multi-objective: minimize mass + maximize stiffness
- Design variables: rib_thickness_1, rib_thickness_2, rib_thickness_3, base_thickness
Perfect for testing all dashboard features.
## 5. API Endpoints Reference
All endpoints are documented in the technical summary provided earlier.
Key endpoints:
- `GET /api/optimization/studies` - List all studies
- `GET /api/optimization/studies/{id}/status` - Get study status
- `GET /api/optimization/studies/{id}/trials` - Get trial history
- `GET /api/optimization/studies/{id}/pareto-front` - Get Pareto solutions
- `POST /api/optimization/studies/{id}/generate-report` - Generate report
- `WS /api/ws/optimization/{id}` - WebSocket stream
All support CORS and are ready for React integration.

View File

@@ -0,0 +1,58 @@
"""Test neural surrogate integration."""
import time
from optimization_engine.neural_surrogate import create_surrogate_for_study
print("Testing Neural Surrogate Integration")
print("=" * 60)
# Create surrogate with auto-detection
surrogate = create_surrogate_for_study()
if surrogate is None:
print("ERROR: Failed to create surrogate")
exit(1)
print(f"Surrogate created successfully!")
print(f" Device: {surrogate.device}")
print(f" Nodes: {surrogate.num_nodes}")
print(f" Model val_loss: {surrogate.best_val_loss:.4f}")
# Test prediction
test_params = {
"beam_half_core_thickness": 7.0,
"beam_face_thickness": 3.0,
"holes_diameter": 40.0,
"hole_count": 10.0
}
print(f"\nTest prediction with params: {test_params}")
results = surrogate.predict(test_params)
print(f"\nResults:")
print(f" Max displacement: {results['max_displacement']:.6f} mm")
print(f" Max stress: {results['max_stress']:.2f} (approx)")
print(f" Inference time: {results['inference_time_ms']:.2f} ms")
# Speed test
n = 100
start = time.time()
for _ in range(n):
surrogate.predict(test_params)
elapsed = time.time() - start
print(f"\nSpeed test: {n} predictions in {elapsed:.3f}s")
print(f" Average: {elapsed/n*1000:.2f} ms per prediction")
# Compare with FEA expectation
# From training data, typical max_displacement is ~0.02-0.03 mm
print(f"\nExpected range (from training data):")
print(f" Max displacement: ~0.02-0.03 mm")
print(f" Max stress: ~200-300 MPa")
stats = surrogate.get_statistics()
print(f"\nStatistics:")
print(f" Total predictions: {stats['total_predictions']}")
print(f" Average time: {stats['average_time_ms']:.2f} ms")
print("\nNeural surrogate test PASSED!")

View File

@@ -0,0 +1,122 @@
# New Optimization Testing Guide
## Test Real-Time Dashboard with Active Optimization
This will let you see the WebSocket updates in real-time as trials complete.
## 1. Start Dashboard (Both Servers)
### Terminal 1 - Backend
```bash
cd atomizer-dashboard/backend
python -m uvicorn api.main:app --reload --port 8000
```
### Terminal 2 - Frontend
```bash
cd atomizer-dashboard/frontend
npm run dev
```
Visit: `http://localhost:3003`
## 2. Connect WebSocket to Existing Study
Open browser console and run:
```javascript
const ws = new WebSocket('ws://localhost:8000/api/ws/optimization/bracket_stiffness_optimization_V3');
ws.onmessage = (event) => {
const message = JSON.parse(event.data);
console.log(`[${message.type}]`, message.data);
};
ws.onopen = () => console.log('✓ Connected to optimization stream');
```
You should see:
```
✓ Connected to optimization stream
[connected] {study_id: "bracket_stiffness_optimization_V3", current_trials: 100, ...}
```
## 3. Start a Small Optimization Run (5 Trials)
### Terminal 3 - Run Optimization
```bash
cd studies/bracket_stiffness_optimization_V3
python run_optimization.py --trials 5
```
## 4. Watch Real-Time Events
As trials complete, you'll see WebSocket events:
```javascript
// Trial completed
[trial_completed] {
trial_number: 101,
objective: 0.0234,
params: {rib_thickness_1: 2.3, ...},
...
}
// Progress update
[progress] {
current: 101,
total: 105,
percentage: 96.19
}
// New best found (if better than previous)
[new_best] {
trial_number: 103,
objective: 0.0198,
...
}
// Pareto front update (multi-objective)
[pareto_front] {
pareto_front: [{...}, {...}],
count: 49
}
```
## 5. Test Report Generation While Running
While optimization is running, generate a report:
```bash
curl -X POST "http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/generate-report?format=html"
```
Then download it:
```bash
curl http://localhost:8000/api/optimization/studies/bracket_stiffness_optimization_V3/reports/optimization_report.html -o report.html
```
Open `report.html` in browser to see formatted report with all 100+ trials.
## 6. Expected Behavior
- WebSocket receives events as trials complete (2-5 minute intervals per trial)
- Progress percentage updates
- Pareto front grows if new non-dominated solutions found
- Report can be generated at any point during optimization
- All endpoints remain responsive during optimization
## 7. Production Testing
For full production test:
```bash
python run_optimization.py --trials 50
```
This will run for several hours and provide extensive real-time data for dashboard testing.
## Notes
- Each trial takes 2-5 minutes (NX simulation solve time)
- WebSocket will broadcast updates immediately upon trial completion
- Frontend should handle all 6 event types gracefully
- Reports update dynamically as new trials complete

View File

@@ -0,0 +1,35 @@
"""Test neural surrogate integration"""
import sys
from pathlib import Path
# Add project paths
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))
sys.path.insert(0, str(project_root / 'atomizer-field'))
from optimization_engine.neural_surrogate import create_parametric_surrogate_for_study
# Create surrogate
print("Creating parametric surrogate...")
surrogate = create_parametric_surrogate_for_study(project_root=project_root)
if surrogate:
print('Surrogate created successfully!')
print(f'Design vars: {surrogate.design_var_names}')
print(f'Number of nodes: {surrogate.num_nodes}')
# Test prediction with example params
test_params = {name: 2.0 for name in surrogate.design_var_names}
print(f'\nTest params: {test_params}')
results = surrogate.predict(test_params)
print(f'\nTest prediction:')
print(f' Mass: {results["mass"]:.2f}')
print(f' Frequency: {results["frequency"]:.2f}')
print(f' Max Displacement: {results["max_displacement"]:.6f}')
print(f' Max Stress: {results["max_stress"]:.2f}')
print(f' Inference time: {results["inference_time_ms"]:.2f} ms')
print('\nSurrogate is ready for use in optimization!')
else:
print('Failed to create surrogate')

View File

@@ -0,0 +1,61 @@
"""Test parametric surrogate integration."""
import time
from optimization_engine.neural_surrogate import create_parametric_surrogate_for_study
print("Testing Parametric Neural Surrogate")
print("=" * 60)
# Create surrogate with auto-detection
surrogate = create_parametric_surrogate_for_study()
if surrogate is None:
print("ERROR: Failed to create surrogate")
exit(1)
print(f"Surrogate created successfully!")
print(f" Device: {surrogate.device}")
print(f" Nodes: {surrogate.num_nodes}")
print(f" Model val_loss: {surrogate.best_val_loss:.4f}")
print(f" Design vars: {surrogate.design_var_names}")
# Test prediction with example params
test_params = {
"beam_half_core_thickness": 7.0,
"beam_face_thickness": 2.5,
"holes_diameter": 35.0,
"hole_count": 10.0
}
print(f"\nTest prediction with params: {test_params}")
results = surrogate.predict(test_params)
print(f"\nResults:")
print(f" Mass: {results['mass']:.2f} g")
print(f" Frequency: {results['frequency']:.2f} Hz")
print(f" Max displacement: {results['max_displacement']:.6f} mm")
print(f" Max stress: {results['max_stress']:.2f} MPa")
print(f" Inference time: {results['inference_time_ms']:.2f} ms")
# Speed test
n = 100
start = time.time()
for _ in range(n):
surrogate.predict(test_params)
elapsed = time.time() - start
print(f"\nSpeed test: {n} predictions in {elapsed:.3f}s")
print(f" Average: {elapsed/n*1000:.2f} ms per prediction")
# Compare with training data range
print(f"\nExpected range (from training data):")
print(f" Mass: ~2808 - 5107 g")
print(f" Frequency: ~15.8 - 21.9 Hz")
print(f" Max displacement: ~0.02-0.03 mm")
stats = surrogate.get_statistics()
print(f"\nStatistics:")
print(f" Total predictions: {stats['total_predictions']}")
print(f" Average time: {stats['average_time_ms']:.2f} ms")
print("\nParametric surrogate test PASSED!")

View File

@@ -0,0 +1,139 @@
"""
Test script for training data export functionality.
Creates a simple beam optimization study with training data export enabled
to verify end-to-end functionality of AtomizerField training data collection.
"""
import json
import shutil
from pathlib import Path
# Configuration for test study with training data export
# Two-variable circular-plate model with two minimize objectives, one
# stress constraint, and AtomizerField training data export enabled.
test_config = {
    "study_name": "training_data_export_test",
    # NX simulation / FEM model inputs (paths relative to the repo root).
    "sim_file": "examples/Models/Circular Plate/Circular_Plate.sim",
    "fem_file": "examples/Models/Circular Plate/Circular_Plate_fem1.fem",
    # Design variables: each maps an NX expression to a [min, max] range.
    "design_variables": [
        {
            "name": "thickness",
            "expression_name": "thickness",
            "min": 2.0,
            "max": 8.0
        },
        {
            "name": "radius",
            "expression_name": "radius",
            "min": 80.0,
            "max": 120.0
        }
    ],
    # Objectives: extractor type selects how the value is read back
    # (solver result parameter vs. NX expression).
    "objectives": [
        {
            "name": "max_stress",
            "type": "minimize",
            "extractor": {
                "type": "result_parameter",
                "parameter_name": "Max Von Mises Stress"
            }
        },
        {
            "name": "mass",
            "type": "minimize",
            "extractor": {
                "type": "expression",
                "expression_name": "mass"
            }
        }
    ],
    # Constraints: stress must stay below 300 (units per solver setup —
    # presumably MPa; TODO confirm against the NX model).
    "constraints": [
        {
            "name": "stress_limit",
            "type": "less_than",
            "value": 300.0,
            "extractor": {
                "type": "result_parameter",
                "parameter_name": "Max Von Mises Stress"
            }
        }
    ],
    # Optimizer settings: small run (10 trials) to keep the test fast.
    "optimization": {
        "algorithm": "NSGA-II",
        "n_trials": 10,
        "population_size": 4
    },
    # Enable training data export
    "training_data_export": {
        "enabled": True,
        "export_dir": "atomizer_field_training_data/test_study_001"
    },
    # Config schema version.
    "version": "1.0"
}
def main():
    """Run test study with training data export.

    Creates studies/training_data_export_test/ with the standard study
    layout (1_setup, 2_results), writes the workflow config there, and
    drops a run_optimization.py launcher into the study directory.
    Paths are relative to the current working directory.
    """
    # Create study directory skeleton.
    study_dir = Path("studies/training_data_export_test")
    study_dir.mkdir(parents=True, exist_ok=True)
    setup_dir = study_dir / "1_setup"
    setup_dir.mkdir(exist_ok=True)
    results_dir = study_dir / "2_results"
    results_dir.mkdir(exist_ok=True)

    # Save workflow config
    config_path = setup_dir / "workflow_config.json"
    with open(config_path, 'w') as f:
        json.dump(test_config, f, indent=2)

    print("=" * 80)
    print("TRAINING DATA EXPORT TEST STUDY")
    print("=" * 80)
    print(f"\nStudy created: {study_dir}")
    print(f"Config saved: {config_path}")
    print(f"\nTraining data will be exported to:")
    print(f" {test_config['training_data_export']['export_dir']}")
    print(f"\nNumber of trials: {test_config['optimization']['n_trials']}")
    print("\n" + "=" * 80)
    print("To run the test:")
    print(f" cd {study_dir}")
    print(" python run_optimization.py")
    print("=" * 80)

    # Create run_optimization.py in study directory.
    # NOTE: the string below is written verbatim to disk — it is the
    # launcher the user runs from inside the study directory.
    run_script = study_dir / "run_optimization.py"
    run_script_content = '''"""Run optimization for training data export test."""
import sys
from pathlib import Path

# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from optimization_engine.runner import OptimizationRunner


def main():
    """Run the optimization."""
    config_path = Path(__file__).parent / "1_setup" / "workflow_config.json"
    runner = OptimizationRunner(config_path)
    runner.run()


if __name__ == "__main__":
    main()
'''
    with open(run_script, 'w') as f:
        f.write(run_script_content)

    print(f"\nRun script created: {run_script}")
    print("\nTest study setup complete!")


if __name__ == "__main__":
    main()