refactor: Reorganize code structure and create tests directory

- Consolidate surrogates module to processors/surrogates/
- Move ensemble_surrogate.py to proper location
- Add deprecation shim for old import path
- Create tests/ directory with pytest structure
- Move test files from archive/test_scripts/
- Add conftest.py with shared fixtures

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-07 09:01:37 -05:00
parent 155e2a1b8e
commit 7bdb74f93b
9 changed files with 61 additions and 10 deletions

View File

@@ -1,164 +0,0 @@
"""
Test script for Protocol 10 v2.0 Adaptive Characterization.
This script demonstrates the new adaptive characterization feature that
intelligently determines when enough landscape exploration has been done.
Expected behavior:
- Simple problems: Stop at ~10-15 trials
- Complex problems: Continue to ~20-30 trials
"""
import numpy as np
import optuna
from pathlib import Path
from optimization_engine.processors.adaptive_characterization import CharacterizationStoppingCriterion
from optimization_engine.reporting.landscape_analyzer import LandscapeAnalyzer
def simple_smooth_function(trial):
    """Simple smooth quadratic function (should stop early ~10-15 trials)."""
    px = trial.suggest_float('x', -10, 10)
    py = trial.suggest_float('y', -10, 10)
    # Quadratic bowl with a single smooth minimum at (3, -2).
    dx = px - 3
    dy = py + 2
    return dx ** 2 + dy ** 2
def complex_multimodal_function(trial):
    """Complex multimodal function (should need more trials ~20-30)."""
    u = trial.suggest_float('x', -5, 5)
    v = trial.suggest_float('y', -5, 5)
    # Rastrigin function (multimodal, many local minima).
    amp = 10
    dims = 2
    ripple_u = u ** 2 - amp * np.cos(2 * np.pi * u)
    ripple_v = v ** 2 - amp * np.cos(2 * np.pi * v)
    return amp * dims + (ripple_u + ripple_v)
def test_adaptive_characterization(
    objective_function,
    function_name: str,
    expected_trials_range: tuple
):
    """Test adaptive characterization on a given function."""
    bar = '=' * 70
    lo, hi = expected_trials_range
    print(f"\n{bar}")
    print(f" TESTING: {function_name}")
    print(f" Expected trials: {lo}-{hi}")
    print(f"{bar}\n")

    # Per-function tracking directory for the criterion's diagnostics.
    slug = function_name.lower().replace(' ', '_')
    track_dir = Path(f"test_results/adaptive_char_{slug}")
    track_dir.mkdir(parents=True, exist_ok=True)

    # Landscape analyzer plus the adaptive stopping criterion under test.
    analyzer = LandscapeAnalyzer(min_trials_for_analysis=10)
    criterion = CharacterizationStoppingCriterion(
        min_trials=10,
        max_trials=30,
        confidence_threshold=0.85,
        check_interval=5,
        verbose=True,
        tracking_dir=track_dir
    )

    # Pure random sampling: we are characterizing the landscape, not optimizing.
    study = optuna.create_study(
        study_name=f"test_{slug}",
        direction='minimize',
        sampler=optuna.samplers.RandomSampler()
    )

    # Alternate batches of trials with landscape analysis until the
    # criterion decides exploration is sufficient.
    batch = 5
    while not criterion.should_stop(study):
        study.optimize(objective_function, n_trials=batch)
        landscape = analyzer.analyze(study)
        if landscape.get('ready', False):
            done = [t for t in study.trials
                    if t.state == optuna.trial.TrialState.COMPLETE]
            criterion.update(landscape, len(done))

    done = [t for t in study.trials
            if t.state == optuna.trial.TrialState.COMPLETE]
    actual_trials = len(done)
    print(criterion.get_summary_report())

    # Verify the trial count landed in the expected window.
    in_range = lo <= actual_trials <= hi
    status = "PASS" if in_range else "FAIL"
    print(f"\n{bar}")
    print(f" RESULT: {status}")
    print(f" Actual trials: {actual_trials}")
    print(f" Expected range: {lo}-{hi}")
    print(f" In range: {'YES' if in_range else 'NO'}")
    print(f" Stop reason: {criterion.stop_reason}")
    print(f" Final confidence: {criterion.final_confidence:.1%}")
    print(f"{bar}\n")

    return {
        'function': function_name,
        'expected_range': expected_trials_range,
        'actual_trials': actual_trials,
        'in_range': in_range,
        'stop_reason': criterion.stop_reason,
        'confidence': criterion.final_confidence
    }
def main():
    """Run all adaptive characterization tests."""
    bar = "=" * 70
    print("\n" + bar)
    print(" PROTOCOL 10 v2.0: ADAPTIVE CHARACTERIZATION TESTS")
    print(bar)

    # (name, objective, expected-trial window) for each scenario:
    # a simple bowl should stop early, a Rastrigin needs more trials.
    cases = [
        ("Simple Smooth Quadratic", simple_smooth_function, (10, 20)),
        ("Complex Multimodal (Rastrigin)", complex_multimodal_function, (15, 30)),
    ]
    results = [
        test_adaptive_characterization(
            objective_function=fn,
            function_name=name,
            expected_trials_range=window
        )
        for name, fn, window in cases
    ]

    print("\n" + bar)
    print(" TEST SUMMARY")
    print(bar)
    for result in results:
        status = "PASS" if result['in_range'] else "FAIL"
        lo, hi = result['expected_range']
        print(f"\n [{status}] {result['function']}")
        print(f" Expected: {lo}-{hi} trials")
        print(f" Actual: {result['actual_trials']} trials")
        print(f" Confidence: {result['confidence']:.1%}")

    # Overall pass/fail across all scenarios.
    all_passed = all(r['in_range'] for r in results)
    overall_status = "ALL TESTS PASSED" if all_passed else "SOME TESTS FAILED"
    print("\n" + bar)
    print(f" {overall_status}")
    print(bar + "\n")


if __name__ == "__main__":
    main()

View File

@@ -1,58 +0,0 @@
"""Test neural surrogate integration."""
import time
from optimization_engine.processors.surrogates.neural_surrogate import create_surrogate_for_study
print("Testing Neural Surrogate Integration")
print("=" * 60)
# Create surrogate with auto-detection
surrogate = create_surrogate_for_study()
if surrogate is None:
print("ERROR: Failed to create surrogate")
exit(1)
print(f"Surrogate created successfully!")
print(f" Device: {surrogate.device}")
print(f" Nodes: {surrogate.num_nodes}")
print(f" Model val_loss: {surrogate.best_val_loss:.4f}")
# Test prediction
test_params = {
"beam_half_core_thickness": 7.0,
"beam_face_thickness": 3.0,
"holes_diameter": 40.0,
"hole_count": 10.0
}
print(f"\nTest prediction with params: {test_params}")
results = surrogate.predict(test_params)
print(f"\nResults:")
print(f" Max displacement: {results['max_displacement']:.6f} mm")
print(f" Max stress: {results['max_stress']:.2f} (approx)")
print(f" Inference time: {results['inference_time_ms']:.2f} ms")
# Speed test
n = 100
start = time.time()
for _ in range(n):
surrogate.predict(test_params)
elapsed = time.time() - start
print(f"\nSpeed test: {n} predictions in {elapsed:.3f}s")
print(f" Average: {elapsed/n*1000:.2f} ms per prediction")
# Compare with FEA expectation
# From training data, typical max_displacement is ~0.02-0.03 mm
print(f"\nExpected range (from training data):")
print(f" Max displacement: ~0.02-0.03 mm")
print(f" Max stress: ~200-300 MPa")
stats = surrogate.get_statistics()
print(f"\nStatistics:")
print(f" Total predictions: {stats['total_predictions']}")
print(f" Average time: {stats['average_time_ms']:.2f} ms")
print("\nNeural surrogate test PASSED!")

View File

@@ -1,35 +0,0 @@
"""Test neural surrogate integration"""
import sys
from pathlib import Path
# Add project paths
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))
sys.path.insert(0, str(project_root / 'atomizer-field'))
from optimization_engine.processors.surrogates.neural_surrogate import create_parametric_surrogate_for_study
# Create surrogate
print("Creating parametric surrogate...")
surrogate = create_parametric_surrogate_for_study(project_root=project_root)
if surrogate:
print('Surrogate created successfully!')
print(f'Design vars: {surrogate.design_var_names}')
print(f'Number of nodes: {surrogate.num_nodes}')
# Test prediction with example params
test_params = {name: 2.0 for name in surrogate.design_var_names}
print(f'\nTest params: {test_params}')
results = surrogate.predict(test_params)
print(f'\nTest prediction:')
print(f' Mass: {results["mass"]:.2f}')
print(f' Frequency: {results["frequency"]:.2f}')
print(f' Max Displacement: {results["max_displacement"]:.6f}')
print(f' Max Stress: {results["max_stress"]:.2f}')
print(f' Inference time: {results["inference_time_ms"]:.2f} ms')
print('\nSurrogate is ready for use in optimization!')
else:
print('Failed to create surrogate')

View File

@@ -1,61 +0,0 @@
"""Test parametric surrogate integration."""
import time
from optimization_engine.processors.surrogates.neural_surrogate import create_parametric_surrogate_for_study
print("Testing Parametric Neural Surrogate")
print("=" * 60)
# Create surrogate with auto-detection
surrogate = create_parametric_surrogate_for_study()
if surrogate is None:
print("ERROR: Failed to create surrogate")
exit(1)
print(f"Surrogate created successfully!")
print(f" Device: {surrogate.device}")
print(f" Nodes: {surrogate.num_nodes}")
print(f" Model val_loss: {surrogate.best_val_loss:.4f}")
print(f" Design vars: {surrogate.design_var_names}")
# Test prediction with example params
test_params = {
"beam_half_core_thickness": 7.0,
"beam_face_thickness": 2.5,
"holes_diameter": 35.0,
"hole_count": 10.0
}
print(f"\nTest prediction with params: {test_params}")
results = surrogate.predict(test_params)
print(f"\nResults:")
print(f" Mass: {results['mass']:.2f} g")
print(f" Frequency: {results['frequency']:.2f} Hz")
print(f" Max displacement: {results['max_displacement']:.6f} mm")
print(f" Max stress: {results['max_stress']:.2f} MPa")
print(f" Inference time: {results['inference_time_ms']:.2f} ms")
# Speed test
n = 100
start = time.time()
for _ in range(n):
surrogate.predict(test_params)
elapsed = time.time() - start
print(f"\nSpeed test: {n} predictions in {elapsed:.3f}s")
print(f" Average: {elapsed/n*1000:.2f} ms per prediction")
# Compare with training data range
print(f"\nExpected range (from training data):")
print(f" Mass: ~2808 - 5107 g")
print(f" Frequency: ~15.8 - 21.9 Hz")
print(f" Max displacement: ~0.02-0.03 mm")
stats = surrogate.get_statistics()
print(f"\nStatistics:")
print(f" Total predictions: {stats['total_predictions']}")
print(f" Average time: {stats['average_time_ms']:.2f} ms")
print("\nParametric surrogate test PASSED!")

View File

@@ -1,139 +0,0 @@
"""
Test script for training data export functionality.
Creates a simple beam optimization study with training data export enabled
to verify end-to-end functionality of AtomizerField training data collection.
"""
import json
import shutil
from pathlib import Path
# Configuration for test study with training data export
test_config = {
    "study_name": "training_data_export_test",
    # Simcenter solution / FEM model files the study drives.
    "sim_file": "examples/Models/Circular Plate/Circular_Plate.sim",
    "fem_file": "examples/Models/Circular Plate/Circular_Plate_fem1.fem",
    # Two geometric design variables with their allowed ranges.
    "design_variables": [
        {
            "name": "thickness",
            "expression_name": "thickness",
            "min": 2.0,
            "max": 8.0
        },
        {
            "name": "radius",
            "expression_name": "radius",
            "min": 80.0,
            "max": 120.0
        }
    ],
    # Multi-objective setup: minimize both peak stress and mass.
    "objectives": [
        {
            "name": "max_stress",
            "type": "minimize",
            "extractor": {
                "type": "result_parameter",
                "parameter_name": "Max Von Mises Stress"
            }
        },
        {
            "name": "mass",
            "type": "minimize",
            "extractor": {
                "type": "expression",
                "expression_name": "mass"
            }
        }
    ],
    # Constraint: peak stress must stay below 300 (units per extractor).
    "constraints": [
        {
            "name": "stress_limit",
            "type": "less_than",
            "value": 300.0,
            "extractor": {
                "type": "result_parameter",
                "parameter_name": "Max Von Mises Stress"
            }
        }
    ],
    # Deliberately small NSGA-II run: just enough trials to exercise export.
    "optimization": {
        "algorithm": "NSGA-II",
        "n_trials": 10,
        "population_size": 4
    },
    # Enable training data export
    "training_data_export": {
        "enabled": True,
        "export_dir": "atomizer_field_training_data/test_study_001"
    },
    "version": "1.0"
}
def main():
    """Run test study with training data export.

    Creates the study directory tree, writes the workflow config and a
    small runner script, and prints instructions for launching the run
    manually. Does not start the optimization itself.
    """
    # Study layout: 1_setup holds the config, 2_results receives outputs.
    study_dir = Path("studies/training_data_export_test")
    study_dir.mkdir(parents=True, exist_ok=True)
    setup_dir = study_dir / "1_setup"
    setup_dir.mkdir(exist_ok=True)
    results_dir = study_dir / "2_results"
    results_dir.mkdir(exist_ok=True)

    # Save workflow config (explicit encoding so output is stable
    # across platforms).
    config_path = setup_dir / "workflow_config.json"
    with open(config_path, 'w', encoding='utf-8') as f:
        json.dump(test_config, f, indent=2)

    print("=" * 80)
    print("TRAINING DATA EXPORT TEST STUDY")
    print("=" * 80)
    print(f"\nStudy created: {study_dir}")
    print(f"Config saved: {config_path}")
    print("\nTraining data will be exported to:")
    print(f" {test_config['training_data_export']['export_dir']}")
    print(f"\nNumber of trials: {test_config['optimization']['n_trials']}")
    print("\n" + "=" * 80)
    print("To run the test:")
    print(f" cd {study_dir}")
    print(" python run_optimization.py")
    print("=" * 80)

    # Create run_optimization.py in the study directory. The embedded
    # script must be valid Python on its own, so it is kept properly
    # indented inside the literal.
    run_script = study_dir / "run_optimization.py"
    run_script_content = '''"""Run optimization for training data export test."""
import sys
from pathlib import Path

# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from optimization_engine.core.runner import OptimizationRunner


def main():
    """Run the optimization."""
    config_path = Path(__file__).parent / "1_setup" / "workflow_config.json"
    runner = OptimizationRunner(config_path)
    runner.run()


if __name__ == "__main__":
    main()
'''
    run_script.write_text(run_script_content, encoding='utf-8')

    print(f"\nRun script created: {run_script}")
    print("\nTest study setup complete!")


if __name__ == "__main__":
    main()