docs: Reorganize documentation structure
- Create DEVELOPMENT.md for tactical development tracking
- Simplify README.md to user-focused overview
- Streamline DEVELOPMENT_ROADMAP.md to focus on vision
- All docs now properly cross-referenced

Documentation now has clear separation:
- README: User overview
- DEVELOPMENT: Tactical todos and status
- ROADMAP: Strategic vision
- CHANGELOG: Version history
This commit is contained in:
152
tests/test_hooks_with_bracket.py
Normal file
152
tests/test_hooks_with_bracket.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""
Test: Validate Hook System with Bracket Optimization

Quick test (3 trials) to verify:

1. Hooks load correctly
2. Hooks execute at proper points
3. Optimization still works with hooks enabled
"""

from pathlib import Path
import sys

# Add project root to path so we can import atomizer_paths
sys.path.insert(0, str(Path(__file__).parent.parent))

# Use intelligent path resolution
import atomizer_paths
atomizer_paths.ensure_imports()

from optimization_engine.runner import OptimizationRunner
from optimization_engine.result_extractors.extractors import (
    stress_extractor,
    displacement_extractor
)


# Dummy functions for quick testing (no actual NX calls)
# Module-level trial counter; incremented by dummy_simulation_runner on each call.
_trial_count = 0
|
||||
def dummy_model_updater(design_vars: dict) -> None:
    """Simulate model update.

    Prints the prepared design variables instead of touching a real model.

    Args:
        design_vars: Mapping of design-variable name -> numeric value.
    """
    # Plain string literal: original used an f-string with no placeholders (lint F541).
    print("\n[MODEL UPDATE] Design variables prepared")
    for name, value in design_vars.items():
        print(f"  {name} = {value:.4f}")
|
||||
def dummy_simulation_runner() -> Path:
    """Simulate simulation run - return existing OP2 file.

    Re-uses an OP2 result from the bracket study instead of launching a real
    solve, and bumps the module-level trial counter for progress reporting.

    Returns:
        Path to the pre-existing OP2 results file.

    Raises:
        FileNotFoundError: If the expected OP2 file has not been generated yet.
    """
    global _trial_count
    _trial_count += 1

    # Use existing OP2 file from bracket study
    op2_file = atomizer_paths.studies() / "bracket_stress_minimization/optimization_results/bracket_sim1-solution_1.op2"

    if not op2_file.exists():
        raise FileNotFoundError(
            f"Test OP2 file not found: {op2_file}\n"
            "Please run a real solve first to generate this file."
        )

    print(f"\n[SIMULATION {_trial_count}] Using existing OP2: {op2_file.name}")
    return op2_file
|
||||
if __name__ == "__main__":
    # --- Banner: describe what this validation script does -----------------
    print("="*60)
    print("HOOK SYSTEM VALIDATION TEST")
    print("="*60)
    print("\nThis test will:")
    print("- Run 3 quick optimization trials")
    print("- Use dummy simulations (re-use existing OP2 file)")
    print("- Verify hooks execute at all lifecycle points")
    print("- Check that optimization completes successfully")
    print("="*60)

    config_path = atomizer_paths.studies() / "bracket_stress_minimization/optimization_config_stress_displacement.json"

    # Create runner (this should load plugins automatically)
    print("\n[INIT] Creating OptimizationRunner...")
    runner = OptimizationRunner(
        config_path=config_path,
        model_updater=dummy_model_updater,
        simulation_runner=dummy_simulation_runner,
        result_extractors={
            'stress_extractor': stress_extractor,
            'displacement_extractor': displacement_extractor
        }
    )

    # Check if hooks were loaded (summary comes from the runner's hook manager)
    print("\n[HOOKS] Checking hook system...")
    hook_summary = runner.hook_manager.get_summary()
    print(f" Total hooks: {hook_summary['total_hooks']}")
    print(f" Enabled hooks: {hook_summary['enabled_hooks']}")

    if hook_summary['total_hooks'] > 0:
        print("\n Hooks by point:")
        for point, info in hook_summary['by_hook_point'].items():
            if info['total'] > 0:
                print(f" {point}: {info['names']}")
    else:
        print(" WARNING: No hooks loaded! Plugin directory may be empty.")

    print("\n" + "="*60)
    print("RUNNING 3-TRIAL TEST")
    print("="*60)

    try:
        # Run just 3 trials for quick validation
        study = runner.run(
            study_name="hook_validation_test",
            n_trials=3,
            resume=False
        )

        print("\n" + "="*60)
        print("TEST COMPLETE!")
        print("="*60)

        # Check hook execution history and split into pass/fail buckets
        hook_history = runner.hook_manager.get_history()
        successful_hooks = [h for h in hook_history if h['success']]
        failed_hooks = [h for h in hook_history if not h['success']]

        # Plain strings below: originals were f-strings with no placeholders (F541).
        print("\nHook Execution Summary:")
        print(f" Total hook executions: {len(hook_history)}")
        print(f" Successful: {len(successful_hooks)}")
        print(f" Failed: {len(failed_hooks)}")

        if successful_hooks:
            print("\n Successful hook executions:")
            for h in successful_hooks[:10]:  # Show first 10
                print(f" Trial {h.get('trial_number', '?')}: {h['hook_name']} at {h['hook_point']}")

        if failed_hooks:
            print("\n FAILED hook executions:")
            for h in failed_hooks:
                print(f" Trial {h.get('trial_number', '?')}: {h['hook_name']} - {h.get('error', 'unknown error')}")

        # Report optimizer results (best objective value and parameters)
        print("\nOptimization Results:")
        print(f" Best value: {study.best_value:.2f}")
        print(" Best parameters:")
        for param, value in study.best_params.items():
            print(f" {param}: {value:.4f}")

        print("\n" + "="*60)
        print("VALIDATION PASSED!")
        print("="*60)
        print("\nThe hook system is working correctly:")
        print(" [OK] Hooks loaded from plugin directory")
        print(" [OK] Hooks executed during optimization")
        print(" [OK] Optimization completed successfully")
        print(" [OK] Results extracted correctly")

    except Exception as e:
        # Top-level script boundary: report the failure with traceback, exit nonzero.
        print(f"\n{'='*60}")
        print("TEST FAILED")
        print("="*60)
        print(f"{e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
|
||||
Reference in New Issue
Block a user