# 2025-11-17 21:24:02 -05:00  (stray VCS timestamp from extraction; kept as comment)
|
|
|
"""
|
|
|
|
|
Test Complete Workflow with Cantilever Beam Optimization
|
|
|
|
|
|
|
|
|
|
This script tests the complete professional optimization workflow:
|
|
|
|
|
1. Study creation
|
|
|
|
|
2. Mandatory benchmarking
|
|
|
|
|
3. Substudy creation with auto-numbering
|
|
|
|
|
4. Configuration
|
|
|
|
|
5. Integration testing
|
|
|
|
|
|
|
|
|
|
Run this after placing beam.prt and beam_sim1.sim in the study model/ directory.
|
|
|
|
|
|
|
|
|
|
Author: Antoine Letarte
|
|
|
|
|
Date: 2025-11-17
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import sys
|
|
|
|
|
from pathlib import Path
|
|
|
|
|
|
|
|
|
|
# Add parent directory to path
|
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
|
|
|
|
# NOTE(review): the following VCS commit-message text was accidentally embedded
# in the source; preserved verbatim as a comment so the file remains parseable.
# refactor: Major reorganization of optimization_engine module structure
# BREAKING CHANGE: Module paths have been reorganized for better maintainability.
# Backwards compatibility aliases with deprecation warnings are provided.
# New Structure:
# - core/ - Optimization runners (runner, intelligent_optimizer, etc.)
# - processors/ - Data processing
# - surrogates/ - Neural network surrogates
# - nx/ - NX/Nastran integration (solver, updater, session_manager)
# - study/ - Study management (creator, wizard, state, reset)
# - reporting/ - Reports and analysis (visualizer, report_generator)
# - config/ - Configuration management (manager, builder)
# - utils/ - Utilities (logger, auto_doc, etc.)
# - future/ - Research/experimental code
# Migration:
# - ~200 import changes across 125 files
# - All __init__.py files use lazy loading to avoid circular imports
# - Backwards compatibility layer supports old import paths with warnings
# - All existing functionality preserved
# To migrate existing code:
# OLD: from optimization_engine.nx_solver import NXSolver
# NEW: from optimization_engine.nx.solver import NXSolver
# OLD: from optimization_engine.runner import OptimizationRunner
# NEW: from optimization_engine.core.runner import OptimizationRunner
# 🤖 Generated with [Claude Code](https://claude.com/claude-code)
# Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
# 2025-12-29 12:30:59 -05:00
|
|
|
from optimization_engine.study.creator import StudyCreator
|
# 2025-11-17 21:24:02 -05:00  (stray VCS timestamp from extraction; kept as comment)
|
|
|
import logging
|
|
|
|
|
|
|
|
|
|
# Setup logging: timestamped records at INFO level for the whole test run.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)

# Module-level logger for this test script.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_complete_workflow():
    """Exercise the complete study-creation and benchmarking workflow.

    Walks through: study creation, directory-structure validation, NX model
    discovery, mandatory benchmarking, substudy creation (auto-numbered and
    custom names), configuration-template verification, and study listing.

    Returns:
        bool: True if every step completed; False when a prerequisite is
        missing (e.g. the NX model files) or a step failed.
    """
    print("=" * 80)
    print("CANTILEVER BEAM OPTIMIZATION - COMPLETE WORKFLOW TEST")
    print("=" * 80)
    print()

    # Initialize creator
    creator = StudyCreator()

    # Step 1: Create study
    print("Step 1: Creating study...")
    print("-" * 40)
    study_name = "cantilever_beam_optimization"
    description = "Minimize mass of cantilevered beam with hole while ensuring safety"

    study_dir = creator.create_study(study_name, description)
    print(f"✅ Study created: {study_dir}")
    print()

    # Verify the directory layout produced by create_study()
    assert (study_dir / "model").exists(), "model/ directory not created"
    assert (study_dir / "substudies" / "benchmarking").exists(), "benchmarking/ not created"
    assert (study_dir / "study_metadata.json").exists(), "metadata not created"
    print("✅ Study structure validated")
    print()

    # Step 2: Check for NX models
    print("Step 2: Checking for NX models...")
    print("-" * 40)
    prt_file = study_dir / "model" / "beam.prt"
    sim_file = study_dir / "model" / "beam_sim1.sim"

    if not prt_file.exists() or not sim_file.exists():
        # The models must be authored manually in NX; without them the
        # remaining steps cannot run, so bail out with instructions.
        print("⚠️ NX models not found!")
        print(f" Expected: {prt_file}")
        print(f" Expected: {sim_file}")
        print()
        print("NEXT STEPS:")
        print("1. Create beam.prt and beam_sim1.sim in NX")
        print(f"2. Place them in: {study_dir / 'model'}/")
        print("3. Re-run this script")
        print()
        return False

    print(f"✅ Found: {prt_file.name}")
    print(f"✅ Found: {sim_file.name}")
    print()

    # Step 3: Run benchmarking
    print("Step 3: Running mandatory benchmarking...")
    print("-" * 40)
    print("This will:")
    print(" - Discover expressions (beam_thickness, hole_diameter, hole_position)")
    print(" - Run baseline simulation")
    print(" - Analyze OP2 contents")
    print(" - Extract baseline performance")
    print(" - Propose configuration")
    print()

    try:
        benchmark_results = creator.run_benchmarking(
            study_dir=study_dir,
            prt_file=prt_file,
            sim_file=sim_file
        )

        print()
        print("=" * 80)
        print("BENCHMARKING RESULTS")
        print("=" * 80)
        print()

        print(f"Validation: {'✅ PASSED' if benchmark_results.validation_passed else '❌ FAILED'}")
        print(f"Expressions found: {benchmark_results.expression_count}")
        print(f"Element types: {', '.join(benchmark_results.element_types)}")
        print(f"Result types: {', '.join(benchmark_results.result_types)}")
        print()

        print("Baseline Performance:")
        for key, value in benchmark_results.baseline_results.items():
            print(f" - {key}: {value}")
        print()

        print(f"Proposed design variables: {len(benchmark_results.proposed_design_variables)}")
        for var in benchmark_results.proposed_design_variables:
            print(f" - {var['parameter']}: {var.get('suggested_range', 'define range')}")
        print()

        if not benchmark_results.validation_passed:
            print("❌ Benchmarking validation FAILED")
            print(f"Errors: {benchmark_results.errors}")
            print()
            return False

        print("✅ Benchmarking validation PASSED")
        print()

    except Exception as e:
        # Benchmarking touches the NX/Nastran toolchain; surface the full
        # traceback so solver-side failures are diagnosable from the console.
        print(f"❌ Benchmarking failed: {e}")
        import traceback
        traceback.print_exc()
        return False

    # Step 4: Test substudy creation with auto-numbering
    print("Step 4: Testing substudy creation...")
    print("-" * 40)

    try:
        # Create with auto-numbering (should be substudy_1)
        substudy_1 = creator.create_substudy(study_dir)
        print(f"✅ Created: {substudy_1.name}")
        assert substudy_1.name == "substudy_1", f"Expected substudy_1, got {substudy_1.name}"

        # Create another (should be substudy_2)
        substudy_2 = creator.create_substudy(study_dir)
        print(f"✅ Created: {substudy_2.name}")
        assert substudy_2.name == "substudy_2", f"Expected substudy_2, got {substudy_2.name}"

        # Create with custom name
        substudy_custom = creator.create_substudy(study_dir, substudy_name="integration_test")
        print(f"✅ Created: {substudy_custom.name}")
        assert substudy_custom.name == "integration_test"

        print()
        print("✅ Substudy auto-numbering works correctly")
        print()

    except Exception as e:
        print(f"❌ Substudy creation failed: {e}")
        import traceback
        traceback.print_exc()
        return False

    # Step 5: Verify configuration templates
    print("Step 5: Verifying configuration templates...")
    print("-" * 40)

    import json

    config_file = substudy_1 / "config.json"
    if config_file.exists():
        with open(config_file) as f:
            config = json.load(f)

        print("✅ Config template created")
        print(f" Design variables: {len(config['optimization']['design_variables'])}")
        for var in config['optimization']['design_variables']:
            print(f" - {var['parameter']}: {var['min']} to {var['max']} {var.get('units', '')}")
        print()
    else:
        # Previously this case was silently skipped; report it so a missing
        # template does not go unnoticed.
        print(f"⚠️ Config template not found: {config_file}")
        print()

    # Step 6: List all studies
    print("Step 6: Listing all studies...")
    print("-" * 40)

    studies = creator.list_studies()
    for study in studies:
        status_icon = "✅" if study['benchmarking_completed'] else "⚠️"
        print(f"{status_icon} {study['name']} ({study['status']}) - {study['substudies_count']} substudies")
    print()

    # Summary
    print("=" * 80)
    print("WORKFLOW TEST COMPLETE")
    print("=" * 80)
    print()
    print("✅ Study creation works")
    print("✅ Mandatory benchmarking works")
    print("✅ Auto-numbering works (substudy_1, substudy_2, ...)")
    print("✅ Custom naming works")
    print("✅ Configuration templates work")
    print()
    print("NEXT STEPS:")
    print("1. Edit substudy config files with desired ranges")
    print("2. Run integration test (2-3 trials)")
    print("3. Run full optimization")
    print()
    print(f"Study location: {study_dir}")
    print(f"Benchmark report: {study_dir / 'substudies/benchmarking/BENCHMARK_REPORT.md'}")
    print()

    return True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Entry point: run the workflow test and exit with a status code."""
    banner = "=" * 80
    try:
        completed = test_complete_workflow()
    except Exception as e:
        # Top-level boundary: report the failure with a full traceback,
        # then signal failure to the shell.
        print()
        print(banner)
        print(f"TEST FAILED: {e}")
        print(banner)
        import traceback
        traceback.print_exc()
        sys.exit(1)

    if completed:
        print(banner)
        print("ALL TESTS PASSED!")
        print(banner)
        sys.exit(0)

    # The workflow returned False: a prerequisite was missing or a step failed.
    print(banner)
    print("TEST INCOMPLETE - See messages above")
    print(banner)
    sys.exit(1)


if __name__ == '__main__':
    main()
|