BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
128 lines
3.7 KiB
Python
128 lines
3.7 KiB
Python
"""
|
|
Run Benchmarking for Simple Beam Study
|
|
"""
|
|
|
|
import sys
|
|
import shutil
|
|
from pathlib import Path
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
|
|
from optimization_engine.study.creator import StudyCreator
|
|
import logging
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
# Model files copied from the example directory into the study's model/ dir.
# Destination filenames are identical to the source filenames.
_MODEL_FILES = ("Beam.prt", "Beam_sim1.sim", "Beam_fem1_i.prt", "Beam_fem1.fem")


def _copy_models(source_dir, model_dir):
    """Copy every file in _MODEL_FILES from *source_dir* into *model_dir*.

    Returns a dict mapping filename -> destination Path so the caller can
    hand specific destinations (.prt / .sim) to the benchmarking run.
    Raises OSError (incl. FileNotFoundError) if a source file is missing.
    """
    destinations = {}
    for filename in _MODEL_FILES:
        dest = model_dir / filename
        shutil.copy2(source_dir / filename, dest)
        destinations[filename] = dest
    return destinations


def _report_results(results, study_dir):
    """Print the benchmarking summary for *results*.

    Returns False when validation failed (after listing the errors),
    True otherwise.
    """
    print()
    print("=" * 80)
    print("BENCHMARKING RESULTS")
    print("=" * 80)
    print()

    if results.validation_passed:
        print("[PASS] Validation successful")
    else:
        print("[FAIL] Validation failed")
        for error in results.errors:
            print(f" Error: {error}")
        return False

    print()
    print(f"Expressions found: {results.expression_count}")
    for name, info in results.expressions.items():
        print(f" - {name}: {info['value']} {info['units']}")

    print()
    print("OP2 Analysis:")
    print(f" Elements: {', '.join(results.element_types)}")
    print(f" Results: {', '.join(results.result_types)}")
    print(f" Nodes: {results.node_count}, Elements: {results.element_count}")

    print()
    print("Baseline Performance:")
    for key, value in results.baseline_results.items():
        print(f" - {key}: {value}")

    print()
    print(f"Proposed Design Variables: {len(results.proposed_design_variables)}")
    for var in results.proposed_design_variables:
        print(f" - {var['parameter']}: {var['suggested_range']}")

    print()
    print(f"Proposed Extractors: {len(results.proposed_extractors)}")
    for ext in results.proposed_extractors:
        print(f" - {ext['action']}: {ext['description']}")

    print()
    print(f"Proposed Objectives: {len(results.proposed_objectives)}")
    for obj in results.proposed_objectives:
        print(f" - {obj}")

    print()
    print("=" * 80)
    print("NEXT STEPS")
    print("=" * 80)
    print()
    print("1. Review benchmark report:")
    print(f" {study_dir / 'substudies/benchmarking/BENCHMARK_REPORT.md'}")
    print()
    print("2. I will analyze and propose optimization setup")
    print()

    return True


def main():
    """Create the simple-beam study, stage models, run benchmarking.

    Returns:
        True on success, False on any failure (missing models, failed
        validation, or an exception raised during benchmarking).
    """
    print()
    print("=" * 80)
    print("SIMPLE BEAM OPTIMIZATION - BENCHMARKING")
    print("=" * 80)
    print()
    print("GOAL: Minimize displacement and weight")
    print()

    creator = StudyCreator()

    # Create study
    study_name = "simple_beam_optimization"
    description = "Minimize displacement and weight of beam"

    study_dir = creator.create_study(study_name, description)
    print(f"[OK] Study: {study_dir}")
    print()

    # Copy models. Fail fast with a clear message instead of letting
    # shutil.copy2 raise an unhandled FileNotFoundError (the original
    # copies ran outside any try/except).
    source_dir = Path("examples/Models/Simple Beam")
    if not source_dir.is_dir():
        print(f"[FAIL] Source model directory not found: {source_dir}")
        return False

    try:
        destinations = _copy_models(source_dir, study_dir / "model")
    except OSError as e:
        print(f"[FAIL] Could not copy model files: {e}")
        return False

    print("[OK] Models copied (including .fem file)")
    print()

    # Run benchmarking
    print("Running benchmarking...")
    print()

    try:
        results = creator.run_benchmarking(
            study_dir,
            destinations["Beam.prt"],
            destinations["Beam_sim1.sim"],
        )
        return _report_results(results, study_dir)
    except Exception as e:
        print()
        print(f"[FAIL] Benchmarking failed: {e}")
        import traceback
        traceback.print_exc()
        return False
|
|
|
|
|
|
if __name__ == '__main__':
    # Map main()'s boolean result onto conventional process exit codes:
    # 0 for success, 1 for failure.
    sys.exit(0 if main() else 1)
|