BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
255 lines · 7.9 KiB · Python
"""
|
|
Run Benchmarking for Simple Beam Study
|
|
|
|
This script:
|
|
1. Creates the study structure
|
|
2. Copies NX models to proper location
|
|
3. Runs mandatory benchmarking
|
|
4. Generates proposals for optimization setup
|
|
|
|
Author: Antoine Letarte
|
|
Date: 2025-11-17
|
|
"""
|
|
|
|
# Standard library imports, grouped and alphabetized per PEP 8.
import logging
import shutil
import sys
from pathlib import Path

# Add parent directory to path so `optimization_engine` resolves when this
# script is run directly from its own folder.
sys.path.insert(0, str(Path(__file__).parent.parent))

# NOTE: this import must come after the sys.path tweak above.
from optimization_engine.study.creator import StudyCreator

# Setup logging: console output showing only level and message.
logging.basicConfig(
    level=logging.INFO,
    format='%(levelname)s - %(message)s'
)

logger = logging.getLogger(__name__)
|
def main():
    """Run the complete benchmarking workflow for the simple beam study.

    Steps:
      1. Create the study directory structure via ``StudyCreator``.
      2. Copy the NX part/sim/FEM files into the study's ``model`` folder.
      3. Run mandatory benchmarking and print discoveries and proposals.

    Returns:
        bool: True when benchmarking completed and validation passed;
        False on validation failure, missing model files, or any exception
        raised during benchmarking.
    """
    print()
    print("=" * 80)
    print("SIMPLE BEAM OPTIMIZATION - BENCHMARKING & DISCOVERY")
    print("=" * 80)
    print()

    # User requirement
    print("OPTIMIZATION GOAL:")
    print(" - Minimize displacement")
    print(" - Minimize weight")
    print(" - With actual loadcases")
    print()

    # Initialize creator
    creator = StudyCreator()

    # Step 1: Create study
    print("Step 1: Creating study structure...")
    print("-" * 40)

    study_name = "simple_beam_optimization"
    description = "Minimize displacement and weight of beam with existing loadcases"

    study_dir = creator.create_study(study_name, description)
    print(f"[OK] Study created: {study_dir}")
    print()

    # Step 2: Copy NX models to study
    print("Step 2: Copying NX models to study...")
    print("-" * 40)

    source_dir = Path("examples/Models/Simple Beam")
    model_dir = study_dir / "model"

    # Part, simulation, and FEM files. The FEM file may be needed when the
    # simulation file is opened in NX.
    model_files = ["Beam.prt", "Beam_sim1.sim", "Beam_fem1_i.prt"]

    for filename in model_files:
        src = source_dir / filename
        dest = model_dir / filename
        # Fail fast with a clear message instead of an uncaught traceback
        # when the example models are missing (the copies run outside the
        # try block below, so an exception here would not be caught).
        if not src.exists():
            print(f"[FAIL] Model file not found: {src}")
            return False
        shutil.copy2(src, dest)
        print(f"[OK] Copied: {src.name} → {dest}")
    print()

    # Destination paths needed by the benchmarking call below.
    prt_dest = model_dir / "Beam.prt"
    sim_dest = model_dir / "Beam_sim1.sim"

    # Step 3: Run mandatory benchmarking
    print("Step 3: Running mandatory benchmarking...")
    print("-" * 40)
    print()
    print("This will discover:")
    print(" - All expressions in the model")
    print(" - Element types and result types in OP2")
    print(" - Baseline performance (displacement, stress)")
    print(" - Propose design variables, extractors, objectives")
    print()
    print("Starting benchmarking workflow...")
    print()

    try:
        benchmark_results = creator.run_benchmarking(
            study_dir=study_dir,
            prt_file=prt_dest,
            sim_file=sim_dest
        )

        print()
        print("=" * 80)
        print("BENCHMARKING COMPLETE")
        print("=" * 80)
        print()

        # Display results; validation failure aborts before the detail dump.
        if benchmark_results.validation_passed:
            print("[OK] VALIDATION PASSED - Ready for optimization")
        else:
            print("[FAIL] VALIDATION FAILED")
            print(f"Errors: {benchmark_results.errors}")
            return False

        print()
        print("=" * 80)
        print("DISCOVERIES")
        print("=" * 80)
        print()

        # Expressions discovered in the NX model.
        print(f" Expressions Found: {benchmark_results.expression_count}")
        print()
        for name, info in benchmark_results.expressions.items():
            print(f" • {name}")
            print(f" Value: {info['value']} {info['units']}")
            if info.get('formula'):
                print(f" Formula: {info['formula']}")
            print()

        # OP2 contents
        print(" OP2 Analysis:")
        print(f" • Element Types: {', '.join(benchmark_results.element_types)}")
        print(f" • Result Types: {', '.join(benchmark_results.result_types)}")
        print(f" • Subcases: {benchmark_results.subcases}")
        print(f" • Nodes: {benchmark_results.node_count}")
        print(f" • Elements: {benchmark_results.element_count}")
        print()

        # Baseline performance
        print(" Baseline Performance:")
        if benchmark_results.baseline_results:
            for key, value in benchmark_results.baseline_results.items():
                print(f" • {key}: {value}")
        else:
            print(" (No baseline results extracted)")
        print()

        print("=" * 80)
        print("OPTIMIZATION SETUP PROPOSAL")
        print("=" * 80)
        print()

        # Proposed design variables
        print(f" Proposed Design Variables ({len(benchmark_results.proposed_design_variables)}):")
        print()
        if benchmark_results.proposed_design_variables:
            for i, var in enumerate(benchmark_results.proposed_design_variables, 1):
                print(f" {i}. {var['parameter']}")
                print(f" Current: {var['current_value']} {var['units']}")
                print(f" Suggested range: {var['suggested_range']}")
                print()
        else:
            print(" (No design variables proposed - may need manual definition)")
            print()

        # Proposed extractors
        print(f" Proposed Extractors ({len(benchmark_results.proposed_extractors)}):")
        print()
        for i, ext in enumerate(benchmark_results.proposed_extractors, 1):
            print(f" {i}. {ext['action']}")
            print(f" Description: {ext['description']}")
            print(f" Parameters: {ext['params']}")
            print()

        # Proposed objectives
        print(f" Proposed Objectives ({len(benchmark_results.proposed_objectives)}):")
        print()
        for i, obj in enumerate(benchmark_results.proposed_objectives, 1):
            print(f" {i}. {obj}")
        print()

        # User goals alignment
        print("=" * 80)
        print("ALIGNMENT WITH YOUR GOALS")
        print("=" * 80)
        print()
        print("Your stated goals:")
        print(" ✓ Minimize displacement")
        print(" ✓ Minimize weight")
        print()

        # Check if a displacement extractor was proposed by benchmarking.
        has_displacement = any('displacement' in ext['action'].lower()
                               for ext in benchmark_results.proposed_extractors)
        print(f" {'[OK]' if has_displacement else '[WARN] '} Displacement extraction: "
              f"{'Proposed' if has_displacement else 'May need custom extractor'}")

        # Mass calculation will need a hook.
        # (Was an f-string with no placeholders; plain literal is correct.)
        print(" [WARN] Mass/weight calculation: Will need post-processing hook")
        print()

        # Warnings surfaced by the benchmarking run, if any.
        if benchmark_results.warnings:
            print("[WARN] WARNINGS:")
            for warning in benchmark_results.warnings:
                print(f" • {warning}")
            print()

        print("=" * 80)
        print("NEXT STEPS")
        print("=" * 80)
        print()
        print("1. Review benchmark report:")
        print(f" {study_dir / 'substudies/benchmarking/BENCHMARK_REPORT.md'}")
        print()
        print("2. Based on discoveries, I will propose:")
        print(" • Which expressions to use as design variables")
        print(" • How to extract displacement from OP2")
        print(" • How to compute beam mass/weight")
        print(" • Complete optimization configuration")
        print()
        print("3. Create substudy with proposed configuration")
        print()
        print("4. Run integration test (2-3 trials)")
        print()
        print("5. Run full optimization")
        print()

        return True

    except Exception as e:
        # Broad catch is intentional at this top-level script boundary:
        # report, dump the traceback, and signal failure via return value.
        print()
        print("=" * 80)
        print(f"[FAIL] BENCHMARKING FAILED: {e}")
        print("=" * 80)
        import traceback
        traceback.print_exc()
        return False
|
if __name__ == '__main__':
    # Exit code 0 on success, 1 on failure, for shell/CI integration.
    sys.exit(0 if main() else 1)