# Atomizer/tests/run_benchmarking_simple.py

"""
Run Benchmarking for Simple Beam Study
"""
import logging
import shutil
import sys
import traceback
from pathlib import Path

# Make the optimization_engine package importable regardless of where the
# script is launched from.
sys.path.insert(0, str(Path(__file__).parent.parent))

from optimization_engine.study.creator import StudyCreator

logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
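
# NOTE: The benchmarking results object is used below via the following
# attributes. This is an assumed interface, inferred from this script's
# accesses rather than from documented types:
#   results.validation_passed: bool
#   results.errors: list of str
#   results.expression_count: int
#   results.expressions: {name: {'value': ..., 'units': ...}}
#   results.element_types, results.result_types: lists of str
#   results.node_count, results.element_count: ints
#   results.baseline_results: {metric: value}
#   results.proposed_design_variables: [{'parameter': ..., 'suggested_range': ...}, ...]
#   results.proposed_extractors: [{'action': ..., 'description': ...}, ...]
#   results.proposed_objectives: list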


def main():
    print()
    print("=" * 80)
    print("SIMPLE BEAM OPTIMIZATION - BENCHMARKING")
    print("=" * 80)
    print()
    print("GOAL: Minimize displacement and weight")
    print()

    creator = StudyCreator()

    # Create the study directory
    study_name = "simple_beam_optimization"
    description = "Minimize displacement and weight of beam"
    study_dir = creator.create_study(study_name, description)
    print(f"[OK] Study: {study_dir}")
    print()

    # Copy the model files into the study. The source path is relative to the
    # current working directory, so run this script from the repository root.
    source_dir = Path("examples/Models/Simple Beam")
    prt_dest = study_dir / "model" / "Beam.prt"
    sim_dest = study_dir / "model" / "Beam_sim1.sim"
    fem_prt_dest = study_dir / "model" / "Beam_fem1_i.prt"
    fem_dest = study_dir / "model" / "Beam_fem1.fem"
    shutil.copy2(source_dir / "Beam.prt", prt_dest)
    shutil.copy2(source_dir / "Beam_sim1.sim", sim_dest)
    shutil.copy2(source_dir / "Beam_fem1_i.prt", fem_prt_dest)
    shutil.copy2(source_dir / "Beam_fem1.fem", fem_dest)
    print("[OK] Models copied (including .fem file)")
    print()

    # Run benchmarking
    print("Running benchmarking...")
    print()
    try:
        results = creator.run_benchmarking(study_dir, prt_dest, sim_dest)

        print()
        print("=" * 80)
        print("BENCHMARKING RESULTS")
        print("=" * 80)
        print()

        if results.validation_passed:
            print("[PASS] Validation successful")
        else:
            print("[FAIL] Validation failed")
            for error in results.errors:
                print(f" Error: {error}")
            return False

        print()
        print(f"Expressions found: {results.expression_count}")
        for name, info in results.expressions.items():
            print(f" - {name}: {info['value']} {info['units']}")
        print()

        print("OP2 Analysis:")
        print(f" Elements: {', '.join(results.element_types)}")
        print(f" Results: {', '.join(results.result_types)}")
        print(f" Nodes: {results.node_count}, Elements: {results.element_count}")
        print()

        print("Baseline Performance:")
        for key, value in results.baseline_results.items():
            print(f" - {key}: {value}")
        print()

        print(f"Proposed Design Variables: {len(results.proposed_design_variables)}")
        for var in results.proposed_design_variables:
            print(f" - {var['parameter']}: {var['suggested_range']}")
        print()

        print(f"Proposed Extractors: {len(results.proposed_extractors)}")
        for ext in results.proposed_extractors:
            print(f" - {ext['action']}: {ext['description']}")
        print()

        print(f"Proposed Objectives: {len(results.proposed_objectives)}")
        for obj in results.proposed_objectives:
            print(f" - {obj}")
        print()

        print("=" * 80)
        print("NEXT STEPS")
        print("=" * 80)
        print()
        print("1. Review benchmark report:")
        print(f" {study_dir / 'substudies/benchmarking/BENCHMARK_REPORT.md'}")
        print()
        print("2. Analyze the results and propose the optimization setup")
        print()
        return True
    except Exception as e:
        print()
        print(f"[FAIL] Benchmarking failed: {e}")
        traceback.print_exc()
        return False


if __name__ == '__main__':
    success = main()
    sys.exit(0 if success else 1)