# Critical bug fix for LLM mode optimization
#
# Problem:
#   - NXParameterUpdater.update_expressions() uses an NX journal to import
#     expressions (default use_nx_import=True).
#   - The NX journal directly updates the PRT file on disk and saves it.
#   - But run_optimization.py was then calling updater.save() afterwards.
#   - save() writes self.content (loaded at initialization) back to file,
#     overwriting the NX journal changes with stale binary content.
#
# Result: all optimization trials produced identical FEM results because the
# model was never actually updated.
#
# Fixes:
#   1. Removed the updater.save() call from the model_updater closure in
#      run_optimization.py.
#   2. Added theSession.Parts.CloseAll() in import_expressions.py to ensure
#      changes are flushed and the file is released.
#   3. Fixed the test_phase_3_2_e2e.py variable name
#      (best_trial_file -> results_file).
#
# Testing: verified that expressions persist to disk correctly with a
# standalone test. Next step: address the remaining issue where FEM results
# are still identical (likely the solve journal not reloading the updated PRT).
"""
|
|
Run Benchmarking for Simple Beam Study
|
|
"""
|
|
|
|
import sys
|
|
import shutil
|
|
from pathlib import Path
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
|
|
from optimization_engine.study_creator import StudyCreator
|
|
import logging
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
def main():
    """Create the simple-beam study, copy its model files, and run benchmarking.

    Returns:
        bool: True when benchmarking completed and validation passed,
        False otherwise (validation failure or any exception during the run).
    """
    banner = "=" * 80

    print()
    print(banner)
    print("SIMPLE BEAM OPTIMIZATION - BENCHMARKING")
    print(banner)
    print()
    print("GOAL: Minimize displacement and weight")
    print()

    creator = StudyCreator()

    # Create study
    study_name = "simple_beam_optimization"
    description = "Minimize displacement and weight of beam"

    study_dir = creator.create_study(study_name, description)
    print(f"[OK] Study: {study_dir}")
    print()

    # Copy models. The part, sim, idealized FEM part, and FEM file must all
    # travel together so the solve can find its cross-file references.
    source_dir = Path("examples/Models/Simple Beam")
    model_dir = study_dir / "model"
    for filename in ("Beam.prt", "Beam_sim1.sim", "Beam_fem1_i.prt", "Beam_fem1.fem"):
        shutil.copy2(source_dir / filename, model_dir / filename)

    prt_dest = model_dir / "Beam.prt"
    sim_dest = model_dir / "Beam_sim1.sim"

    print("[OK] Models copied (including .fem file)")
    print()

    # Run benchmarking
    print("Running benchmarking...")
    print()

    try:
        results = creator.run_benchmarking(study_dir, prt_dest, sim_dest)

        print()
        print(banner)
        print("BENCHMARKING RESULTS")
        print(banner)
        print()

        if results.validation_passed:
            print("[PASS] Validation successful")
        else:
            print("[FAIL] Validation failed")
            for error in results.errors:
                print(f" Error: {error}")
            return False

        print()
        print(f"Expressions found: {results.expression_count}")
        for name, info in results.expressions.items():
            print(f" - {name}: {info['value']} {info['units']}")

        print()
        print("OP2 Analysis:")
        print(f" Elements: {', '.join(results.element_types)}")
        print(f" Results: {', '.join(results.result_types)}")
        print(f" Nodes: {results.node_count}, Elements: {results.element_count}")

        print()
        print("Baseline Performance:")
        for key, value in results.baseline_results.items():
            print(f" - {key}: {value}")

        print()
        print(f"Proposed Design Variables: {len(results.proposed_design_variables)}")
        for var in results.proposed_design_variables:
            print(f" - {var['parameter']}: {var['suggested_range']}")

        print()
        print(f"Proposed Extractors: {len(results.proposed_extractors)}")
        for ext in results.proposed_extractors:
            print(f" - {ext['action']}: {ext['description']}")

        print()
        print(f"Proposed Objectives: {len(results.proposed_objectives)}")
        for obj in results.proposed_objectives:
            print(f" - {obj}")

        print()
        print(banner)
        print("NEXT STEPS")
        print(banner)
        print()
        print("1. Review benchmark report:")
        print(f" {study_dir / 'substudies/benchmarking/BENCHMARK_REPORT.md'}")
        print()
        print("2. I will analyze and propose optimization setup")
        print()

        return True

    except Exception as e:
        # Broad catch is deliberate: this is the top-level CLI boundary, and
        # any failure should be reported with a traceback and a nonzero exit
        # status rather than an unhandled crash.
        print()
        print(f"[FAIL] Benchmarking failed: {e}")
        import traceback
        traceback.print_exc()
        return False
|
|
|
|
|
|
if __name__ == '__main__':
    # Exit 0 on success, 1 on any reported failure, so CI can gate on this.
    sys.exit(0 if main() else 1)
|