Files
Atomizer/tests/test_beam_workflow.py
Anto01 b4c0831230 fix: Remove redundant save() call that overwrote NX expression updates
Critical bug fix for LLM mode optimization:

**Problem**:
- NXParameterUpdater.update_expressions() uses NX journal to import expressions (default use_nx_import=True)
- The NX journal directly updates the PRT file on disk and saves it
- But then run_optimization.py was calling updater.save() afterwards
- save() writes self.content (loaded at initialization) back to file
- This overwrote the NX journal changes with stale binary content!

**Result**: All optimization trials produced identical FEM results because the model was never actually updated.

**Fixes**:
1. Removed updater.save() call from model_updater closure in run_optimization.py
2. Added theSession.Parts.CloseAll() in import_expressions.py to ensure changes are flushed and file is released
3. Fixed test_phase_3_2_e2e.py variable name (best_trial_file → results_file)

**Testing**: Verified expressions persist to disk correctly with standalone test.

Next step: Address remaining issue where FEM results are still identical (likely solve journal not reloading updated PRT).
2025-11-17 21:24:02 -05:00

245 lines
7.5 KiB
Python

"""
Test Complete Workflow with Cantilever Beam Optimization
This script tests the complete professional optimization workflow:
1. Study creation
2. Mandatory benchmarking
3. Substudy creation with auto-numbering
4. Configuration
5. Integration testing
Run this after placing beam.prt and beam_sim1.sim in the study model/ directory.
Author: Antoine Letarte
Date: 2025-11-17
"""
import sys
from pathlib import Path
# Add parent directory to path so `optimization_engine` resolves when this
# script is run directly from the tests/ directory.
sys.path.insert(0, str(Path(__file__).parent.parent))
from optimization_engine.study_creator import StudyCreator
import logging
# Setup logging: INFO level, with timestamp, logger name, and severity.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger (not used below, but available for future debugging).
logger = logging.getLogger(__name__)
def test_complete_workflow():
    """Run the complete optimization workflow end to end.

    Exercises, in order: study creation, directory-structure validation,
    NX model presence check, mandatory benchmarking, substudy creation
    with auto-numbering and custom naming, configuration-template
    verification, and study listing.

    Returns:
        bool: True if every step completed; False if a manual
        prerequisite is missing (the NX .prt/.sim models) or a step
        failed. Structural problems in the created study raise
        AssertionError.
    """
    # Hoisted here so they are imported once instead of inside each
    # except-block / mid-function as the script grows.
    import json
    import traceback

    print("=" * 80)
    print("CANTILEVER BEAM OPTIMIZATION - COMPLETE WORKFLOW TEST")
    print("=" * 80)
    print()

    # Initialize creator
    creator = StudyCreator()

    # Step 1: Create study and validate the generated directory layout.
    print("Step 1: Creating study...")
    print("-" * 40)
    study_name = "cantilever_beam_optimization"
    description = "Minimize mass of cantilevered beam with hole while ensuring safety"
    study_dir = creator.create_study(study_name, description)
    print(f"✅ Study created: {study_dir}")
    print()

    # Verify structure
    assert (study_dir / "model").exists(), "model/ directory not created"
    assert (study_dir / "substudies" / "benchmarking").exists(), "benchmarking/ not created"
    assert (study_dir / "study_metadata.json").exists(), "metadata not created"
    print("✅ Study structure validated")
    print()

    # Step 2: The NX models are a manual prerequisite; bail out with
    # instructions instead of failing hard when they are absent.
    print("Step 2: Checking for NX models...")
    print("-" * 40)
    prt_file = study_dir / "model" / "beam.prt"
    sim_file = study_dir / "model" / "beam_sim1.sim"
    if not prt_file.exists() or not sim_file.exists():
        print("⚠️ NX models not found!")
        print(f" Expected: {prt_file}")
        print(f" Expected: {sim_file}")
        print()
        print("NEXT STEPS:")
        print("1. Create beam.prt and beam_sim1.sim in NX")
        print(f"2. Place them in: {study_dir / 'model'}/")
        print("3. Re-run this script")
        print()
        return False
    print(f"✅ Found: {prt_file.name}")
    print(f"✅ Found: {sim_file.name}")
    print()

    # Step 3: Mandatory benchmarking — discovers expressions, runs the
    # baseline simulation, and proposes a starting configuration.
    print("Step 3: Running mandatory benchmarking...")
    print("-" * 40)
    print("This will:")
    print(" - Discover expressions (beam_thickness, hole_diameter, hole_position)")
    print(" - Run baseline simulation")
    print(" - Analyze OP2 contents")
    print(" - Extract baseline performance")
    print(" - Propose configuration")
    print()
    try:
        benchmark_results = creator.run_benchmarking(
            study_dir=study_dir,
            prt_file=prt_file,
            sim_file=sim_file
        )
        print()
        print("=" * 80)
        print("BENCHMARKING RESULTS")
        print("=" * 80)
        print()
        print(f"Validation: {'✅ PASSED' if benchmark_results.validation_passed else '❌ FAILED'}")
        print(f"Expressions found: {benchmark_results.expression_count}")
        print(f"Element types: {', '.join(benchmark_results.element_types)}")
        print(f"Result types: {', '.join(benchmark_results.result_types)}")
        print()
        print("Baseline Performance:")
        for key, value in benchmark_results.baseline_results.items():
            print(f" - {key}: {value}")
        print()
        print(f"Proposed design variables: {len(benchmark_results.proposed_design_variables)}")
        for var in benchmark_results.proposed_design_variables:
            print(f" - {var['parameter']}: {var.get('suggested_range', 'define range')}")
        print()
        if not benchmark_results.validation_passed:
            print("❌ Benchmarking validation FAILED")
            print(f"Errors: {benchmark_results.errors}")
            print()
            return False
        print("✅ Benchmarking validation PASSED")
        print()
    except Exception as e:
        print(f"❌ Benchmarking failed: {e}")
        traceback.print_exc()
        return False

    # Step 4: Substudy creation — auto-numbering must yield substudy_1,
    # substudy_2, ...; an explicit name must be used verbatim.
    print("Step 4: Testing substudy creation...")
    print("-" * 40)
    try:
        # Create with auto-numbering (should be substudy_1)
        substudy_1 = creator.create_substudy(study_dir)
        print(f"✅ Created: {substudy_1.name}")
        assert substudy_1.name == "substudy_1", f"Expected substudy_1, got {substudy_1.name}"
        # Create another (should be substudy_2)
        substudy_2 = creator.create_substudy(study_dir)
        print(f"✅ Created: {substudy_2.name}")
        assert substudy_2.name == "substudy_2", f"Expected substudy_2, got {substudy_2.name}"
        # Create with custom name
        substudy_custom = creator.create_substudy(study_dir, substudy_name="integration_test")
        print(f"✅ Created: {substudy_custom.name}")
        assert substudy_custom.name == "integration_test"
        print()
        print("✅ Substudy auto-numbering works correctly")
        print()
    except Exception as e:
        print(f"❌ Substudy creation failed: {e}")
        traceback.print_exc()
        return False

    # Step 5: The benchmarking step should have seeded a config template
    # in each substudy; show its proposed design variables.
    print("Step 5: Verifying configuration templates...")
    print("-" * 40)
    config_file = substudy_1 / "config.json"
    if config_file.exists():
        with open(config_file) as f:
            config = json.load(f)
        print("✅ Config template created")
        print(f" Design variables: {len(config['optimization']['design_variables'])}")
        for var in config['optimization']['design_variables']:
            print(f" - {var['parameter']}: {var['min']} to {var['max']} {var.get('units', '')}")
        print()
    else:
        # Previously this was skipped silently; warn so a missing
        # template does not go unnoticed.
        print(f"⚠️ Config template not found: {config_file}")
        print()

    # Step 6: List all studies known to the creator, with a status icon.
    print("Step 6: Listing all studies...")
    print("-" * 40)
    studies = creator.list_studies()
    for study in studies:
        # Fixed: the "completed" icon was an empty string, inconsistent
        # with the ✅/⚠️ convention used throughout this script.
        status_icon = "✅" if study['benchmarking_completed'] else "⚠️"
        print(f"{status_icon} {study['name']} ({study['status']}) - {study['substudies_count']} substudies")
    print()

    # Summary
    print("=" * 80)
    print("WORKFLOW TEST COMPLETE")
    print("=" * 80)
    print()
    print("✅ Study creation works")
    print("✅ Mandatory benchmarking works")
    print("✅ Auto-numbering works (substudy_1, substudy_2, ...)")
    print("✅ Custom naming works")
    print("✅ Configuration templates work")
    print()
    print("NEXT STEPS:")
    print("1. Edit substudy config files with desired ranges")
    print("2. Run integration test (2-3 trials)")
    print("3. Run full optimization")
    print()
    print(f"Study location: {study_dir}")
    print(f"Benchmark report: {study_dir / 'substudies/benchmarking/BENCHMARK_REPORT.md'}")
    print()
    return True
def main():
    """Entry point: run the workflow test and exit with a status code.

    Exit codes: 0 when all steps passed, 1 when the test was incomplete
    or raised an unexpected exception.
    """
    banner = "=" * 80
    try:
        passed = test_complete_workflow()
    except Exception as exc:
        print()
        print(banner)
        print(f"TEST FAILED: {exc}")
        print(banner)
        import traceback
        traceback.print_exc()
        sys.exit(1)

    # SystemExit is not an Exception subclass, so exiting outside the
    # try-block is equivalent to the original flow.
    if passed:
        print(banner)
        print("ALL TESTS PASSED!")
        print(banner)
        sys.exit(0)

    print(banner)
    print("TEST INCOMPLETE - See messages above")
    print(banner)
    sys.exit(1)


if __name__ == '__main__':
    main()