Major changes: - Dashboard: WebSocket-based chat with session management - Dashboard: New chat components (ChatPane, ChatInput, ModeToggle) - Dashboard: Enhanced UI with parallel coordinates chart - MCP Server: New atomizer-tools server for Claude integration - Extractors: Enhanced Zernike OPD extractor - Reports: Improved report generator New studies (configs and scripts only): - M1 Mirror: Cost reduction campaign studies - Simple Beam, Simple Bracket, UAV Arm studies Note: Large iteration data (2_iterations/, best_design_archive/) excluded via .gitignore - kept on local Gitea only. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
160 lines · 4.3 KiB · Python
"""
|
|
Test 1: Beam Optimization Verification
|
|
November 18, 2025
|
|
|
|
This test verifies:
|
|
1. Parameter bounds parsing (20-30mm not 0.2-1.0mm)
|
|
2. Workflow config auto-saved
|
|
3. Extractors added to core library
|
|
4. Study manifest created (not code pollution)
|
|
5. Clean study folder structure
|
|
|
|
This test uses the CLI runner with workflow JSON (Hybrid Mode).
|
|
"""
import json
import subprocess
import sys
from datetime import datetime
from pathlib import Path
# --- Study layout --------------------------------------------------------
# Inputs live under 1_setup/; this verification run writes into a
# dedicated substudy folder under 2_substudies/.
study_dir = Path("studies/simple_beam_optimization")
setup_dir = study_dir / "1_setup"
workflow_json = setup_dir / "workflow_config.json"
prt_file = setup_dir / "model/Beam.prt"
sim_file = setup_dir / "model/Beam_sim1.sim"
output_dir = study_dir / "2_substudies/test_nov18_verification"

banner = "=" * 80
print(banner)
print("TEST 1: BEAM OPTIMIZATION VERIFICATION - HYBRID MODE")
print(banner)
print()
for label, path in (
    ("Workflow JSON", workflow_json),
    ("Model", prt_file),
    ("Output", output_dir),
):
    print(f"{label}: {path}")
print()
print("Running 5 trials to verify system...")
print()
# Build the command for the CLI runner.
# Portability fix: use the interpreter running this script rather than a
# hard-coded, machine-specific conda path ("c:/Users/antoi/...") so the
# test works on any machine / environment that can run this file.
python_exe = sys.executable

cmd = [
    python_exe,
    "optimization_engine/run_optimization.py",
    "--config", str(workflow_json),
    "--prt", str(prt_file),
    "--sim", str(sim_file),
    "--output", str(output_dir.parent),   # runner creates <output>/<study-name>
    "--study-name", output_dir.name,
    "--trials", "5",                      # small count: smoke test, not a campaign
]

print("Command:")
print(" ".join(cmd))
print()
print("=" * 80)
print("OPTIMIZATION RUNNING...")
print("=" * 80)
print()
# Run the optimization as a child process. capture_output=False streams the
# runner's stdout/stderr straight to this console so progress is visible live.
start_time = datetime.now()
result = subprocess.run(cmd, capture_output=False, text=True)
end_time = datetime.now()

duration = (end_time - start_time).total_seconds()

print()
print("=" * 80)
print(f"COMPLETED in {duration:.1f} seconds ({duration/60:.1f} minutes)")
print("=" * 80)
print()

# Check results: a non-zero return code means the runner itself failed,
# so there is nothing further to verify.
if result.returncode != 0:
    print(f"[FAIL] Optimization failed with return code: {result.returncode}")
    # Fix: sys.exit() instead of the site-injected exit(), which is intended
    # for interactive sessions and is not guaranteed to exist in all contexts.
    sys.exit(1)
# Verify outputs
print("Verifying outputs...")
print()

# Artifacts the runner is expected to have produced in the study folder.
manifest_file = output_dir / "extractors_manifest.json"
results_file = output_dir / "optimization_results.json"
history_file = output_dir / "optimization_history.json"
workflow_saved = output_dir / "llm_workflow_config.json"

# Each verification appends True/False here; the summary below reports
# how many passed and exits non-zero if any failed.
checks = []


def check(condition, ok_msg, fail_msg):
    """Record one verification result and print its [OK]/[FAIL] line.

    condition: boolean outcome of the verification.
    ok_msg / fail_msg: message printed on success / failure.
    Returns *condition* so callers can gate dependent checks on it.
    """
    if condition:
        print(f"  [OK] {ok_msg}")
    else:
        print(f"  [FAIL] {fail_msg}")
    checks.append(bool(condition))
    return condition


# 1. Extractors manifest exists (centralized library, not generated code).
check(manifest_file.exists(),
      f"Extractors manifest: {manifest_file.name}",
      "Extractors manifest not found")

# 2. Results exist, and the best parameter landed inside the configured
#    bounds (20-30mm — guards against the old 0.2-1.0mm parsing bug).
if check(results_file.exists(),
         f"Results file: {results_file.name}",
         "Results file not found"):
    with open(results_file) as f:
        results = json.load(f)

    try:
        thickness = results['best_params']['beam_half_core_thickness']
    except (KeyError, TypeError):
        # Robustness fix: a malformed results file previously crashed the
        # script with a KeyError traceback; report it as a failed check.
        print("  [FAIL] best_params.beam_half_core_thickness missing from results")
        checks.append(False)
    else:
        check(20 <= thickness <= 30,
              f"Parameter ranges correct: thickness = {thickness:.2f} mm",
              f"Parameter out of range: thickness = {thickness:.2f} mm (expected 20-30)")

# 3. Trial history exists.
check(history_file.exists(),
      f"History file: {history_file.name}",
      "History file not found")

# 4. Workflow config auto-saved alongside the results.
check(workflow_saved.exists(),
      f"Workflow config saved: {workflow_saved.name}",
      "Workflow config not saved")

# 5. NO generated_extractors directory — the study folder must stay clean,
#    so this check passes only when the directory is ABSENT.
generated_extractors = output_dir / "generated_extractors"
check(not generated_extractors.exists(),
      "No generated_extractors/ pollution (clean!)",
      "generated_extractors/ directory exists (not clean)")
print()
print("=" * 80)
print(f"TEST 1 RESULTS: {sum(checks)}/{len(checks)} checks passed")
print("=" * 80)
print()

if all(checks):
    print("[SUCCESS] All checks passed!")
    print()
    print("Verified:")
    # One line per verified behavior, mirroring the checks above.
    for item in (
        "Parameter bounds parsed correctly (20-30mm)",
        "Extractors manifest created (centralized library)",
        "Study folder clean (no code pollution)",
        "Workflow config auto-saved",
        "Optimization completed successfully",
    ):
        print(f"  ✓ {item}")
else:
    print("[FAIL] Some checks failed")
    # Fix: sys.exit() instead of the site-injected exit(), so the non-zero
    # status is reliably propagated to CI / calling shells.
    sys.exit(1)