Major changes: - Dashboard: WebSocket-based chat with session management - Dashboard: New chat components (ChatPane, ChatInput, ModeToggle) - Dashboard: Enhanced UI with parallel coordinates chart - MCP Server: New atomizer-tools server for Claude integration - Extractors: Enhanced Zernike OPD extractor - Reports: Improved report generator New studies (configs and scripts only): - M1 Mirror: Cost reduction campaign studies - Simple Beam, Simple Bracket, UAV Arm studies Note: Large iteration data (2_iterations/, best_design_archive/) excluded via .gitignore - kept on local Gitea only. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
294 lines
11 KiB
Python
294 lines
11 KiB
Python
"""
|
|
Bracket Displacement Maximization - Substudy Runner
|
|
====================================================
|
|
|
|
Run optimization substudies with shared model but independent configurations.
|
|
Supports:
|
|
- Multiple substudies with different parameters (coarse/fine, different algorithms)
|
|
- Continuation from previous substudy results
|
|
- Real-time incremental history updates
|
|
- Shared model files (Bracket.prt, Bracket_fem1.fem, Bracket_sim1.sim)
|
|
|
|
Usage:
|
|
python run_substudy.py coarse_exploration
|
|
python run_substudy.py fine_tuning
|
|
"""
|
|
|
|
import sys
|
|
import json
|
|
from pathlib import Path
|
|
from datetime import datetime
|
|
|
|
# Add parent directories to path
|
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
|
|
|
from optimization_engine.config.setup_wizard import OptimizationSetupWizard
|
|
from optimization_engine.future.llm_optimization_runner import LLMOptimizationRunner
|
|
from optimization_engine.nx.solver import NXSolver
|
|
from optimization_engine.nx.updater import NXParameterUpdater
|
|
|
|
|
|
def print_section(title: str):
    """Display *title* framed by 80-character rules, padded with blank lines."""
    rule = "=" * 80
    for line in ("", rule, f" {title}", rule, ""):
        print(line)
|
|
|
|
|
|
def load_substudy_config(substudy_path: Path) -> dict:
    """Load a substudy's ``config.json``.

    Args:
        substudy_path: Directory of the substudy (must contain ``config.json``).

    Returns:
        The parsed configuration dictionary.

    Raises:
        FileNotFoundError: If ``config.json`` does not exist in *substudy_path*.
    """
    config_file = substudy_path / "config.json"
    if not config_file.exists():
        raise FileNotFoundError(f"Substudy config not found: {config_file}")

    # JSON is UTF-8 by spec; be explicit so a narrow platform locale default
    # (e.g. cp1252 on Windows) cannot break parsing of non-ASCII content.
    with open(config_file, 'r', encoding='utf-8') as f:
        return json.load(f)
|
|
|
|
|
|
def load_parent_best_params(parent_substudy_path: Path) -> "dict | None":
    """Load best parameters from a parent substudy for continuation.

    Args:
        parent_substudy_path: Directory of the parent substudy.

    Returns:
        The ``parameters`` mapping from the parent's ``best_design.json``,
        ``{}`` if the file exists but has no ``parameters`` key, or ``None``
        when the file is missing (no parent results to inherit).
    """
    best_file = parent_substudy_path / "best_design.json"
    if not best_file.exists():
        return None

    # JSON is UTF-8 by spec; be explicit so the platform locale default
    # cannot break parsing.
    with open(best_file, 'r', encoding='utf-8') as f:
        best_design = json.load(f)
    return best_design.get('parameters', {})
|
|
|
|
|
|
def save_substudy_results(results: dict, substudy_dir: Path, config: dict):
    """Persist a finished substudy: history JSON, best-design JSON, markdown report.

    Args:
        results: Runner output containing ``history`` (list of trial dicts),
            ``best_trial_number``, ``best_params`` and ``best_value``.
        substudy_dir: Directory the three artifact files are written into.
        config: Substudy configuration (``substudy_name``, ``optimization``, ...).

    Returns:
        Tuple of paths ``(history_file, best_file, report_file)``.
    """
    # Save complete history. default=str stringifies non-JSON-native values
    # (e.g. datetimes, Paths) instead of raising.
    history_file = substudy_dir / "optimization_history.json"
    with open(history_file, 'w', encoding='utf-8') as f:
        json.dump(results['history'], f, indent=2, default=str)

    # Find the best trial in history by its recorded trial number.
    best_trial_num = results['best_trial_number']
    best_trial = next((t for t in results['history'] if t['trial_number'] == best_trial_num), None)
    if best_trial is None:
        # Fallback: assume trial numbers are 1-indexed positions in the list.
        best_trial = results['history'][best_trial_num - 1] if best_trial_num > 0 else results['history'][0]

    best_design = {
        'substudy_name': config['substudy_name'],
        'trial_number': results['best_trial_number'],
        'parameters': results['best_params'],
        'objective_value': results['best_value'],
        'results': best_trial['results'],
        'calculations': best_trial['calculations'],
        'timestamp': datetime.now().isoformat()
    }

    best_file = substudy_dir / "best_design.json"
    with open(best_file, 'w', encoding='utf-8') as f:
        json.dump(best_design, f, indent=2, default=str)

    # Generate markdown report. Explicit UTF-8 matters here: the trial table
    # header contains a degree sign, which a narrow locale codec could reject.
    report_file = substudy_dir / "report.md"
    with open(report_file, 'w', encoding='utf-8') as f:
        f.write(f"# {config['substudy_name']} - Optimization Report\n\n")
        f.write(f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
        f.write(f"**Description**: {config.get('description', 'N/A')}\n\n")

        f.write("## Configuration\n\n")
        f.write(f"- **Algorithm**: {config['optimization']['algorithm']}\n")
        f.write(f"- **Trials**: {config['optimization']['n_trials']}\n")
        f.write(f"- **Direction**: {config['optimization']['direction']}\n\n")

        # Only emitted when this substudy continued from a parent substudy.
        if config.get('continuation', {}).get('enabled'):
            f.write("## Continuation\n\n")
            f.write(f"- Continued from: `{config['continuation']['from_substudy']}`\n")
            f.write(f"- Inherit best params: {config['continuation'].get('inherit_best_params', False)}\n\n")

        f.write("## Best Design\n\n")
        f.write(f"- **Trial**: {results['best_trial_number']}\n")
        for var in config['optimization']['design_variables']:
            var_name = var['name']
            f.write(f"- **{var_name}**: {results['best_params'][var_name]:.3f} {var.get('units', '')}\n")
        f.write(f"- **Objective**: {results['best_value']:.6f}\n\n")

        best_results = best_trial['results']
        best_calcs = best_trial['calculations']

        f.write("## Performance\n\n")
        f.write(f"- **Max displacement**: {best_results.get('max_displacement', 0):.6f} mm\n")
        f.write(f"- **Max stress**: {best_results.get('max_von_mises', 0):.3f} MPa\n")
        f.write(f"- **Safety factor**: {best_calcs.get('safety_factor', 0):.3f}\n\n")

        # One table row per trial, in run order.
        f.write("## Trial History\n\n")
        f.write("| Trial | Tip (mm) | Angle (°) | Disp (mm) | Stress (MPa) | SF | Objective |\n")
        f.write("|-------|----------|-----------|-----------|--------------|----|-----------|\n")

        for trial in results['history']:
            num = trial['trial_number']
            tip = trial['design_variables']['tip_thickness']
            ang = trial['design_variables']['support_angle']
            disp = trial['results'].get('max_displacement', 0)
            stress = trial['results'].get('max_von_mises', 0)
            sf = trial['calculations'].get('safety_factor', 0)
            obj = trial['objective']
            f.write(f"| {num} | {tip:.2f} | {ang:.2f} | {disp:.6f} | {stress:.2f} | {sf:.2f} | {obj:.6f} |\n")

    return history_file, best_file, report_file
|
|
|
|
|
|
def main() -> None:
    """CLI entry point: run the substudy named in ``sys.argv[1]``."""
    # No substudy named: print usage plus the discovered substudies, then bail.
    if len(sys.argv) < 2:
        print("Usage: python run_substudy.py <substudy_name>")
        print("\nAvailable substudies:")
        study_dir = Path(__file__).parent
        substudies_dir = study_dir / "substudies"
        if substudies_dir.exists():
            # A directory counts as a substudy only if it carries a config.json.
            for substudy in sorted(substudies_dir.iterdir()):
                if substudy.is_dir() and (substudy / "config.json").exists():
                    print(f" - {substudy.name}")
        sys.exit(1)

    substudy_name = sys.argv[1]

    print_section(f"SUBSTUDY: {substudy_name.upper()}")

    # Paths: substudies live beside this script; results go into the substudy dir.
    study_dir = Path(__file__).parent
    substudy_dir = study_dir / "substudies" / substudy_name

    # Shared model files (one NX part + simulation reused by every substudy).
    prt_file = study_dir / "model" / "Bracket.prt"
    sim_file = study_dir / "model" / "Bracket_sim1.sim"

    if not substudy_dir.exists():
        print(f"ERROR: Substudy directory not found: {substudy_dir}")
        sys.exit(1)

    # Load configuration (raises FileNotFoundError if config.json is absent).
    config = load_substudy_config(substudy_dir)

    print(f"Substudy: {config['substudy_name']}")
    print(f"Description: {config.get('description', 'N/A')}")
    print(f"Trials: {config['optimization']['n_trials']}")
    print()

    # Continuation: report the parent substudy whose best design seeds this run.
    # NOTE(review): best_params is only printed here — it is not passed into the
    # runner below; confirm whether seeding actually happens elsewhere
    # (e.g. inside LLMOptimizationRunner).
    if config.get('continuation', {}).get('enabled'):
        parent_substudy = config['continuation']['from_substudy']
        parent_dir = study_dir / "substudies" / parent_substudy
        print(f"Continuation enabled from: {parent_substudy}")

        if config['continuation'].get('inherit_best_params'):
            best_params = load_parent_best_params(parent_dir)
            if best_params:
                print(f"Starting from best parameters: {best_params}")
        print()

    # Run wizard validation (only once per study; reuse the cached baseline OP2).
    print_section("VALIDATION")

    baseline_op2 = study_dir / "model" / "bracket_sim1-solution_1.op2"
    if baseline_op2.exists():
        print("Using existing baseline OP2 for validation")
    else:
        print("Running baseline simulation...")
        wizard = OptimizationSetupWizard(prt_file, sim_file)
        wizard.run_baseline_simulation()

    # Setup optimization
    print_section("OPTIMIZATION SETUP")

    # Build the LLM workflow from the substudy config: what to extract from each
    # OP2 result, which derived quantities to compute, and the optimization
    # settings themselves.
    llm_workflow = {
        'engineering_features': [
            {
                'action': 'extract_displacement',
                'domain': 'result_extraction',
                'params': {'result_type': 'displacement'}
            },
            {
                'action': 'extract_solid_stress',
                'domain': 'result_extraction',
                'params': {
                    'result_type': 'stress',
                    'element_type': 'chexa'  # Auto-detected from baseline
                }
            }
        ],
        'inline_calculations': [
            {
                # Safety factor against yield. 276 MPa is presumably the
                # material's yield strength (matches 6061-T6 aluminum) —
                # confirm against the model's material definition.
                'action': 'calculate_safety_factor',
                'params': {
                    'input': 'max_von_mises',
                    'yield_strength': 276.0,
                    'operation': 'divide'
                },
                'code_hint': 'safety_factor = 276.0 / max_von_mises'
            },
            {
                # Negating displacement lets a minimizing optimizer maximize it.
                'action': 'negate_displacement',
                'params': {
                    'input': 'max_displacement',
                    'operation': 'negate'
                },
                'code_hint': 'neg_displacement = -max_displacement'
            }
        ],
        'post_processing_hooks': [],
        'optimization': config['optimization']
    }

    # Callback: write new design-variable values into the NX part's expressions.
    updater = NXParameterUpdater(prt_file_path=prt_file)
    def model_updater(design_vars: dict):
        updater.update_expressions(design_vars)
        updater.save()

    # Callback: run the simulation and hand back the OP2 result file.
    # '2412' is presumably the Simcenter Nastran release id — confirm.
    solver = NXSolver(nastran_version='2412', use_journal=True)
    def simulation_runner(design_vars: dict) -> Path:
        result = solver.run_simulation(sim_file, expression_updates=design_vars)
        return result['op2_file']

    # Initialize runner with substudy-specific output directory
    runner = LLMOptimizationRunner(
        llm_workflow=llm_workflow,
        model_updater=model_updater,
        simulation_runner=simulation_runner,
        study_name=f"{config['substudy_name']}",
        output_dir=substudy_dir
    )

    print(f" [OK] Output directory: {substudy_dir}")
    print(f" [OK] Incremental history: optimization_history_incremental.json")
    print()

    # Run optimization
    print_section("RUNNING OPTIMIZATION")

    start_time = datetime.now()
    results = runner.run_optimization(n_trials=config['optimization']['n_trials'])
    end_time = datetime.now()

    duration = (end_time - start_time).total_seconds()

    print()
    print_section("OPTIMIZATION COMPLETE!")

    print(f"Duration: {duration:.1f}s ({duration/60:.1f} min)")
    print()

    # Save results (history JSON, best-design JSON, markdown report).
    print("Saving results...")
    history_file, best_file, report_file = save_substudy_results(results, substudy_dir, config)
    print(f" [OK] History: {history_file.name}")
    print(f" [OK] Best design: {best_file.name}")
    print(f" [OK] Report: {report_file.name}")
    print()

    print_section("SUBSTUDY COMPLETE!")
    print(f"Substudy directory: {substudy_dir}")
    print()
|
|
|
|
|
|
if __name__ == '__main__':
    # CLI entry point: python run_substudy.py <substudy_name>
    main()
|