"""
|
|
|
|
|
Study Creator - Atomizer Optimization Study Management
|
|
|
|
|
|
|
|
|
|
Creates and manages optimization studies with mandatory benchmarking workflow.
|
|
|
|
|
|
|
|
|
|
Workflow:
|
|
|
|
|
1. Create study structure
|
|
|
|
|
2. User provides NX models
|
|
|
|
|
3. Run benchmarking (mandatory)
|
|
|
|
|
4. Create substudies (substudy_1, substudy_2, etc.)
|
|
|
|
|
5. Each substudy validates against benchmarking before running
|
|
|
|
|
|
|
|
|
|
Author: Antoine Letarte
|
|
|
|
|
Date: 2025-11-17
|
|
|
|
|
Version: 1.0.0
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import json
|
|
|
|
|
import shutil
|
|
|
|
|
from pathlib import Path
|
|
|
|
|
from typing import Dict, Any, Optional, List
|
|
|
|
|
from datetime import datetime
|
|
|
|
|
import logging
|
|
|
|
|
|
|
|
|
from optimization_engine.study.benchmarking import BenchmarkingSubstudy, BenchmarkResults
|
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)


class StudyCreator:
    """
    Creates and manages Atomizer optimization studies.

    Enforces mandatory benchmarking workflow and provides
    study structure management.
    """

    def __init__(self, studies_root: Optional[Path] = None):
        """
        Initialize study creator.

        Args:
            studies_root: Root directory for all studies (default: ./studies)
        """
        if studies_root is None:
            studies_root = Path.cwd() / "studies"

        self.studies_root = Path(studies_root)
        # Create the root eagerly so later per-study mkdir calls cannot fail
        # on a missing parent.
        self.studies_root.mkdir(parents=True, exist_ok=True)

        logger.info(f"StudyCreator initialized: {self.studies_root}")

    def create_study(self, study_name: str, description: str = "") -> Path:
        """
        Create a new optimization study with standard structure.

        Args:
            study_name: Name of the study (will be folder name)
            description: Brief description of the study

        Returns:
            Path to created study directory
        """
        study_dir = self.studies_root / study_name

        # Idempotent: re-creating an existing study is a warning + no-op.
        if study_dir.exists():
            logger.warning(f"Study already exists: {study_name}")
            return study_dir

        logger.info(f"Creating new study: {study_name}")

        # Create directory structure
        (study_dir / "model").mkdir(parents=True)
        (study_dir / "substudies" / "benchmarking").mkdir(parents=True)
        (study_dir / "config").mkdir(parents=True)
        (study_dir / "plugins" / "post_calculation").mkdir(parents=True)
        (study_dir / "results").mkdir(parents=True)

        # Create study metadata
        metadata = {
            "study_name": study_name,
            "description": description,
            "created": datetime.now().isoformat(),
            "status": "created",
            "benchmarking_completed": False,
            "substudies": []
        }

        metadata_file = study_dir / "study_metadata.json"
        with open(metadata_file, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2)

        # Create README
        readme_content = self._generate_study_readme(study_name, description)
        readme_file = study_dir / "README.md"
        with open(readme_file, 'w', encoding='utf-8') as f:
            f.write(readme_content)

        logger.info(f"Study created: {study_dir}")
        logger.info("")
        logger.info("Next steps:")
        logger.info(f" 1. Add NX model files to: {study_dir / 'model'}/")
        logger.info(" 2. Run benchmarking: study.run_benchmarking()")
        logger.info("")

        return study_dir

    def run_benchmarking(self, study_dir: Path, prt_file: Path, sim_file: Path) -> "BenchmarkResults":
        """
        Run mandatory benchmarking for a study.

        This MUST be run before any optimization substudies.

        Args:
            study_dir: Study directory
            prt_file: Path to NX part file
            sim_file: Path to NX simulation file

        Returns:
            BenchmarkResults
        """
        logger.info("=" * 80)
        logger.info(f"RUNNING BENCHMARKING FOR STUDY: {study_dir.name}")
        logger.info("=" * 80)
        logger.info("")

        # Create benchmarking substudy
        benchmark = BenchmarkingSubstudy(study_dir, prt_file, sim_file)

        # Run discovery
        results = benchmark.run_discovery()

        # Generate report
        report_content = benchmark.generate_report(results)
        report_file = study_dir / "substudies" / "benchmarking" / "BENCHMARK_REPORT.md"
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write(report_content)

        logger.info(f"Benchmark report saved to: {report_file}")
        logger.info("")

        # Update metadata; failed validation blocks substudy creation later.
        self._update_metadata(study_dir, {
            "benchmarking_completed": results.validation_passed,
            "last_benchmarking": datetime.now().isoformat(),
            "status": "benchmarked" if results.validation_passed else "benchmark_failed"
        })

        if not results.validation_passed:
            logger.error("Benchmarking validation FAILED!")
            logger.error("Fix issues before creating substudies")
        else:
            logger.info("Benchmarking validation PASSED!")
            logger.info("Ready to create substudies")

        logger.info("")

        return results

    def create_substudy(self, study_dir: Path, substudy_name: Optional[str] = None,
                        config: Optional[Dict[str, Any]] = None) -> Path:
        """
        Create a new substudy.

        Automatically validates against benchmarking before proceeding.

        Args:
            study_dir: Study directory
            substudy_name: Name of substudy (if None, auto-generates substudy_N)
            config: Optional configuration dict

        Returns:
            Path to substudy directory

        Raises:
            ValueError: If benchmarking has not been completed for this study.
            FileNotFoundError: If no config is given and benchmark results
                needed for the default template are missing.
        """
        # Check benchmarking completed
        metadata = self._load_metadata(study_dir)

        if not metadata.get('benchmarking_completed', False):
            raise ValueError(
                "Benchmarking must be completed before creating substudies!\n"
                "Run: study.run_benchmarking(prt_file, sim_file)"
            )

        # Auto-generate substudy name if not provided
        if substudy_name is None:
            existing_substudies = metadata.get('substudies', [])
            # Filter out benchmarking so numbering covers real campaigns only
            non_benchmark = [s for s in existing_substudies if s != 'benchmarking']
            substudy_number = len(non_benchmark) + 1
            substudy_name = f"substudy_{substudy_number}"

        substudy_dir = study_dir / "substudies" / substudy_name

        if substudy_dir.exists():
            logger.warning(f"Substudy already exists: {substudy_name}")
            return substudy_dir

        logger.info(f"Creating substudy: {substudy_name}")

        # Create substudy directory
        substudy_dir.mkdir(parents=True, exist_ok=True)

        # Create substudy config (default template derives from benchmarking)
        if config is None:
            config = self._create_default_substudy_config(study_dir, substudy_name)

        config_file = substudy_dir / "config.json"
        with open(config_file, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=2)

        # Update metadata
        substudies = metadata.get('substudies', [])
        if substudy_name not in substudies:
            substudies.append(substudy_name)
            self._update_metadata(study_dir, {'substudies': substudies})

        logger.info(f"Substudy created: {substudy_dir}")
        logger.info(f"Config: {config_file}")
        logger.info("")

        return substudy_dir

    def _create_default_substudy_config(self, study_dir: Path, substudy_name: str) -> Dict[str, Any]:
        """Create default substudy configuration based on benchmarking."""
        # Load benchmark results
        benchmark_file = study_dir / "substudies" / "benchmarking" / "benchmark_results.json"

        if not benchmark_file.exists():
            raise FileNotFoundError(f"Benchmark results not found: {benchmark_file}")

        with open(benchmark_file, 'r', encoding='utf-8') as f:
            benchmark_data = json.load(f)

        # Create config from benchmark proposals
        config = {
            "substudy_name": substudy_name,
            "description": f"Substudy {substudy_name}",
            "created": datetime.now().isoformat(),

            "optimization": {
                "algorithm": "TPE",
                "direction": "minimize",
                "n_trials": 20,
                "n_startup_trials": 10,
                "design_variables": []
            },

            "continuation": {
                "enabled": False
            },

            "solver": {
                "nastran_version": "2412",
                "use_journal": True,
                "timeout": 300
            }
        }

        # Add proposed design variables; ranges are intentionally zeroed so
        # the user must fill them in before running.
        for var in benchmark_data.get('proposed_design_variables', []):
            config["optimization"]["design_variables"].append({
                "parameter": var['parameter'],
                "min": 0.0,  # User must fill
                "max": 0.0,  # User must fill
                "units": var.get('units', ''),
                "comment": f"From benchmarking: {var.get('suggested_range', 'define range')}"
            })

        return config

    def _load_metadata(self, study_dir: Path) -> Dict[str, Any]:
        """Load study metadata, returning {} when the file does not exist."""
        metadata_file = study_dir / "study_metadata.json"

        if not metadata_file.exists():
            return {}

        with open(metadata_file, 'r', encoding='utf-8') as f:
            return json.load(f)

    def _update_metadata(self, study_dir: Path, updates: Dict[str, Any]):
        """Merge *updates* into study metadata and persist it."""
        metadata = self._load_metadata(study_dir)
        metadata.update(updates)

        metadata_file = study_dir / "study_metadata.json"
        with open(metadata_file, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2)

    def _generate_study_readme(self, study_name: str, description: str) -> str:
        """Generate README for new study."""
        readme = []
        readme.append(f"# {study_name}")
        readme.append("")
        readme.append(f"**Description**: {description}")
        readme.append(f"**Created**: {datetime.now().strftime('%Y-%m-%d')}")
        readme.append("")
        readme.append("## Study Structure")
        readme.append("")
        readme.append("```")
        readme.append(f"{study_name}/")
        readme.append("├── model/ # NX model files (.prt, .sim)")
        readme.append("├── substudies/")
        readme.append("│ ├── benchmarking/ # Mandatory discovery & validation")
        readme.append("│ ├── substudy_1/ # First optimization campaign")
        readme.append("│ └── substudy_2/ # Additional campaigns")
        readme.append("├── config/ # Configuration templates")
        readme.append("├── plugins/ # Study-specific hooks")
        readme.append("├── results/ # Optimization results")
        readme.append("└── README.md # This file")
        readme.append("```")
        readme.append("")
        readme.append("## Workflow")
        readme.append("")
        readme.append("### 1. Add NX Models")
        readme.append("Place your `.prt` and `.sim` files in the `model/` directory.")
        readme.append("")
        readme.append("### 2. Run Benchmarking (Mandatory)")
        readme.append("```python")
        readme.append("from optimization_engine.study.creator import StudyCreator")
        readme.append("")
        readme.append("creator = StudyCreator()")
        readme.append("results = creator.run_benchmarking(")
        readme.append(f" study_dir=Path('studies/{study_name}'),")
        # BUGFIX: the next two lines were plain strings, so {study_name}
        # appeared literally in generated READMEs instead of the study name.
        readme.append(f" prt_file=Path('studies/{study_name}/model/YourPart.prt'),")
        readme.append(f" sim_file=Path('studies/{study_name}/model/YourSim.sim')")
        readme.append(")")
        readme.append("```")
        readme.append("")
        readme.append("### 3. Review Benchmark Report")
        readme.append("Check `substudies/benchmarking/BENCHMARK_REPORT.md` for:")
        readme.append("- Discovered expressions")
        readme.append("- OP2 contents")
        readme.append("- Baseline performance")
        readme.append("- Configuration proposals")
        readme.append("")
        readme.append("### 4. Create Substudies")
        readme.append("```python")
        readme.append("# Auto-numbered: substudy_1, substudy_2, etc.")
        readme.append(f"substudy_dir = creator.create_substudy(Path('studies/{study_name}'))")
        readme.append("")
        readme.append("# Or custom name:")
        readme.append("substudy_dir = creator.create_substudy(")
        readme.append(f" Path('studies/{study_name}'), ")
        readme.append(" substudy_name='coarse_exploration'")
        readme.append(")")
        readme.append("```")
        readme.append("")
        readme.append("### 5. Configure & Run Optimization")
        readme.append("Edit `substudies/substudy_N/config.json` with:")
        readme.append("- Design variable ranges")
        readme.append("- Objectives and constraints")
        readme.append("- Number of trials")
        readme.append("")
        readme.append("Then run the optimization!")
        readme.append("")
        readme.append("## Status")
        readme.append("")
        readme.append("See `study_metadata.json` for current study status.")
        readme.append("")

        return "\n".join(readme)

    def list_studies(self) -> List[Dict[str, Any]]:
        """List all studies in the studies root."""
        studies = []

        for study_dir in self.studies_root.iterdir():
            if not study_dir.is_dir():
                continue

            metadata_file = study_dir / "study_metadata.json"
            if metadata_file.exists():
                with open(metadata_file, 'r', encoding='utf-8') as f:
                    metadata = json.load(f)
                # BUGFIX: 'benchmarking' is never appended to the substudies
                # list (see create_study/create_substudy), so the previous
                # "len(...) - 1" yielded -1 for a fresh study. Filter instead,
                # matching the filter used in create_substudy.
                non_benchmark = [s for s in metadata.get('substudies', [])
                                 if s != 'benchmarking']
                studies.append({
                    'name': study_dir.name,
                    'path': study_dir,
                    'status': metadata.get('status', 'unknown'),
                    'created': metadata.get('created', 'unknown'),
                    'benchmarking_completed': metadata.get('benchmarking_completed', False),
                    'substudies_count': len(non_benchmark)  # Exclude benchmarking
                })

        return studies
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Demonstrate StudyCreator: summarize existing studies and print usage hints."""
    banner = "=" * 80
    print(banner)
    print("Atomizer Study Creator")
    print(banner)
    print()

    creator = StudyCreator()

    # Summarize every study found under the default studies root.
    existing = creator.list_studies()
    print(f"Existing studies: {len(existing)}")
    for entry in existing:
        icon = "✅" if entry['benchmarking_completed'] else "⚠️"
        print(f" {icon} {entry['name']} ({entry['status']}) - {entry['substudies_count']} substudies")
    print()

    print("To create a new study:")
    print(" creator.create_study('my_study_name', 'Brief description')")
    print()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Script entry point: run the example workflow only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|